repo_name (stringlengths 7-71) | file_path (stringlengths 5-118) | context (list) | import_statement (stringlengths 45-12.5k) | token_num (int64 641-99.4k) | cropped_code (stringlengths 44-17k) | all_code (stringlengths 43-754k) | next_line (stringlengths 2-330) | gold_snippet_index (int64 0-68) | created_at (stringlengths 25-25) | level (stringclasses 9 values) |
---|---|---|---|---|---|---|---|---|---|---|
mikeshardmind/wakfu-utils | wakautosolver/versioned_entrypoints.py | [
{
"identifier": "encode",
"path": "wakautosolver/b2048/encoder.py",
"snippet": "def encode(bys: bytes, /) -> str:\n ret = StringIO()\n stage = 0\n remaining = 0\n\n for byte in bys:\n need = 11 - remaining\n if need < 8:\n remaining = 8 - need\n index = (stage << need) | (byte >> remaining)\n ret.write(ENC_TABLE[index])\n stage = byte & ((1 << remaining) - 1)\n else:\n stage = (stage << 8) | byte\n remaining += 8\n\n if remaining > 0:\n ret.write(TAIL[stage] if remaining <= 3 else ENC_TABLE[stage])\n\n ret.seek(0)\n return ret.read()"
},
{
"identifier": "load_item_source_data",
"path": "wakautosolver/object_parsing.py",
"snippet": "@lru_cache\ndef load_item_source_data() -> SourceData:\n data_file_path = pathlib.Path(__file__).with_name(\"data\") / \"source_info.bz2\"\n with bz2.open(data_file_path, mode=\"rb\", compresslevel=9) as fp:\n return msgpack.decode(fp.read(), type=SourceData)"
},
{
"identifier": "DUMMY_MAX",
"path": "wakautosolver/restructured_types.py",
"snippet": "class ClassesEnum(enum.IntEnum):\nclass ElementsEnum(enum.IntFlag):\nclass Priority(enum.IntEnum):\nclass StatPriority(Struct, frozen=True, array_like=True):\nclass Stats(Struct, frozen=True, gc=True):\nclass SetMinimums(Stats, frozen=True, gc=False):\nclass SetMaximums(Stats, frozen=True, gc=False):\nclass v1Config(Struct, kw_only=True):\n EMPTY = -1\nDUMMY_MIN: int = -1_000_000\nDUMMY_MAX: int = 1_000_000\nSIMMABLE = [\"ap\", \"mp\", \"wp\", \"ra\", \"block\", \"armor_given\"]\n def is_valid(self) -> bool:\n def __eq__(self, other: object) -> bool:\n def __ne__(self, other: object) -> bool:\n def __sub__(self, other: object) -> Stats:\n def __add__(self, other: object) -> Stats:\n def __le__(self, other: object) -> bool:\n def stats_met(self, other: Stats) -> bool:\n def get_sim_keys(self) -> list[str]:\n def unhandled(self) -> bool:\n def __and__(self, other: object) -> SetMinimums:\n def __le__(self, other: object):\n def unhandled(self) -> bool:\n def __and__(self, other: object) -> SetMaximums:\ndef effective_mastery(stats: Stats, rel_mastery_key: Callable[[Stats], int]) -> float:\ndef effective_healing(stats: Stats, rel_mastery_key: Callable[[Stats], int]) -> float:\ndef apply_w2h(stats: Stats) -> Stats:\ndef apply_unravel(stats: Stats) -> Stats:\ndef apply_elementalism(stats: Stats) -> Stats:"
},
{
"identifier": "SetMaximums",
"path": "wakautosolver/restructured_types.py",
"snippet": "class SetMaximums(Stats, frozen=True, gc=False):\n ap: int = DUMMY_MAX\n mp: int = DUMMY_MAX\n wp: int = DUMMY_MAX\n ra: int = DUMMY_MAX\n critical_hit: int = DUMMY_MAX\n critical_mastery: int = DUMMY_MAX\n elemental_mastery: int = DUMMY_MAX\n mastery_3_elements: int = DUMMY_MAX\n mastery_2_elements: int = DUMMY_MAX\n mastery_1_element: int = DUMMY_MAX\n distance_mastery: int = DUMMY_MAX\n rear_mastery: int = DUMMY_MAX\n healing_mastery: int = DUMMY_MAX\n berserk_mastery: int = DUMMY_MAX\n melee_mastery: int = DUMMY_MAX\n control: int = DUMMY_MAX\n block: int = DUMMY_MAX\n fd: int = DUMMY_MAX\n heals_performed: int = DUMMY_MAX\n lock: int = DUMMY_MAX\n dodge: int = DUMMY_MAX\n armor_given: int = DUMMY_MAX\n\n def unhandled(self) -> bool:\n _ap, _mp, _wp, _ra, _crit, *rest = astuple(self)\n return any(stat != DUMMY_MAX for stat in rest)\n\n def __and__(self, other: object) -> SetMaximums:\n if not isinstance(other, SetMaximums):\n return NotImplemented\n\n return SetMaximums(\n min(self.ap, other.ap),\n min(self.mp, other.mp),\n min(self.wp, other.wp),\n min(self.ra, other.ra),\n min(self.critical_hit, other.critical_hit),\n min(self.critical_mastery, other.critical_mastery),\n min(self.elemental_mastery, other.elemental_mastery),\n min(self.mastery_3_elements, other.mastery_3_elements),\n min(self.mastery_2_elements, other.mastery_2_elements),\n min(self.mastery_1_element, other.mastery_1_element),\n min(self.distance_mastery, other.distance_mastery),\n min(self.rear_mastery, other.rear_mastery),\n min(self.healing_mastery, other.healing_mastery),\n min(self.berserk_mastery, other.berserk_mastery),\n min(self.melee_mastery, other.melee_mastery),\n min(self.control, other.control),\n min(self.block, other.block),\n min(self.fd, other.fd),\n min(self.heals_performed, other.heals_performed),\n min(self.lock, other.lock),\n min(self.dodge, other.dodge),\n min(self.armor_given, other.armor_given),\n )"
},
{
"identifier": "SetMinimums",
"path": "wakautosolver/restructured_types.py",
"snippet": "class SetMinimums(Stats, frozen=True, gc=False):\n ap: int = DUMMY_MIN\n mp: int = DUMMY_MIN\n wp: int = DUMMY_MIN\n ra: int = DUMMY_MIN\n critical_hit: int = DUMMY_MIN\n critical_mastery: int = DUMMY_MIN\n elemental_mastery: int = DUMMY_MIN\n mastery_3_elements: int = DUMMY_MIN\n mastery_2_elements: int = DUMMY_MIN\n mastery_1_element: int = DUMMY_MIN\n distance_mastery: int = DUMMY_MIN\n rear_mastery: int = DUMMY_MIN\n healing_mastery: int = DUMMY_MIN\n berserk_mastery: int = DUMMY_MIN\n melee_mastery: int = DUMMY_MIN\n control: int = DUMMY_MIN\n block: int = DUMMY_MIN\n fd: int = DUMMY_MIN\n heals_performed: int = DUMMY_MIN\n lock: int = DUMMY_MIN\n dodge: int = DUMMY_MIN\n armor_given: int = DUMMY_MIN\n\n def stats_met(self, other: Stats) -> bool:\n return not any(o < s for s, o in zip(astuple(self), astuple(other), strict=True))\n\n def get_sim_keys(self) -> list[str]:\n return [k for k, v in asdict(self).items() if v != DUMMY_MIN and k in SIMMABLE]\n\n def unhandled(self) -> bool:\n _ap, _mp, _wp, _ra, _crit, *rest = astuple(self)\n return any(stat != DUMMY_MIN for stat in rest)\n\n def __and__(self, other: object) -> SetMinimums:\n if not isinstance(other, SetMinimums):\n return NotImplemented\n\n return SetMinimums(\n max(self.ap, other.ap),\n max(self.mp, other.mp),\n max(self.wp, other.wp),\n max(self.ra, other.ra),\n max(self.critical_hit, other.critical_hit),\n max(self.critical_mastery, other.critical_mastery),\n max(self.elemental_mastery, other.elemental_mastery),\n max(self.mastery_3_elements, other.mastery_3_elements),\n max(self.mastery_2_elements, other.mastery_2_elements),\n max(self.mastery_1_element, other.mastery_1_element),\n max(self.distance_mastery, other.distance_mastery),\n max(self.rear_mastery, other.rear_mastery),\n max(self.healing_mastery, other.healing_mastery),\n max(self.berserk_mastery, other.berserk_mastery),\n max(self.melee_mastery, other.melee_mastery),\n max(self.control, other.control),\n max(self.block, other.block),\n max(self.fd, other.fd),\n max(self.heals_performed, other.heals_performed),\n max(self.lock, other.lock),\n max(self.dodge, other.dodge),\n max(self.armor_given, other.armor_given),\n )\n\n def __le__(self, other: object):\n if not isinstance(other, Stats):\n return NotImplemented\n\n return all(\n (\n self.ap <= other.ap,\n self.mp <= other.mp,\n self.wp <= other.wp,\n self.ra <= other.ra,\n self.critical_hit <= other.critical_hit,\n self.critical_mastery <= other.critical_mastery,\n self.elemental_mastery <= other.elemental_mastery,\n self.mastery_3_elements <= other.mastery_3_elements,\n self.mastery_2_elements <= other.mastery_2_elements,\n self.mastery_1_element <= other.mastery_1_element,\n self.distance_mastery <= other.distance_mastery,\n self.rear_mastery <= other.rear_mastery,\n self.healing_mastery <= other.healing_mastery,\n self.berserk_mastery <= other.berserk_mastery,\n self.melee_mastery <= other.melee_mastery,\n self.control <= other.control,\n self.block <= other.block,\n self.fd <= other.fd,\n self.heals_performed <= other.heals_performed,\n self.lock <= other.lock,\n self.dodge <= other.dodge,\n self.armor_given <= other.armor_given,\n )\n )"
},
{
"identifier": "ImpossibleStatError",
"path": "wakautosolver/solver.py",
"snippet": "T = TypeVar(\"T\")\nALWAYS_SIMMED = \"ap\", \"mp\", \"ra\", \"wp\", \"critical_hit\", \"critical_mastery\"\n ALL_OBJS = get_all_items()\n LOW_BOUND = max(ns.lv - ns.tolerance, 1)\n NATION_RELIC_EPIC_IDS = [26494, 26495, 26496, 26497, 26575, 26576, 26577, 26578]\n FORBIDDEN: list[int] = []\n FORBIDDEN_NAMES: list[str] = ns.forbid if (ns and ns.forbid) else []\n BASE_STAT_SCORE = _score_key(base_stats)\n FINDABLE_AP_MP_NEEDED = sum(attrgetter(\"ap\", \"mp\")(stat_mins - base_stats - _af_stats))\n OBJS: Final[list[EquipableItem]] = list(filter(initial_filter, ALL_OBJS))\n AOBJS: collections.defaultdict[str, list[EquipableItem]] = collections.defaultdict(list)\n OFF_HANDS = solve_DAGGERS + solve_SHIELDS\n REM_SLOTS = [\n \"LEGS\",\n \"BACK\",\n \"HEAD\",\n \"CHEST\",\n \"SHOULDERS\",\n \"BELT\",\n \"LEFT_HAND\",\n \"LEFT_HAND\",\n \"NECK\",\n \"ACCESSORY\",\n \"MOUNT\",\n \"PET\",\n ]\n UNRAVEL_ACTIVE = ns.unraveling and critical_hit >= 40\nclass SupportsWrite(Protocol[T_contra]):\nclass SolveError(Exception):\nclass ImpossibleStatError(SolveError):\n def write(self, s: T_contra, /) -> object:\ndef setup_logging(output: SupportsWrite[str]) -> None:\ndef ordered_keep_by_key(it: Iterable[T], key: Callable[[T], Hashable], k: int = 1) -> list[T]:\ndef inplace_ordered_keep_by_key(it: list[T], key: Callable[[T], Hashable], k: int = 1) -> None:\ndef solve(\n ns: v1Config,\n use_tqdm: bool = False,\n progress_callback: Callable[[int, int], None] | None = None,\n) -> list[tuple[float, list[EquipableItem]]]:\n def _score_key(item: EquipableItem | Stats | None) -> float:\n def crit_score_key(item: EquipableItem | None) -> float:\n def has_currently_unhandled_item_condition(item: EquipableItem) -> bool:\n def item_condition_conflicts_requested_stats(item: EquipableItem) -> bool:\n def level_filter(item: EquipableItem) -> bool:\n def relic_epic_level_filter(item: EquipableItem) -> bool:\n def minus_relicepic(item: EquipableItem) -> bool:\n def missing_common_major(item: EquipableItem) -> bool:\n def initial_filter(item: EquipableItem) -> bool:\n def compat_with_forced(item: EquipableItem) -> bool:\n def needs_full_sim_key(item: EquipableItem) -> Hashable:\n def tuple_expander(seq: Iterable[tuple[EquipableItem, EquipableItem] | EquipableItem]) -> Iterator[EquipableItem]:\n def re_key_func(pair: tuple[EquipableItem | None, EquipableItem | None]) -> Hashable:\n def re_score_key(pair: tuple[EquipableItem | None, EquipableItem | None]) -> tuple[int, float, float]:\ndef entrypoint(output: SupportsWrite[str], ns: v1Config | None = None) -> None:\n def write(*args: object, sep: str = \" \", end: str = \"\\n\") -> None:"
},
{
"identifier": "Buildv1",
"path": "wakautosolver/wakforge_buildcodes.py",
"snippet": "class Buildv1(Struct, array_like=True):\n buildcodeversion: SupportedVersions = 1\n classenum: WFClasses = WFClasses.EMPTY\n level: int = 230\n # allocated stats\n s_int_percent_hp: STAT_MAX = 0\n s_int_elemental_res: UP_TO_10 = 0\n s_int_barrier: UP_TO_10 = 0\n s_int_heals_recv: UP_TO_5 = 0\n s_int_percent_armor: UP_TO_10 = 0\n s_str_elemental_mastery: STAT_MAX = 0\n s_str_melee_mastery: UP_TO_40 = 0\n s_str_distance_mastery: UP_TO_40 = 0\n s_str_hp: STAT_MAX = 0\n s_agi_lock: STAT_MAX = 0\n s_agi_dodge: STAT_MAX = 0\n s_agi_initiative: UP_TO_20 = 0\n s_agi_lockdodge: STAT_MAX = 0\n s_agi_fow: UP_TO_20 = 0\n s_fortune_percent_crit: UP_TO_20 = 0\n s_fortune_percent_block: UP_TO_20 = 0\n s_fortune_crit_mastery: STAT_MAX = 0\n s_fortune_rear_mastery: STAT_MAX = 0\n s_fortune_berserk_mastery: STAT_MAX = 0\n s_fortune_healing_mastery: STAT_MAX = 0\n s_fortune_rear_res: UP_TO_20 = 0\n s_fortune_crit_res: UP_TO_20 = 0\n s_major_ap: ZERO_OR_ONE = 0\n s_major_mp: ZERO_OR_ONE = 0\n s_major_ra: ZERO_OR_ONE = 0\n s_major_wp: ZERO_OR_ONE = 0\n s_major_control: ZERO_OR_ONE = 0\n s_major_damage: ZERO_OR_ONE = 0\n s_major_res: ZERO_OR_ONE = 0\n item_1: Item | list[object] = field(default_factory=list)\n item_2: Item | list[object] = field(default_factory=list)\n item_3: Item | list[object] = field(default_factory=list)\n item_4: Item | list[object] = field(default_factory=list)\n item_5: Item | list[object] = field(default_factory=list)\n item_6: Item | list[object] = field(default_factory=list)\n item_7: Item | list[object] = field(default_factory=list)\n item_8: Item | list[object] = field(default_factory=list)\n item_9: Item | list[object] = field(default_factory=list)\n item_10: Item | list[object] = field(default_factory=list)\n item_11: Item | list[object] = field(default_factory=list)\n item_12: Item | list[object] = field(default_factory=list)\n item_13: Item | list[object] = field(default_factory=list)\n item_14: Item | list[object] = field(default_factory=list)\n active_1: int = -1\n active_2: int = -1\n active_3: int = -1\n active_4: int = -1\n active_5: int = -1\n active_6: int = -1\n active_7: int = -1\n active_8: int = -1\n active_9: int = -1\n active_10: int = -1\n active_11: int = -1\n active_12: int = -1\n passive_1: int = -1\n passive_2: int = -1\n passive_3: int = -1\n passive_4: int = -1\n passive_5: int = -1\n passive_6: int = -1\n epic_sublimation_id: int = -1\n relic_sublimation_id: int = -1\n\n @classmethod\n def from_code(cls, code: str) -> Buildv1:\n # wakforge sending empty arrays...\n s = msgpack.decode(zlib.decompress(b2048.decode(code), wbits=-15))\n s[1] = WFClasses(s[1])\n items = s[32:46]\n for idx, item in enumerate(items, 32):\n if not item:\n s[idx] = Item()\n else:\n item_id, elements, runes, subs = item\n if item_id == -1:\n s[idx] = Item()\n continue\n runes = [Rune() for r in runes if r]\n s[idx] = Item(item_id, WFElements(elements), runes, subs)\n\n return cls(*s)\n\n def get_allocated_stats(self) -> AllocatedStats:\n tup = astuple(self)\n return AllocatedStats(*tup[3:32])\n\n def clear_items(self) -> None:\n empty = Item()\n for idx in range(1, 15):\n setattr(self, f\"item_{idx}\", empty)\n\n def get_items(self) -> list[Item]:\n \"\"\"\n Wakforge attaches 2 sublimations to an item matching how\n the game does it instead of the idealized structure,\n converstion to an idealized build requires knowing which sublimations\n are relic and epic sublimations, and isn't important right now.\n \"\"\"\n items = astuple(self)[32:46]\n # wakforge sends fake items 
rather than not sending them, a subarray for items would be lovely...\n return [i for i in items if isinstance(i, Item) and i]\n\n def add_elements_to_item(self, item_id: int, elements: WFElements) -> None:\n for idx in range(1, 15):\n item: Item | None = getattr(self, f\"item_{idx}\", None)\n if item and item.item_id == item_id:\n item.assignable_elements = elements\n setattr(self, f\"item_{idx}\", item)\n break\n\n def add_item(self, item: EquipableItem, elements: WFElements = WFElements.empty, /) -> None:\n indices = compress(count(1), map(partial(eq, item.item_slot), v1BuildSlotsOrder))\n for index in indices:\n if not getattr(self, f\"item_{index}\", None):\n setattr(self, f\"item_{index}\", Item(item_id=item.item_id, assignable_elements=elements))\n break\n else:\n msg = f\"Can't find a valid slot for this thing. {item}\"\n raise RuntimeError(msg)\n\n def to_code(self) -> str:\n packed = msgpack.encode(self)\n compressor = zlib.compressobj(level=9, wbits=-15)\n return b2048.encode(compressor.compress(packed) + compressor.flush())"
}
] | import traceback
import zlib
from collections.abc import Callable
from typing import Literal
from msgspec import Struct, field, msgpack
from msgspec.structs import asdict
from .b2048 import encode as b2048encode
from .object_parsing import load_item_source_data
from .restructured_types import DUMMY_MAX, DUMMY_MIN, ClassElements, ElementsEnum, Priority, StatPriority, Stats
from .restructured_types import SetMaximums as RealSetMaxs
from .restructured_types import SetMinimums as RealSetMins
from .solver import ImpossibleStatError, SolveError, solve, v1Config
from .wakforge_buildcodes import Buildv1 as WFBuild | 7,674 | ignore_existing_items: bool = False
forbidden_sources: list[Literal["arch", "horde", "pvp", "ultimate_boss"]] = field(default_factory=list)
stats_maxs: SetMaximums = field(default_factory=SetMaximums)
class v2Result(Struct):
build_code: str | None = None
error_code: str | None = None
item_ids: list[int] = field(default_factory=list)
debug_info: str | None = None
def compressed_encode(obj: object) -> str:
compressor = zlib.compressobj(level=9, wbits=-15)
packed = msgpack.encode(obj)
return b2048encode(compressor.compress(packed) + compressor.flush())
def partial_solve_v2(
*,
build_code: str,
config: v2Config,
progress_callback: Callable[[int, int], None] | None = None,
) -> v2Result:
# pyodide proxies aren't actually lists...
config.allowed_rarities = [i for i in config.allowed_rarities if i]
config.forbidden_items = [i for i in config.forbidden_items if i]
config.forbidden_sources = [s for s in config.forbidden_sources if s]
# This may look redundant, but it's exceptionally cheap validation
try:
config = msgpack.decode(msgpack.encode(config), type=v2Config)
except Exception as exc: # noqa: BLE001
msg = traceback.format_exception(exc)
return v2Result(None, "Invalid config (get debug info if opening an issue)", debug_info=compressed_encode(msg))
target_stats = config.target_stats.to_real()
item_sources = load_item_source_data()
forbidden_ids: set[int] = set()
for source in config.forbidden_sources:
forbidden_ids |= getattr(item_sources, source)
forbidden_ids -= item_sources.non_finite_arch_horde
config.forbidden_items.extend(forbidden_ids)
if not config.objectives.is_valid:
msg = ("objectives", config.objectives)
return v2Result(None, "Invalid config (get debug info if opening an issue)", debug_info=compressed_encode(msg))
build = WFBuild.from_code(build_code)
if config.ignore_existing_items:
build.clear_items()
stats = build.get_allocated_stats().to_stat_values(build.classenum)
item_ids = [i.item_id for i in build.get_items() if i.item_id > 0]
ap = target_stats.ap - stats.ap
mp = target_stats.mp - stats.mp
wp = target_stats.wp - stats.wp
ra = target_stats.ra - stats.ra
forbidden_rarities = [i for i in range(1, 8) if i not in config.allowed_rarities]
# TODO: modify internals to not need this level of wrapping
lookup: dict[Priority, Literal["full", "half", "none"]] = {
Priority.full_negative_only: "full",
Priority.half_negative_only: "half",
}
cfg = v1Config(
lv=build.level,
wakfu_class=build.classenum,
ap=ap,
mp=mp,
wp=wp,
ra=ra,
stat_minimums=target_stats,
stat_maximums=config.stats_maxs.to_real(),
base_stats=stats,
baseap=stats.ap,
basemp=stats.mp,
basera=stats.ra,
bawewp=stats.wp,
bcrit=stats.critical_hit,
bcmast=stats.critical_mastery,
bmast=stats.elemental_mastery,
num_mastery=config.objectives.elements.bit_count(),
forbid_rarity=forbidden_rarities,
idforce=item_ids,
dist=config.objectives.distance_mastery == Priority.prioritized,
melee=config.objectives.melee_mastery == Priority.prioritized,
heal=config.objectives.heal_mastery == Priority.prioritized,
zerk=config.objectives.berserk_mastery == Priority.prioritized,
rear=config.objectives.rear_mastery == Priority.prioritized,
negrear=lookup.get(config.objectives.rear_mastery, "none"),
negzerk=lookup.get(config.objectives.berserk_mastery, "none"),
dry_run=config.dry_run,
hard_cap_depth=35,
tolerance=_adaptive_tolerance_map.get(build.level, 14),
search_depth=1,
elements=config.objectives.elements,
)
try:
result = solve(cfg, progress_callback=progress_callback)
best = result[0]
except ImpossibleStatError as exc:
return v2Result(None, exc.args[0], debug_info=None)
except (IndexError, SolveError):
return v2Result(None, "No possible solution found", debug_info=None)
except Exception as exc: # noqa: BLE001
msg = traceback.format_exception(exc)
return v2Result(None, "Unknown error, see debug info", debug_info=compressed_encode(msg))
score, found_items = best
found_item_ids = [i.item_id for i in found_items]
if config.dry_run:
return v2Result(None, None, found_item_ids, None)
ecount = config.objectives.elements.bit_count()
| """
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (C) 2023 Michael Hall <https://github.com/mikeshardmind>
"""
from __future__ import annotations
ClassNames = Literal[
"Feca",
"Osa",
"Enu",
"Sram",
"Xel",
"Eca",
"Eni",
"Iop",
"Cra",
"Sadi",
"Sac",
"Panda",
"Rogue",
"Masq",
"Ougi",
"Fog",
"Elio",
"Hupper",
]
_adaptive_tolerance_map: dict[int, int] = {
20: 20,
35: 35,
50: 50,
65: 30,
80: 30,
95: 30,
110: 30,
125: 15,
140: 15,
155: 15,
170: 15,
185: 15,
200: 14,
215: 15,
230: 14,
}
v1Result = tuple[list[int] | None, str | None]
# Exists because versioning
class SetMinimums(Struct, frozen=True, gc=True):
ap: int = DUMMY_MIN
mp: int = DUMMY_MIN
wp: int = DUMMY_MIN
ra: int = DUMMY_MIN
crit: int = DUMMY_MIN
crit_mastery: int = DUMMY_MIN
elemental_mastery: int = DUMMY_MIN
one_element_mastery: int = DUMMY_MIN
two_element_mastery: int = DUMMY_MIN
three_element_mastery: int = DUMMY_MIN
distance_mastery: int = DUMMY_MIN
rear_mastery: int = DUMMY_MIN
heal_mastery: int = DUMMY_MIN
beserk_mastery: int = DUMMY_MIN
melee_mastery: int = DUMMY_MIN
control: int = DUMMY_MIN
block: int = DUMMY_MIN
fd: int = DUMMY_MIN
heals_performed: int = DUMMY_MIN
lock: int = DUMMY_MIN
dodge: int = DUMMY_MIN
armor_given: int = DUMMY_MIN
def to_real(self) -> RealSetMins:
data = asdict(self)
for new, old in (
("critical_hit", "crit"),
("critical_mastery", "crit_mastery"),
("mastery_3_elements", "three_element_mastery"),
("mastery_2_elements", "two_element_mastery"),
("mastery_1_element", "one_element_mastery"),
("healing_mastery", "heal_mastery"),
("berserk_mastery", "beserk_mastery"),
):
data[new] = data.pop(old)
return RealSetMins(**data)
class SetMaximums(Struct, frozen=True, gc=True):
ap: int = DUMMY_MAX
mp: int = DUMMY_MAX
wp: int = DUMMY_MAX
ra: int = DUMMY_MAX
crit: int = DUMMY_MAX
crit_mastery: int = DUMMY_MAX
elemental_mastery: int = DUMMY_MAX
one_element_mastery: int = DUMMY_MAX
two_element_mastery: int = DUMMY_MAX
three_element_mastery: int = DUMMY_MAX
distance_mastery: int = DUMMY_MAX
rear_mastery: int = DUMMY_MAX
heal_mastery: int = DUMMY_MAX
beserk_mastery: int = DUMMY_MAX
melee_mastery: int = DUMMY_MAX
control: int = DUMMY_MAX
block: int = DUMMY_MAX
fd: int = DUMMY_MAX
heals_performed: int = DUMMY_MAX
lock: int = DUMMY_MAX
dodge: int = DUMMY_MAX
armor_given: int = DUMMY_MAX
def to_real(self) -> RealSetMaxs:
data = asdict(self)
for new, old in (
("critical_hit", "crit"),
("critical_mastery", "crit_mastery"),
("mastery_3_elements", "three_element_mastery"),
("mastery_2_elements", "two_element_mastery"),
("mastery_1_element", "one_element_mastery"),
("healing_mastery", "heal_mastery"),
("berserk_mastery", "beserk_mastery"),
):
data[new] = data.pop(old)
return RealSetMaxs(**data)
def partial_solve_v1(
*,
lv: int,
stats: Stats,
target_stats: RealSetMins,
equipped_items: list[int],
num_mastery: int,
allowed_rarities: list[int],
dist: bool = False,
melee: bool = False,
heal: bool = False,
zerk: bool = False,
rear: bool = False,
dry_run: bool = False,
) -> v1Result:
"""
Doesn't handle sublimations, passives, etc yet
Use from pyodide:
// passing in other values besides the below
// may cause problems with solve quality for v1
let targets = SetMinimum.callKwargs({ap: 12, mp: 6, ra: 2, wp: 0});
// for the full list of supported Stats, see Stats class
let stats = Stats.callKwargs({ap: 7, mp: 4, ...});
let [result, error] = partial_solve_v1.callKwargs(
{
stats: stats,
target_stats: targets,
}
)
"""
ap = target_stats.ap - stats.ap
mp = target_stats.mp - stats.mp
ra = target_stats.ra - stats.ra
wp = target_stats.wp - stats.wp
forbidden_rarities = [i for i in range(1, 8) if i not in allowed_rarities]
equipped = [i for i in equipped_items if i] if equipped_items else []
cfg = v1Config(
lv=lv,
ap=ap,
mp=mp,
wp=wp,
ra=ra,
baseap=stats.ap,
basemp=stats.mp,
basera=stats.ra,
bawewp=stats.wp,
bcrit=stats.critical_hit - 3, # wakforge is doing something wrong here, won't be fixes for this entrypoint
bcmast=stats.critical_mastery,
bmast=stats.elemental_mastery,
num_mastery=num_mastery,
forbid_rarity=forbidden_rarities,
idforce=equipped,
dist=dist,
melee=melee,
heal=heal,
zerk=zerk,
rear=rear,
dry_run=dry_run,
hard_cap_depth=15,
tolerance=_adaptive_tolerance_map.get(lv, 14),
search_depth=1 if dry_run else 1,
)
try:
result = solve(cfg)
best = result[0]
except (IndexError, SolveError):
return (None, "No possible solution found")
_score, items = best
item_ids = [i.item_id for i in items]
return (item_ids, None)
class v2Config(Struct):
allowed_rarities: list[int] = field(default_factory=lambda: [1, 2, 3, 4, 5, 6, 7])
target_stats: SetMinimums = field(default_factory=SetMinimums)
dry_run: bool = False
objectives: StatPriority = field(default_factory=StatPriority)
forbidden_items: list[int] = field(default_factory=list)
ignore_existing_items: bool = False
forbidden_sources: list[Literal["arch", "horde", "pvp", "ultimate_boss"]] = field(default_factory=list)
stats_maxs: SetMaximums = field(default_factory=SetMaximums)
class v2Result(Struct):
build_code: str | None = None
error_code: str | None = None
item_ids: list[int] = field(default_factory=list)
debug_info: str | None = None
def compressed_encode(obj: object) -> str:
compressor = zlib.compressobj(level=9, wbits=-15)
packed = msgpack.encode(obj)
return b2048encode(compressor.compress(packed) + compressor.flush())
def partial_solve_v2(
*,
build_code: str,
config: v2Config,
progress_callback: Callable[[int, int], None] | None = None,
) -> v2Result:
# pyodide proxies aren't actually lists...
config.allowed_rarities = [i for i in config.allowed_rarities if i]
config.forbidden_items = [i for i in config.forbidden_items if i]
config.forbidden_sources = [s for s in config.forbidden_sources if s]
# This may look redundant, but it's exceptionally cheap validation
try:
config = msgpack.decode(msgpack.encode(config), type=v2Config)
except Exception as exc: # noqa: BLE001
msg = traceback.format_exception(exc)
return v2Result(None, "Invalid config (get debug info if opening an issue)", debug_info=compressed_encode(msg))
target_stats = config.target_stats.to_real()
item_sources = load_item_source_data()
forbidden_ids: set[int] = set()
for source in config.forbidden_sources:
forbidden_ids |= getattr(item_sources, source)
forbidden_ids -= item_sources.non_finite_arch_horde
config.forbidden_items.extend(forbidden_ids)
if not config.objectives.is_valid:
msg = ("objectives", config.objectives)
return v2Result(None, "Invalid config (get debug info if opening an issue)", debug_info=compressed_encode(msg))
build = WFBuild.from_code(build_code)
if config.ignore_existing_items:
build.clear_items()
stats = build.get_allocated_stats().to_stat_values(build.classenum)
item_ids = [i.item_id for i in build.get_items() if i.item_id > 0]
ap = target_stats.ap - stats.ap
mp = target_stats.mp - stats.mp
wp = target_stats.wp - stats.wp
ra = target_stats.ra - stats.ra
forbidden_rarities = [i for i in range(1, 8) if i not in config.allowed_rarities]
# TODO: modify internals to not need this level of wrapping
lookup: dict[Priority, Literal["full", "half", "none"]] = {
Priority.full_negative_only: "full",
Priority.half_negative_only: "half",
}
cfg = v1Config(
lv=build.level,
wakfu_class=build.classenum,
ap=ap,
mp=mp,
wp=wp,
ra=ra,
stat_minimums=target_stats,
stat_maximums=config.stats_maxs.to_real(),
base_stats=stats,
baseap=stats.ap,
basemp=stats.mp,
basera=stats.ra,
bawewp=stats.wp,
bcrit=stats.critical_hit,
bcmast=stats.critical_mastery,
bmast=stats.elemental_mastery,
num_mastery=config.objectives.elements.bit_count(),
forbid_rarity=forbidden_rarities,
idforce=item_ids,
dist=config.objectives.distance_mastery == Priority.prioritized,
melee=config.objectives.melee_mastery == Priority.prioritized,
heal=config.objectives.heal_mastery == Priority.prioritized,
zerk=config.objectives.berserk_mastery == Priority.prioritized,
rear=config.objectives.rear_mastery == Priority.prioritized,
negrear=lookup.get(config.objectives.rear_mastery, "none"),
negzerk=lookup.get(config.objectives.berserk_mastery, "none"),
dry_run=config.dry_run,
hard_cap_depth=35,
tolerance=_adaptive_tolerance_map.get(build.level, 14),
search_depth=1,
elements=config.objectives.elements,
)
try:
result = solve(cfg, progress_callback=progress_callback)
best = result[0]
except ImpossibleStatError as exc:
return v2Result(None, exc.args[0], debug_info=None)
except (IndexError, SolveError):
return v2Result(None, "No possible solution found", debug_info=None)
except Exception as exc: # noqa: BLE001
msg = traceback.format_exception(exc)
return v2Result(None, "Unknown error, see debug info", debug_info=compressed_encode(msg))
score, found_items = best
found_item_ids = [i.item_id for i in found_items]
if config.dry_run:
return v2Result(None, None, found_item_ids, None)
ecount = config.objectives.elements.bit_count() | remaining_elements = [e for e in ElementsEnum if e not in config.objectives.elements] | 2 | 2023-10-10 21:54:23+00:00 | 12k |
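The row above shows one complete record: retrieved cross-file context snippets, the file's import block, a token count, the cropped code prefix, the full file, the held-out next_line, the index of the gold context snippet, a timestamp, and a level tag. Assuming this preview reflects a next-line code-completion benchmark, the sketch below shows one way a record might be reassembled into a prompt and scored by exact match; `build_prompt`, `predict`, and the prompt layout are illustrative assumptions, not part of the dataset.

```python
from typing import Callable, Dict, List


def build_prompt(record: Dict) -> str:
    """Assemble one record into a completion prompt.

    The ordering (context snippets, then imports, then the cropped prefix)
    is an assumption for illustration; the dataset only stores raw fields.
    """
    context_block = "\n\n".join(
        f"# {c['path']}\n{c['snippet']}" for c in record["context"]
    )
    return f"{context_block}\n\n{record['import_statement']}\n{record['cropped_code']}\n"


def exact_match_next_line(records: List[Dict], predict: Callable[[str], str]) -> float:
    """Score a hypothetical next-line predictor by exact match on next_line."""
    hits = sum(
        predict(build_prompt(r)).strip() == r["next_line"].strip() for r in records
    )
    return hits / max(len(records), 1)
```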
bittranslateio/bittranslate | neurons/validator.py | [
{
"identifier": "Validator",
"path": "bittranslate/validator.py",
"snippet": "class Validator:\n def __init__(self, device: str = \"cpu\", out_dir: str= \"bittranslate_out/\" ):\n self._reward_models = [BertScore(device=device), VectorSim(device=device)]\n\n self._reward_weights = [0.5, 0.5]\n self._mgpt_pipeline = pipeline(\"text-generation\", \"ai-forever/mGPT\", device=device)\n\n self._wenzhong_gpt2_pipeline = pipeline(\"text-generation\", \"IDEA-CCNL/Wenzhong-GPT2-110M\", device=device)\n\n self._langs = [\"ar\", \"bg\", \"de\", \"el\", \"en\",\n \"es\", \"et\", \"fa\", \"fi\", \"fr\", \"hi\", \"hu\", \"it\", \"ko\", \"pl\", \"pt\",\n \"ro\", \"ru\", \"sv\", \"th\", \"tr\", \"uk\", \"vi\",\n \"zh\"]\n\n self._wenzhong_gpt2_langs = [\"zh\"]\n self._mgpt_langs = [lang for lang in self._langs if lang not in self._wenzhong_gpt2_langs]\n\n self._lang_pairs = list(permutations(self._langs, 2))\n\n self._lang_probs = {\n \"en\": 0.4,\n \"pl\": 0.1\n }\n\n self.tracker = ValidatorTracker(self._lang_pairs, TRACKER_HISTORY_COUNT)\n\n self.out_dir = out_dir\n if not os.path.exists(self.out_dir):\n os.makedirs(self.out_dir)\n\n exams = Exams()\n german_quad = GermanQuAD()\n peer_sum = PeerSum()\n xquad = XQuAD()\n mkqa = MKqa()\n bittranslate_dataset = BitTranslateDataset()\n\n self._datasets = {\n \"ar\": [xquad],\n \"bg\": [exams],\n \"de\": [german_quad, xquad],\n \"el\": [xquad],\n \"en\": [peer_sum, xquad],\n \"es\": [xquad],\n \"et\": [bittranslate_dataset],\n \"fa\": [bittranslate_dataset],\n \"fi\": [bittranslate_dataset],\n \"fr\": [mkqa, bittranslate_dataset],\n \"hi\": [xquad],\n \"hu\": [exams],\n \"it\": [exams],\n \"ko\": [bittranslate_dataset],\n \"pl\": [exams],\n \"pt\": [exams],\n \"ro\": [xquad],\n \"ru\": [xquad],\n \"sv\": [bittranslate_dataset],\n \"th\": [xquad],\n \"tr\": [exams, xquad],\n \"uk\": [bittranslate_dataset],\n \"vi\": [exams, xquad],\n \"zh\": [xquad]}\n\n def score(self, sources: List[str], translations: List[List[str]], source_lang: str, target_lang: str):\n len_sources = len(sources)\n miners_count = len(translations[0])\n all_scores = [0]*miners_count\n overall_top_max_score = 0\n overall_top_max_source = \"\"\n overall_top_max_target = \"\"\n overall_top_min_score = 1.1\n overall_top_min_source = \"\"\n overall_top_min_target = \"\"\n\n top_translations = []\n top_scores = []\n\n for s, t in zip(sources, translations):\n # s: single source text\n # t: a list of translation where index contains a translation from a given miner.\n # l: target language\n\n scores = self.single_score(s, t, target_lang)\n all_scores = [a + b for a, b in zip(all_scores, scores)]\n\n max_score = max(scores)\n min_score = min(scores)\n max_score_index = scores.index(max_score)\n min_score_index = scores.index(min_score)\n max_score_value = t[max_score_index]\n top_translations.append(max_score_value)\n top_scores.append(max_score)\n if max_score > overall_top_max_score:\n overall_top_max_score = max_score\n overall_top_max_source = s\n overall_top_max_target = max_score_value\n\n min_score_value = t[min_score_index]\n if min_score < overall_top_min_score:\n overall_top_min_score = min_score\n overall_top_min_source = s\n overall_top_min_target = min_score_value\n\n final_scores = [score/len_sources for score in all_scores]\n\n # Track scores\n try: # nonessential code:\n self.tracker.track_scores(source_lang, target_lang, final_scores)\n except Exception as e:\n print(f\"Error (non-essential code): tracker.log_scores()\", file=sys.stderr)\n print(e, file=sys.stderr)\n\n # Track texts\n try: # nonessential code:\n 
self.tracker.track_texts(source_lang, target_lang,\n overall_top_min_source,\n overall_top_min_target,\n overall_top_min_score,\n overall_top_max_source,\n overall_top_max_target,\n overall_top_max_score)\n except Exception as e:\n print(f\"Error (non-essential code): tracker.track_texts()\", file=sys.stderr)\n print(e, file=sys.stderr)\n\n return final_scores, top_translations, top_scores\n\n def single_score(self, source: str, translations: List[str], target_lang: str) -> List[float]:\n\n lang_filter = self._filter_lang(translations, target_lang)\n\n reward_scores = [0.0] * len(translations)\n for i, reward_model in enumerate(self._reward_models):\n # Produce scores with a Reward Model\n scores = reward_model.score(source, translations)\n\n # Sigmoid normalization\n norm_scores = self._sigmoid_normalize(scores)\n\n # Get the weight for the Reward Model\n weight = self._reward_weights[i]\n\n # Multiply each score based on its weight\n weighted_scores = [float(score * weight) for score in norm_scores]\n\n # Add the resulting weighted scores to the total reward_scores list\n reward_scores = [\n current_score + new_score\n for current_score, new_score in zip(reward_scores, weighted_scores)\n ]\n\n result = [a * b for a, b in zip(lang_filter, reward_scores)]\n\n return result\n\n def _sigmoid_normalize(self, scores: List[float]) -> List[float]:\n np_scores = np.array(scores)\n norm_scores = 1 / (1 + np.exp(-np_scores))\n\n return norm_scores.tolist()\n\n def _get_source_dataset(self) -> (PromptDataset, str, str):\n\n source_lang, target_lang = self._select_lang_pair()\n\n source_datasets = self._datasets[source_lang]\n\n random_dataset_index = random.randint(0, len(source_datasets) - 1)\n source_dataset = source_datasets[random_dataset_index]\n\n return source_dataset, source_lang, target_lang\n\n\n def generate_cases(self, count: int=2) -> (str, str, List[str]):\n good_sources = []\n bad_sources = []\n max_iter = count + 4\n curr_iter = 0\n\n source_dataset, source_lang, target_lang = self._get_source_dataset()\n\n while len(good_sources) < count and curr_iter < max_iter:\n curr_iter += 1\n starting_case = source_dataset.sample_case(source_lang)\n prompt = self._generate_prompt(starting_case, lang=target_lang)\n if self._is_gibberish(prompt, source_lang):\n bad_sources.append(prompt)\n else:\n good_sources.append(prompt)\n sources = good_sources if len(good_sources) > count else [*good_sources, *bad_sources][:count]\n return source_lang, target_lang, sources\n\n def _generate_prompt(self, text: str, lang: str = \"en\") -> str:\n\n if lang in self._wenzhong_gpt2_langs:\n current_token_length = len(self._wenzhong_gpt2_pipeline.tokenizer.encode(text))\n return self._wenzhong_gpt2_pipeline(\n text,\n return_full_text=False,\n no_repeat_ngram_size=3,\n do_sample=True,\n top_k=10,\n temperature=1,\n min_length=32 + current_token_length,\n max_length=64 + current_token_length,\n )[0][\"generated_text\"]\n elif lang in self._mgpt_langs:\n current_token_length = len(self._mgpt_pipeline.tokenizer.encode(text))\n return self._mgpt_pipeline(\n text,\n return_full_text=False,\n no_repeat_ngram_size=3,\n do_sample=True,\n top_k=10,\n temperature=1,\n min_length=32 + current_token_length,\n max_length=64 + current_token_length,\n )[0][\"generated_text\"]\n else:\n print(\"error, language not supported\")\n def _filter_lang(self, translations, target_lang):\n # Lang detection filter\n lang_filter = []\n\n for translation in translations:\n try:\n pred = detect(translation)\n\n except Exception as e:\n 
lang_filter.append(0)\n print(f\"Language detection exception. Error {str(e)}. Translation: {translation}\", file=sys.stderr)\n continue\n if pred == target_lang:\n lang_filter.append(1)\n elif pred[0:2] == \"zh\" and target_lang == \"zh\":\n lang_filter.append(1)\n else:\n lang_filter.append(0)\n\n return lang_filter\n\n def save_tracked_results(self):\n out_scores_path = self.out_dir + \"scores.json\"\n self.tracker.scores_to_json(out_scores_path)\n out_texts_path = self.out_dir + \"texts.json\"\n self.tracker.texts_to_json(out_texts_path)\n\n def _select_lang_pair(self):\n remaining_prob = 1 - sum(self._lang_probs.get(lang, 0) for lang in self._langs)\n langs_wo_prob = [lang for lang in self._langs if lang not in self._lang_probs]\n prob_per_lang = remaining_prob / len(langs_wo_prob)\n probs = {**{lang: prob_per_lang for lang in langs_wo_prob}, **self._lang_probs}\n \n source_lang = np.random.choice(\n self._langs, p=[probs.get(lang) for lang in self._langs]\n ).item()\n target_lang = np.random.choice(\n [lang for lang in self._langs if lang != source_lang]\n ).item()\n return source_lang, target_lang\n \n def _is_gibberish(self, text: str, lang: str) -> bool:\n \"\"\"\n Filter out gibberish text based on a list of patterns and a cutoff.\n\n Args:\n text (str): text(prompt) to be filtered\n patterns (List[str]): list of regex patterns to be searched for\n cutoff (float): cutoff for the sum of ratios of pattern matches to text length\n \"\"\"\n cutoff = 0.2\n\n chinese_pattern = r'[\\u4e00-\\u9fff]+'\n emoji_pattern = r'[\\U0001F600-\\U0001F64F\\U00002700-\\U000027BF\\U0001F680-\\U0001F6FF\\U00002600-\\U000026FF\\U0001F900-\\U0001F9FF]'\n invalid_pattern = r'[\\uE000-\\uF8FF]'\n patterns = [emoji_pattern, invalid_pattern]\n if lang != \"zh\":\n patterns.append(chinese_pattern)\n \n pattern_results = []\n for pattern in patterns:\n chars = \"\".join(re.findall(pattern, text))\n ratio = round(len(chars)/len(text), 2)\n pattern_results.append(ratio)\n \n if sum(pattern_results) > cutoff:\n return True\n return False"
},
{
"identifier": "log_elapsed_time",
"path": "bittranslate/logging.py",
"snippet": "@contextmanager\ndef log_elapsed_time(name: str) -> Iterator[BoxedTime]:\n boxed_time = BoxedTime()\n try:\n with Timer() as timer:\n yield boxed_time\n finally:\n bittensor.logging.info(\n f\"Elapsed time ({name}) = \"\n f\"{timer.elapsed_seconds():.3f} seconds\"\n )\n boxed_time.time = timer.elapsed_seconds()"
},
{
"identifier": "check_for_updates",
"path": "neurons/auto_update.py",
"snippet": "def check_for_updates(no_restart):\n try:\n bt.logging.info(\"Checking for updates...\")\n response = requests.get(\n PATH_TO_REPO\n )\n response.raise_for_status()\n try:\n # load version from VERSION file\n with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"VERSION\")) as f:\n __version__ = f.read().strip()\n # convert to list of ints\n __version__ = [int(v) for v in __version__.split(\".\")]\n latest_version = response.text.strip()\n latest_version = [int(v) for v in latest_version.split(\".\")]\n bt.logging.info(f\"Current version: {__version__}\")\n bt.logging.info(f\"Latest version: {latest_version}\")\n if latest_version > __version__:\n bt.logging.info(\"A newer version of BitTranslate is available. Downloading...\")\n # download latest version with git pull\n os.system(\"git pull\")\n # checking local VERSION\n with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"VERSION\")) as f:\n new__version__ = f.read().strip()\n # convert to list of ints\n new__version__ = [int(v) for v in new__version__.split(\".\")]\n if new__version__ == latest_version and new__version__ > __version__:\n try:\n os.system(\"pip install -e .\")\n except Exception as e:\n bt.logging.error(\"Failed to run 'pip install -e . '\".format(e))\n\n if not no_restart:\n bt.logging.info(\"BitTranslate updated successfully. Restarting...\")\n bt.logging.info(f\"Running: {sys.executable} {sys.argv}\")\n try:\n # add an argument to the end of the command to prevent infinite loop\n os.execv(sys.executable, [sys.executable] + sys.argv + [\"--no-restart\"])\n except Exception as e:\n bt.logging.error(\"Error restarting process'\".format(e))\n else:\n bt.logging.info(\"BitTranslate has been updated successfully. Restart to apply changes.\")\n else:\n bt.logging.error(\"BitTranslate git pull failed you will need to manually update and restart for latest code.\")\n except Exception as e:\n bt.logging.error(\"Failed to convert response to json: {}\".format(e))\n bt.logging.info(\"Response: {}\".format(response.text))\n except Exception as e:\n bt.logging.error(\"Failed to check for updates: {}\".format(e))"
},
{
"identifier": "Translate",
"path": "neurons/protocol.py",
"snippet": "class Translate(bt.Synapse):\n source_texts: List[str] = pydantic.Field(..., allow_mutation=False)\n translated_texts: List[str] = []\n source_lang: str = pydantic.Field(..., allow_mutation=False)\n target_lang: str = pydantic.Field(..., allow_mutation=False)\n required_hash_fields: list[str] = pydantic.Field( [\"source_texts\", \"source_lang\", \"target_lang\"], allow_mutation = False)"
},
{
"identifier": "ApiServer",
"path": "neurons/api_server.py",
"snippet": "class ApiServer:\n app: FastAPI\n fast_server: FastAPIThreadedServer\n router: APIRouter\n forward_fn: ForwardFn\n tunnel: Optional[ngrok.NgrokTunnel]\n ngrok_domain: Optional[str]\n\n def __init__(\n self, \n axon_port: int,\n forward_fn: ForwardFn,\n api_json: str,\n lang_pairs: list,\n max_char: int,\n ngrok_domain: Optional[str]\n ):\n\n self.forward_fn = forward_fn\n self.app = FastAPI()\n self.app.middleware('http')(auth_rate_limiting_middleware)\n\n self.fast_server = FastAPIThreadedServer(config=uvicorn.Config(\n self.app,\n host=\"0.0.0.0\",\n port=axon_port,\n log_level=\"trace\" if bt.logging.__trace_on__ else \"critical\"\n ))\n self.router = APIRouter()\n self.router.add_api_route(\n \"/translate\",\n self.translate,\n methods=[\"POST\"],\n )\n self.app.include_router(self.router)\n\n self.api_json = api_json\n\n self.lang_pairs = lang_pairs\n\n self.max_char = max_char\n\n self.ngrok_domain = ngrok_domain\n self.tunnel = None\n\n async def translate(self, request: Translate):\n\n if (request.source_lang, request.target_lang) in self.lang_pairs:\n source_lang = request.source_lang\n bt.logging.trace(\n f\"Detected in lang_pairs \"\n )\n elif request.source_lang == \"auto\":\n source_lang, warning = self._detect_lang(request.source_texts, request.target_lang)\n if not warning:\n bt.logging.trace(\n f\"Source lang: classified as {source_lang}\"\n )\n else:\n bt.logging.trace(\n f\"Source lang: {warning}. Classified as {source_lang}\"\n )\n else:\n return JSONResponse(\n status_code=400,\n content={\n \"detail\": \"Invalid source_lang. Please provide a language code or set it to /'auto'\",\n \"translated_texts\": []\n })\n\n # Recreate the synapse with the source_lang.\n request = Translate(\n source_lang=source_lang,\n target_lang=request.target_lang,\n source_texts=request.source_texts,\n translated_texts=[],\n )\n\n request_lang_pair = (source_lang, request.target_lang)\n\n if request_lang_pair not in self.lang_pairs:\n return JSONResponse(\n status_code=400, \n content={\n \"detail\": \"Invalid language pair\", \n \"translated_texts\": []\n }\n )\n\n for source_text in request.source_texts:\n if len(source_text) > self.max_char :\n return JSONResponse(\n status_code=400, \n content={\n \"detail\": (\n \"Source text is too long. \"\n f\"Must be under {self.max_char} characters\"\n ), \n \"translated_texts\": []\n }\n )\n\n for translated_text in request.translated_texts:\n # also check the length of the translated text for good measure.\n if len(translated_text) > self.max_char:\n return JSONResponse(\n status_code=400, \n content={\n \"detail\": (\n \"Translated text is too long. \"\n f\"Must be under {self.max_char} characters\"\n ), \n \"translated_texts\": []\n }\n )\n\n if len(request.source_texts) > 2:\n return JSONResponse(\n status_code=400,\n content={\n \"detail\": (\n \"Batch size for source texts is too large. 
\"\n \"Please set it to <= 2\"\n ), \n \"translated_texts\": []\n }\n )\n\n response = await self.forward_fn(request)\n bt.logging.debug(f\"API: response.translated_texts {response.translated_texts}\")\n return JSONResponse(status_code=200,\n content={\"detail\": \"success\", \"translated_texts\": response.translated_texts})\n\n def start(self):\n self.fast_server.start()\n\n if self.ngrok_domain is not None:\n self.tunnel = connect_ngrok_tunnel(\n local_port=self.fast_server.config.port,\n domain=self.ngrok_domain\n )\n\n def stop(self):\n self.fast_server.stop()\n\n if self.tunnel is not None:\n ngrok.disconnect(\n public_url=self.tunnel.public_url\n )\n self.tunnel = None\n\n def _detect_lang(self, source_texts, target_lang):\n # todo account for all texts within the input rather than just the first\n detect_source = detect(source_texts[0])\n warning = \"\"\n if detect_source[:2] == 'zh':\n lang = 'zh'\n elif (detect_source, target_lang) in self.lang_pairs:\n lang = detect_source\n else:\n # todo: return a warning to the client that we were unable to classify the source text\n lang = 'en'\n warning = \"Could not detect the language for the source text\"\n\n return lang, warning"
}
] | import os
import torch
import argparse
import traceback
import pkg_resources
import bittensor as bt
import random
import copy
import anyio
import anyio.to_thread
import threading
from typing import List, Optional
from dataclasses import dataclass
from queue import SimpleQueue, Empty
from bittranslate import Validator
from bittranslate.logging import log_elapsed_time
from neurons.auto_update import check_for_updates
from neurons.protocol import Translate
from neurons.api_server import ApiServer | 7,215 | median = scores.median()
for uid, hotkey in enumerate(hotkeys):
if hotkey != metagraph.hotkeys[uid]:
scores[uid] = median
bt.logging.debug(f"New hotkey: {uid}. Setting score to {median}")
# Did the most recent metagraph update increase the number of UIDs?
# Occurs during creation of subnet as registrations fill up.
if len(hotkeys) < len(metagraph.hotkeys):
# Create new list of scores with correct length.
new_scores = torch.zeros((metagraph.n))
# Copy scores we do have onto new scores.
min_len = min(len(hotkeys), len(scores))
new_scores[:min_len] = scores[:min_len]
bt.logging.debug(f"UID length increased. Previous scores: {scores}. New scores: {new_scores}")
# Update scores.
scores = new_scores
return scores
@dataclass
class SynapseWithEvent:
""" Object that API server can send to main thread to be serviced. """
input_synapse: Translate
event: threading.Event
output_synapse: Translate
api_queue = SimpleQueue() # Queue of SynapseEventPair
async def forward(synapse: Translate) -> Translate:
""" Forward function for API server. """
synapse_with_event = SynapseWithEvent(
input_synapse=synapse,
event=threading.Event(),
output_synapse=Translate(source_lang="en", target_lang="pl", source_texts=["sample"])
)
api_queue.put(synapse_with_event)
# Wait until the main thread marks this synapse as processed.
await anyio.to_thread.run_sync(synapse_with_event.event.wait)
return synapse_with_event.output_synapse
def main( config ):
# Set up logging with the provided configuration and directory.
bt.logging(config=config, logging_dir=config.full_path)
bt.logging.info(f"Running validator for subnet: {config.netuid} on network: {config.subtensor.chain_endpoint} with config:")
# Log the configuration for reference.
bt.logging.info(config)
# These are core Bittensor classes to interact with the network.
bt.logging.info("Setting up bittensor objects.")
# The wallet holds the cryptographic key pairs for the validator.
wallet = bt.wallet( config = config )
bt.logging.info(f"Wallet: {wallet}")
# The subtensor is our connection to the Bittensor blockchain.
subtensor = bt.subtensor( config = config )
bt.logging.info(f"Subtensor: {subtensor}")
# Dendrite is the RPC client; it lets us send messages to other nodes (axons) in the network.
dendrite = bt.dendrite( wallet = wallet )
bt.logging.info(f"Dendrite: {dendrite}")
# The metagraph holds the state of the network, letting us know about other miners.
metagraph = subtensor.metagraph( config.netuid )
bt.logging.info(f"Metagraph: {metagraph}")
hotkeys: List[str] = copy.deepcopy(metagraph.hotkeys)
if wallet.hotkey.ss58_address not in metagraph.hotkeys:
bt.logging.error(f"\nYour validator: {wallet} if not registered to chain connection: {subtensor} \nRun btcli register and try again.")
exit()
else:
# Each miner gets a unique identity (UID) in the network for differentiation.
my_subnet_uid = metagraph.hotkeys.index(wallet.hotkey.ss58_address)
bt.logging.info(f"Running validator on uid: {my_subnet_uid}")
bt.logging.info("Building validation weights.")
scores = torch.zeros_like(metagraph.S, dtype=torch.float32)
bt.logging.info(f"Weights: {scores}")
alpha = 0.999
## Custom Initialization
bt.logging.info(f"Loading validator components...")
validator = Validator(device=config.device, out_dir=config.out_dir)
bt.logging.info(f"Done validator components.")
if config.enable_api:
# external requests
api_server = ApiServer(
axon_port=config.axon.port,
forward_fn=forward,
api_json=config.api_json,
lang_pairs=validator._lang_pairs,
max_char=config.max_char,
ngrok_domain=config.ngrok_domain
)
api_server.start()
bt.logging.info("Starting validator loop.")
step = 0
while True:
try:
bt.logging.info(f"\n\nStep: {step}")
# We sleep at the top of the loop such that the queue access is more readable.
# This results in one extra delay at the beginning of a validator's startup,
# which is not a significant issue.
| # The MIT License (MIT)
# Copyright © 2023 Yuma Rao
# Copyright © 2023 Opentensor Foundation
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# Bittensor Validator Template:
def get_config():
parser = argparse.ArgumentParser()
parser.add_argument('--device', default="cuda", help="The device used for the validator's components.")
# Adds override arguments for network and netuid.
parser.add_argument( '--netuid', type = int, default = 2, help = "The chain subnet uid." )
parser.add_argument(
"--max_char",
type=int,
default=1024,
help="The maximum allowed characters for an incoming request.",
)
parser.add_argument(
"--batch_size",
type=int,
default=2,
help="Number of source texts to send to each miner."
)
parser.add_argument(
"--step_delay",
type=int,
default=12,
help="Number of seconds to sleep between steps."
)
parser.add_argument(
"--miners_per_step",
type=int,
default=8,
help="Number of miners query in each step."
)
parser.add_argument(
"--track_steps",
type=int,
default=100,
help="Number of steps before tracked scores and texts are saved."
)
parser.add_argument(
"--out_dir",
type=str,
default="bittranslate_out/",
help="Output directory for tracked results."
)
parser.add_argument(
"--enable_api",
action="store_true",
help="If set, a callable API will be activated."
)
parser.add_argument(
"--score_api",
action="store_true",
help="If set, responses from API requests will be used to modify scores."
)
parser.add_argument(
"--api_json",
type=str,
default="neurons/api.json",
help="A path to a a config file for the API."
)
parser.add_argument(
"--no_artificial_eval",
action="store_true",
help="If set, artificial data will not be sent to miners for the purpose of scoring. We only recommend setting this to true to when debugging the API."
)
parser.add_argument(
"--ngrok_domain",
help=(
"If set, expose the API over 'ngrok' to the specified domain."
)
)
parser.add_argument(
"--update_steps",
type=int,
default=500,
help=(
"The number of steps until we check if there has been a new version. If 0, no searching will be performed."
)
)
parser.add_argument(
"--no_restart",
help=(
"If set, the process is not restarted when a new version is detected."
)
)
# Adds subtensor specific arguments i.e. --subtensor.chain_endpoint ... --subtensor.network ...
bt.subtensor.add_args(parser)
# Adds logging specific arguments i.e. --logging.debug ..., --logging.trace .. or --logging.logging_dir ...
bt.logging.add_args(parser)
# Adds wallet specific arguments i.e. --wallet.name ..., --wallet.hotkey ./. or --wallet.path ...
bt.wallet.add_args(parser)
bt.axon.add_args(parser)
# Parse the config (will take command-line arguments if provided)
# To print help message, run python3 template/miner.py --help
config = bt.config(parser)
# Logging is crucial for monitoring and debugging purposes.
config.full_path = os.path.expanduser(
"{}/{}/{}/netuid{}/{}".format(
config.logging.logging_dir,
config.wallet.name,
config.wallet.hotkey,
config.netuid,
'validator',
)
)
# Ensure the logging directory exists.
if not os.path.exists(config.full_path): os.makedirs(config.full_path, exist_ok=True)
# Return the parsed config.
return config
def clamp(min: int, max: int, x: int) -> int:
""" Clamp `x` into the range `[min,max]`. """
if x<min:
return min
if x>max:
return max
return x
def translation_for_source_text_in_response(
response: Translate,
source_text_index: int
) -> str:
""" Get the translated text corresponding
to a particular source text on a miner's response. """
if source_text_index >= len(response.translated_texts):
return "BLANK"
response_text = response.translated_texts[source_text_index]
if len(response_text) > config.max_char:
# TODO log
return "BLANK"
if type(response_text) != str:
# TODO log
return "BLANK"
return response_text
def translations_for_source_text(
responses: List[Translate],
source_text_index: int
) -> List[str]:
""" Return a list of translations for a given source text,
from a set of responses.
Each translation corresponds to a different miner. """
return [
translation_for_source_text_in_response(
response=response,
source_text_index=source_text_index
)
for response in responses
]
def build_translations_per_source_text(
responses: List[Translate]
) -> List[List[str]]:
""" Assemble a list of lists, where if viewed as a matrix,
each row corresponds to different miner's responses
to the same source text.
Returns `translations`,
where `translations[source_index][miner_index]=...`
"""
return [
translations_for_source_text(
responses=responses,
source_text_index=source_text_index
)
# It is OK to trust this arbitrary response's "source_texts" field
# because we set `allow_mutation=False` in the protocol.
for source_text_index, _ in enumerate(responses[0].source_texts)
]
# source: https://github.com/opentensor/text-prompting/blob/6c493cbce0c621e28ded203d947ce47a9ae062ea/prompting/validators/utils.py#L102
def update_scores_from_metagraph(
scores: torch.FloatTensor,
metagraph: bt.metagraph,
hotkeys: List[str]
) -> List[float]:
""" Update the per-UID scores based on recent metagraph updates.
Inputs are current scores, recently synced metagraph,
and list of hotkeys from before metagraph sync.
Output is updated scores.
"""
# For any UIDs which have a new hotkey,
# set the score to the median.
median = scores.median()
for uid, hotkey in enumerate(hotkeys):
if hotkey != metagraph.hotkeys[uid]:
scores[uid] = median
bt.logging.debug(f"New hotkey: {uid}. Setting score to {median}")
# Did the most recent metagraph update increase the number of UIDs?
# Occurs during creation of subnet as registrations fill up.
if len(hotkeys) < len(metagraph.hotkeys):
# Create new list of scores with correct length.
new_scores = torch.zeros((metagraph.n))
# Copy scores we do have onto new scores.
min_len = min(len(hotkeys), len(scores))
new_scores[:min_len] = scores[:min_len]
bt.logging.debug(f"UID length increased. Previous scores: {scores}. New scores: {new_scores}")
# Update scores.
scores = new_scores
return scores
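# Hypothetical usage sketch (an assumption, not code from this script): after a
# periodic metagraph sync the validator would typically refresh its scores and
# its snapshot of hotkeys roughly as follows. `metagraph.sync` is assumed to be
# available in the installed bittensor version.
def _example_resync(scores, metagraph, subtensor, hotkeys):
    metagraph.sync(subtensor=subtensor)
    scores = update_scores_from_metagraph(scores=scores, metagraph=metagraph, hotkeys=hotkeys)
    hotkeys = copy.deepcopy(metagraph.hotkeys)
    return scores, hotkeys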
@dataclass
class SynapseWithEvent:
""" Object that API server can send to main thread to be serviced. """
input_synapse: Translate
event: threading.Event
output_synapse: Translate
api_queue = SimpleQueue() # Queue of SynapseEventPair
async def forward(synapse: Translate) -> Translate:
""" Forward function for API server. """
synapse_with_event = SynapseWithEvent(
input_synapse=synapse,
event=threading.Event(),
output_synapse=Translate(source_lang="en", target_lang="pl", source_texts=["sample"])
)
api_queue.put(synapse_with_event)
# Wait until the main thread marks this synapse as processed.
await anyio.to_thread.run_sync(synapse_with_event.event.wait)
return synapse_with_event.output_synapse
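# Illustrative sketch (an assumption, not the original main-loop code): the main
# thread is expected to drain `api_queue`, fill in the output synapse, and set
# the event so that `forward` above can return to the API server.
def _example_service_api_queue() -> None:
    while not api_queue.empty():
        pending: SynapseWithEvent = api_queue.get()
        # ... run the translation pipeline on pending.input_synapse here ...
        pending.output_synapse.translated_texts = ["<placeholder translation>"]
        pending.event.set()  # unblocks the awaiting forward() coroutine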
def main( config ):
# Set up logging with the provided configuration and directory.
bt.logging(config=config, logging_dir=config.full_path)
bt.logging.info(f"Running validator for subnet: {config.netuid} on network: {config.subtensor.chain_endpoint} with config:")
# Log the configuration for reference.
bt.logging.info(config)
# These are core Bittensor classes to interact with the network.
bt.logging.info("Setting up bittensor objects.")
# The wallet holds the cryptographic key pairs for the validator.
wallet = bt.wallet( config = config )
bt.logging.info(f"Wallet: {wallet}")
# The subtensor is our connection to the Bittensor blockchain.
subtensor = bt.subtensor( config = config )
bt.logging.info(f"Subtensor: {subtensor}")
# Dendrite is the RPC client; it lets us send messages to other nodes (axons) in the network.
dendrite = bt.dendrite( wallet = wallet )
bt.logging.info(f"Dendrite: {dendrite}")
# The metagraph holds the state of the network, letting us know about other miners.
metagraph = subtensor.metagraph( config.netuid )
bt.logging.info(f"Metagraph: {metagraph}")
hotkeys: List[str] = copy.deepcopy(metagraph.hotkeys)
if wallet.hotkey.ss58_address not in metagraph.hotkeys:
bt.logging.error(f"\nYour validator: {wallet} if not registered to chain connection: {subtensor} \nRun btcli register and try again.")
exit()
else:
# Each miner gets a unique identity (UID) in the network for differentiation.
my_subnet_uid = metagraph.hotkeys.index(wallet.hotkey.ss58_address)
bt.logging.info(f"Running validator on uid: {my_subnet_uid}")
bt.logging.info("Building validation weights.")
scores = torch.zeros_like(metagraph.S, dtype=torch.float32)
bt.logging.info(f"Weights: {scores}")
alpha = 0.999
## Custom Initialization
bt.logging.info(f"Loading validator components...")
validator = Validator(device=config.device, out_dir=config.out_dir)
bt.logging.info(f"Done validator components.")
if config.enable_api:
# external requests
api_server = ApiServer(
axon_port=config.axon.port,
forward_fn=forward,
api_json=config.api_json,
lang_pairs=validator._lang_pairs,
max_char=config.max_char,
ngrok_domain=config.ngrok_domain
)
api_server.start()
bt.logging.info("Starting validator loop.")
step = 0
while True:
try:
bt.logging.info(f"\n\nStep: {step}")
# We sleep at the top of the loop such that the queue access is more readable.
# This results in one extra delay at the beginning of a validator's startup,
# which is not a significant issue. | with log_elapsed_time("sleeping"): | 1 | 2023-10-09 12:08:05+00:00 | 12k |
grainseed/monitask | sam/segment_anything/automatic_mask_generator.py | [
{
"identifier": "Sam",
"path": "sam/segment_anything/modeling/sam.py",
"snippet": "class Sam(nn.Module):\r\n mask_threshold: float = 0.0\r\n image_format: str = \"RGB\"\r\n\r\n def __init__(\r\n self,\r\n image_encoder: ImageEncoderViT,\r\n prompt_encoder: PromptEncoder,\r\n mask_decoder: MaskDecoder,\r\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\r\n pixel_std: List[float] = [58.395, 57.12, 57.375],\r\n ) -> None:\r\n \"\"\"\r\n SAM predicts object masks from an image and input prompts.\r\n\r\n Arguments:\r\n image_encoder (ImageEncoderViT): The backbone used to encode the\r\n image into image embeddings that allow for efficient mask prediction.\r\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\r\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\r\n and encoded prompts.\r\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\r\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\r\n \"\"\"\r\n super().__init__()\r\n self.image_encoder = image_encoder\r\n self.prompt_encoder = prompt_encoder\r\n self.mask_decoder = mask_decoder\r\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\r\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\r\n\r\n @property\r\n def device(self) -> Any:\r\n return self.pixel_mean.device\r\n\r\n def forward(\r\n self,\r\n batched_input: List[Dict[str, Any]],\r\n multimask_output: bool,\r\n hq_token_only: bool =False,\r\n ) -> List[Dict[str, torch.Tensor]]:\r\n \"\"\"\r\n Predicts masks end-to-end from provided images and prompts.\r\n If prompts are not known in advance, using SamPredictor is\r\n recommended over calling the model directly.\r\n\r\n Arguments:\r\n batched_input (list(dict)): A list over input images, each a\r\n dictionary with the following keys. A prompt key can be\r\n excluded if it is not present.\r\n 'image': The image as a torch tensor in 3xHxW format,\r\n already transformed for input to the model.\r\n 'original_size': (tuple(int, int)) The original size of\r\n the image before transformation, as (H, W).\r\n 'point_coords': (torch.Tensor) Batched point prompts for\r\n this image, with shape BxNx2. Already transformed to the\r\n input frame of the model.\r\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\r\n with shape BxN.\r\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\r\n Already transformed to the input frame of the model.\r\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\r\n in the form Bx1xHxW.\r\n multimask_output (bool): Whether the model should predict multiple\r\n disambiguating masks, or return a single mask.\r\n\r\n Returns:\r\n (list(dict)): A list over input images, where each element is\r\n as dictionary with the following keys.\r\n 'masks': (torch.Tensor) Batched binary mask predictions,\r\n with shape BxCxHxW, where B is the number of input prompts,\r\n C is determined by multimask_output, and (H, W) is the\r\n original size of the image.\r\n 'iou_predictions': (torch.Tensor) The model's predictions\r\n of mask quality, in shape BxC.\r\n 'low_res_logits': (torch.Tensor) Low resolution logits with\r\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\r\n to subsequent iterations of prediction.\r\n \"\"\"\r\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\r\n image_embeddings, interm_embeddings = self.image_encoder(input_images)\r\n interm_embeddings = interm_embeddings[0] # early layer\r\n\r\n outputs = []\r\n for image_record, curr_embedding, curr_interm in zip(batched_input, image_embeddings, interm_embeddings):\r\n if \"point_coords\" in image_record:\r\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\r\n else:\r\n points = None\r\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\r\n points=points,\r\n boxes=image_record.get(\"boxes\", None),\r\n masks=image_record.get(\"mask_inputs\", None),\r\n )\r\n low_res_masks, iou_predictions = self.mask_decoder(\r\n image_embeddings=curr_embedding.unsqueeze(0),\r\n image_pe=self.prompt_encoder.get_dense_pe(),\r\n sparse_prompt_embeddings=sparse_embeddings,\r\n dense_prompt_embeddings=dense_embeddings,\r\n multimask_output=multimask_output,\r\n hq_token_only=hq_token_only,\r\n interm_embeddings=curr_interm.unsqueeze(0).unsqueeze(0),\r\n )\r\n masks = self.postprocess_masks(\r\n low_res_masks,\r\n input_size=image_record[\"image\"].shape[-2:],\r\n original_size=image_record[\"original_size\"],\r\n )\r\n masks = masks > self.mask_threshold\r\n outputs.append(\r\n {\r\n \"masks\": masks,\r\n \"iou_predictions\": iou_predictions,\r\n \"low_res_logits\": low_res_masks,\r\n }\r\n )\r\n return outputs\r\n\r\n def postprocess_masks(\r\n self,\r\n masks: torch.Tensor,\r\n input_size: Tuple[int, ...],\r\n original_size: Tuple[int, ...],\r\n ) -> torch.Tensor:\r\n \"\"\"\r\n Remove padding and upscale masks to the original image size.\r\n\r\n Arguments:\r\n masks (torch.Tensor): Batched masks from the mask_decoder,\r\n in BxCxHxW format.\r\n input_size (tuple(int, int)): The size of the image input to the\r\n model, in (H, W) format. Used to remove padding.\r\n original_size (tuple(int, int)): The original size of the image\r\n before resizing for input to the model, in (H, W) format.\r\n\r\n Returns:\r\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\r\n is given by original_size.\r\n \"\"\"\r\n masks = F.interpolate(\r\n masks,\r\n (self.image_encoder.img_size, self.image_encoder.img_size),\r\n mode=\"bilinear\",\r\n align_corners=False,\r\n )\r\n masks = masks[..., : input_size[0], : input_size[1]]\r\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\r\n return masks\r\n\r\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\r\n # Normalize colors\r\n x = (x - self.pixel_mean) / self.pixel_std\r\n\r\n # Pad\r\n h, w = x.shape[-2:]\r\n padh = self.image_encoder.img_size - h\r\n padw = self.image_encoder.img_size - w\r\n x = F.pad(x, (0, padw, 0, padh))\r\n return x\r"
},
{
"identifier": "SamPredictor",
"path": "sam/segment_anything/predictor.py",
"snippet": "class SamPredictor:\r\n def __init__(\r\n self,\r\n sam_model: Sam,\r\n ) -> None:\r\n \"\"\"\r\n Uses SAM to calculate the image embedding for an image, and then\r\n allow repeated, efficient mask prediction given prompts.\r\n\r\n Arguments:\r\n sam_model (Sam): The model to use for mask prediction.\r\n \"\"\"\r\n super().__init__()\r\n self.model = sam_model\r\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\r\n self.reset_image()\r\n\r\n def set_image(\r\n self,\r\n image: np.ndarray,\r\n image_format: str = \"RGB\",\r\n ) -> None:\r\n \"\"\"\r\n Calculates the image embeddings for the provided image, allowing\r\n masks to be predicted with the 'predict' method.\r\n\r\n Arguments:\r\n image (np.ndarray): The image for calculating masks. Expects an\r\n image in HWC uint8 format, with pixel values in [0, 255].\r\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\r\n \"\"\"\r\n assert image_format in [\r\n \"RGB\",\r\n \"BGR\",\r\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\r\n # import pdb;pdb.set_trace()\r\n if image_format != self.model.image_format:\r\n image = image[..., ::-1]\r\n\r\n # Transform the image to the form expected by the model\r\n # import pdb;pdb.set_trace()\r\n input_image = self.transform.apply_image(image)\r\n input_image_torch = torch.as_tensor(input_image, device=self.device)\r\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\r\n\r\n self.set_torch_image(input_image_torch, image.shape[:2])\r\n\r\n @torch.no_grad()\r\n def set_torch_image(\r\n self,\r\n transformed_image: torch.Tensor,\r\n original_image_size: Tuple[int, ...],\r\n ) -> None:\r\n \"\"\"\r\n Calculates the image embeddings for the provided image, allowing\r\n masks to be predicted with the 'predict' method. Expects the input\r\n image to be already transformed to the format expected by the model.\r\n\r\n Arguments:\r\n transformed_image (torch.Tensor): The input image, with shape\r\n 1x3xHxW, which has been transformed with ResizeLongestSide.\r\n original_image_size (tuple(int, int)): The size of the image\r\n before transformation, in (H, W) format.\r\n \"\"\"\r\n assert (\r\n len(transformed_image.shape) == 4\r\n and transformed_image.shape[1] == 3\r\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\r\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\r\n self.reset_image()\r\n\r\n self.original_size = original_image_size\r\n self.input_size = tuple(transformed_image.shape[-2:])\r\n input_image = self.model.preprocess(transformed_image)\r\n self.features, self.interm_features = self.model.image_encoder(input_image)\r\n self.is_image_set = True\r\n\r\n def predict(\r\n self,\r\n point_coords: Optional[np.ndarray] = None,\r\n point_labels: Optional[np.ndarray] = None,\r\n box: Optional[np.ndarray] = None,\r\n mask_input: Optional[np.ndarray] = None,\r\n multimask_output: bool = True,\r\n return_logits: bool = False,\r\n hq_token_only: bool =False,\r\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\r\n \"\"\"\r\n Predict masks for the given input prompts, using the currently set image.\r\n\r\n Arguments:\r\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\r\n model. Each point is in (X,Y) in pixels.\r\n point_labels (np.ndarray or None): A length N array of labels for the\r\n point prompts. 
1 indicates a foreground point and 0 indicates a\r\n background point.\r\n box (np.ndarray or None): A length 4 array given a box prompt to the\r\n model, in XYXY format.\r\n mask_input (np.ndarray): A low resolution mask input to the model, typically\r\n coming from a previous prediction iteration. Has form 1xHxW, where\r\n for SAM, H=W=256.\r\n multimask_output (bool): If true, the model will return three masks.\r\n For ambiguous input prompts (such as a single click), this will often\r\n produce better masks than a single prediction. If only a single\r\n mask is needed, the model's predicted quality score can be used\r\n to select the best mask. For non-ambiguous prompts, such as multiple\r\n input prompts, multimask_output=False can give better results.\r\n return_logits (bool): If true, returns un-thresholded masks logits\r\n instead of a binary mask.\r\n\r\n Returns:\r\n (np.ndarray): The output masks in CxHxW format, where C is the\r\n number of masks, and (H, W) is the original image size.\r\n (np.ndarray): An array of length C containing the model's\r\n predictions for the quality of each mask.\r\n (np.ndarray): An array of shape CxHxW, where C is the number\r\n of masks and H=W=256. These low resolution logits can be passed to\r\n a subsequent iteration as mask input.\r\n \"\"\"\r\n if not self.is_image_set:\r\n raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\r\n\r\n # Transform input prompts\r\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\r\n if point_coords is not None:\r\n assert (\r\n point_labels is not None\r\n ), \"point_labels must be supplied if point_coords is supplied.\"\r\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\r\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\r\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\r\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\r\n if box is not None:\r\n box = self.transform.apply_boxes(box, self.original_size)\r\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\r\n box_torch = box_torch[None, :]\r\n if mask_input is not None:\r\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\r\n mask_input_torch = mask_input_torch[None, :, :, :]\r\n\r\n masks, iou_predictions, low_res_masks = self.predict_torch(\r\n coords_torch,\r\n labels_torch,\r\n box_torch,\r\n mask_input_torch,\r\n multimask_output,\r\n return_logits=return_logits,\r\n hq_token_only=hq_token_only,\r\n )\r\n\r\n masks_np = masks[0].detach().cpu().numpy()\r\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\r\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\r\n return masks_np, iou_predictions_np, low_res_masks_np\r\n\r\n @torch.no_grad()\r\n def predict_torch(\r\n self,\r\n point_coords: Optional[torch.Tensor],\r\n point_labels: Optional[torch.Tensor],\r\n boxes: Optional[torch.Tensor] = None,\r\n mask_input: Optional[torch.Tensor] = None,\r\n multimask_output: bool = True,\r\n return_logits: bool = False,\r\n hq_token_only: bool =False,\r\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\r\n \"\"\"\r\n Predict masks for the given input prompts, using the currently set image.\r\n Input prompts are batched torch tensors and are expected to already be\r\n transformed to the input frame using ResizeLongestSide.\r\n\r\n Arguments:\r\n point_coords (torch.Tensor or None): 
A BxNx2 array of point prompts to the\r\n model. Each point is in (X,Y) in pixels.\r\n point_labels (torch.Tensor or None): A BxN array of labels for the\r\n point prompts. 1 indicates a foreground point and 0 indicates a\r\n background point.\r\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\r\n model, in XYXY format.\r\n mask_input (np.ndarray): A low resolution mask input to the model, typically\r\n coming from a previous prediction iteration. Has form Bx1xHxW, where\r\n for SAM, H=W=256. Masks returned by a previous iteration of the\r\n predict method do not need further transformation.\r\n multimask_output (bool): If true, the model will return three masks.\r\n For ambiguous input prompts (such as a single click), this will often\r\n produce better masks than a single prediction. If only a single\r\n mask is needed, the model's predicted quality score can be used\r\n to select the best mask. For non-ambiguous prompts, such as multiple\r\n input prompts, multimask_output=False can give better results.\r\n return_logits (bool): If true, returns un-thresholded masks logits\r\n instead of a binary mask.\r\n\r\n Returns:\r\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\r\n number of masks, and (H, W) is the original image size.\r\n (torch.Tensor): An array of shape BxC containing the model's\r\n predictions for the quality of each mask.\r\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\r\n of masks and H=W=256. These low res logits can be passed to\r\n a subsequent iteration as mask input.\r\n \"\"\"\r\n if not self.is_image_set:\r\n raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\r\n\r\n if point_coords is not None:\r\n points = (point_coords, point_labels)\r\n else:\r\n points = None\r\n\r\n # Embed prompts\r\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\r\n points=points,\r\n boxes=boxes,\r\n masks=mask_input,\r\n )\r\n\r\n # Predict masks\r\n low_res_masks, iou_predictions = self.model.mask_decoder(\r\n image_embeddings=self.features,\r\n image_pe=self.model.prompt_encoder.get_dense_pe(),\r\n sparse_prompt_embeddings=sparse_embeddings,\r\n dense_prompt_embeddings=dense_embeddings,\r\n multimask_output=multimask_output,\r\n hq_token_only=hq_token_only,\r\n interm_embeddings=self.interm_features,\r\n )\r\n\r\n # Upscale the masks to the original image resolution\r\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\r\n\r\n if not return_logits:\r\n masks = masks > self.model.mask_threshold\r\n\r\n return masks, iou_predictions, low_res_masks\r\n\r\n def get_image_embedding(self) -> torch.Tensor:\r\n \"\"\"\r\n Returns the image embeddings for the currently set image, with\r\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\r\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\r\n \"\"\"\r\n if not self.is_image_set:\r\n raise RuntimeError(\r\n \"An image must be set with .set_image(...) to generate an embedding.\"\r\n )\r\n assert self.features is not None, \"Features must exist if an image has been set.\"\r\n return self.features\r\n\r\n @property\r\n def device(self) -> torch.device:\r\n return self.model.device\r\n\r\n def reset_image(self) -> None:\r\n \"\"\"Resets the currently set image.\"\"\"\r\n self.is_image_set = False\r\n self.features = None\r\n self.orig_h = None\r\n self.orig_w = None\r\n self.input_h = None\r\n self.input_w = None\r"
},
{
"identifier": "MaskData",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "class MaskData:\r\n \"\"\"\r\n A structure for storing masks and their related data in batched format.\r\n Implements basic filtering and concatenation.\r\n \"\"\"\r\n\r\n def __init__(self, **kwargs) -> None:\r\n for v in kwargs.values():\r\n assert isinstance(\r\n v, (list, np.ndarray, torch.Tensor)\r\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\r\n self._stats = dict(**kwargs)\r\n\r\n def __setitem__(self, key: str, item: Any) -> None:\r\n assert isinstance(\r\n item, (list, np.ndarray, torch.Tensor)\r\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\r\n self._stats[key] = item\r\n\r\n def __delitem__(self, key: str) -> None:\r\n del self._stats[key]\r\n\r\n def __getitem__(self, key: str) -> Any:\r\n return self._stats[key]\r\n\r\n def items(self) -> ItemsView[str, Any]:\r\n return self._stats.items()\r\n\r\n def filter(self, keep: torch.Tensor) -> None:\r\n for k, v in self._stats.items():\r\n if v is None:\r\n self._stats[k] = None\r\n elif isinstance(v, torch.Tensor):\r\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\r\n elif isinstance(v, np.ndarray):\r\n self._stats[k] = v[keep.detach().cpu().numpy()]\r\n elif isinstance(v, list) and keep.dtype == torch.bool:\r\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\r\n elif isinstance(v, list):\r\n self._stats[k] = [v[i] for i in keep]\r\n else:\r\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\r\n\r\n def cat(self, new_stats: \"MaskData\") -> None:\r\n for k, v in new_stats.items():\r\n if k not in self._stats or self._stats[k] is None:\r\n self._stats[k] = deepcopy(v)\r\n elif isinstance(v, torch.Tensor):\r\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\r\n elif isinstance(v, np.ndarray):\r\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\r\n elif isinstance(v, list):\r\n self._stats[k] = self._stats[k] + deepcopy(v)\r\n else:\r\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\r\n\r\n def to_numpy(self) -> None:\r\n for k, v in self._stats.items():\r\n if isinstance(v, torch.Tensor):\r\n self._stats[k] = v.detach().cpu().numpy()\r"
},
{
"identifier": "area_from_rle",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\r\n return sum(rle[\"counts\"][1::2])\r"
},
{
"identifier": "batch_iterator",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\r\n assert len(args) > 0 and all(\r\n len(a) == len(args[0]) for a in args\r\n ), \"Batched iteration must have inputs of all the same size.\"\r\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\r\n for b in range(n_batches):\r\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]\r"
},
{
"identifier": "batched_mask_to_box",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\r\n \"\"\"\r\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\r\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\r\n \"\"\"\r\n # torch.max below raises an error on empty inputs, just skip in this case\r\n if torch.numel(masks) == 0:\r\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\r\n\r\n # Normalize shape to CxHxW\r\n shape = masks.shape\r\n h, w = shape[-2:]\r\n if len(shape) > 2:\r\n masks = masks.flatten(0, -3)\r\n else:\r\n masks = masks.unsqueeze(0)\r\n\r\n # Get top and bottom edges\r\n in_height, _ = torch.max(masks, dim=-1)\r\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\r\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\r\n in_height_coords = in_height_coords + h * (~in_height)\r\n top_edges, _ = torch.min(in_height_coords, dim=-1)\r\n\r\n # Get left and right edges\r\n in_width, _ = torch.max(masks, dim=-2)\r\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\r\n right_edges, _ = torch.max(in_width_coords, dim=-1)\r\n in_width_coords = in_width_coords + w * (~in_width)\r\n left_edges, _ = torch.min(in_width_coords, dim=-1)\r\n\r\n # If the mask is empty the right edge will be to the left of the left edge.\r\n # Replace these boxes with [0, 0, 0, 0]\r\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\r\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\r\n out = out * (~empty_filter).unsqueeze(-1)\r\n\r\n # Return to original shape\r\n if len(shape) > 2:\r\n out = out.reshape(*shape[:-2], 4)\r\n else:\r\n out = out[0]\r\n\r\n return out\r"
},
{
"identifier": "box_xyxy_to_xywh",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\r\n box_xywh = deepcopy(box_xyxy)\r\n box_xywh[2] = box_xywh[2] - box_xywh[0]\r\n box_xywh[3] = box_xywh[3] - box_xywh[1]\r\n return box_xywh\r"
},
{
"identifier": "build_all_layer_point_grids",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def build_all_layer_point_grids(\r\n n_per_side: int, n_layers: int, scale_per_layer: int\r\n) -> List[np.ndarray]:\r\n \"\"\"Generates point grids for all crop layers.\"\"\"\r\n points_by_layer = []\r\n for i in range(n_layers + 1):\r\n n_points = int(n_per_side / (scale_per_layer**i))\r\n points_by_layer.append(build_point_grid(n_points))\r\n return points_by_layer\r"
},
{
"identifier": "calculate_stability_score",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def calculate_stability_score(\r\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\r\n) -> torch.Tensor:\r\n \"\"\"\r\n Computes the stability score for a batch of masks. The stability\r\n score is the IoU between the binary masks obtained by thresholding\r\n the predicted mask logits at high and low values.\r\n \"\"\"\r\n # One mask is always contained inside the other.\r\n # Save memory by preventing unnecessary cast to torch.int64\r\n intersections = (\r\n (masks > (mask_threshold + threshold_offset))\r\n .sum(-1, dtype=torch.int16)\r\n .sum(-1, dtype=torch.int32)\r\n )\r\n unions = (\r\n (masks > (mask_threshold - threshold_offset))\r\n .sum(-1, dtype=torch.int16)\r\n .sum(-1, dtype=torch.int32)\r\n )\r\n return intersections / unions\r"
},
{
"identifier": "coco_encode_rle",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\r\n from pycocotools import mask as mask_utils # type: ignore\r\n\r\n h, w = uncompressed_rle[\"size\"]\r\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\r\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\r\n return rle\r"
},
{
"identifier": "generate_crop_boxes",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def generate_crop_boxes(\r\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\r\n) -> Tuple[List[List[int]], List[int]]:\r\n \"\"\"\r\n Generates a list of crop boxes of different sizes. Each layer\r\n has (2**i)**2 boxes for the ith layer.\r\n \"\"\"\r\n crop_boxes, layer_idxs = [], []\r\n im_h, im_w = im_size\r\n short_side = min(im_h, im_w)\r\n\r\n # Original image\r\n crop_boxes.append([0, 0, im_w, im_h])\r\n layer_idxs.append(0)\r\n\r\n def crop_len(orig_len, n_crops, overlap):\r\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\r\n\r\n for i_layer in range(n_layers):\r\n n_crops_per_side = 2 ** (i_layer + 1)\r\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\r\n\r\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\r\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\r\n\r\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\r\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\r\n\r\n # Crops in XYWH format\r\n for x0, y0 in product(crop_box_x0, crop_box_y0):\r\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\r\n crop_boxes.append(box)\r\n layer_idxs.append(i_layer + 1)\r\n\r\n return crop_boxes, layer_idxs\r"
},
{
"identifier": "is_box_near_crop_edge",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def is_box_near_crop_edge(\r\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\r\n) -> torch.Tensor:\r\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\r\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\r\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\r\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\r\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\r\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\r\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\r\n return torch.any(near_crop_edge, dim=1)\r"
},
{
"identifier": "mask_to_rle_pytorch",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\r\n \"\"\"\r\n Encodes masks to an uncompressed RLE, in the format expected by\r\n pycoco tools.\r\n \"\"\"\r\n # Put in fortran order and flatten h,w\r\n b, h, w = tensor.shape\r\n tensor = tensor.permute(0, 2, 1).flatten(1)\r\n\r\n # Compute change indices\r\n diff = tensor[:, 1:] ^ tensor[:, :-1]\r\n change_indices = diff.nonzero()\r\n\r\n # Encode run length\r\n out = []\r\n for i in range(b):\r\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\r\n cur_idxs = torch.cat(\r\n [\r\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\r\n cur_idxs + 1,\r\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\r\n ]\r\n )\r\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\r\n counts = [] if tensor[i, 0] == 0 else [0]\r\n counts.extend(btw_idxs.detach().cpu().tolist())\r\n out.append({\"size\": [h, w], \"counts\": counts})\r\n return out\r"
},
{
"identifier": "remove_small_regions",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def remove_small_regions(\r\n mask: np.ndarray, area_thresh: float, mode: str\r\n) -> Tuple[np.ndarray, bool]:\r\n \"\"\"\r\n Removes small disconnected regions and holes in a mask. Returns the\r\n mask and an indicator of if the mask has been modified.\r\n \"\"\"\r\n import cv2 # type: ignore\r\n\r\n assert mode in [\"holes\", \"islands\"]\r\n correct_holes = mode == \"holes\"\r\n working_mask = (correct_holes ^ mask).astype(np.uint8)\r\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\r\n sizes = stats[:, -1][1:] # Row 0 is background label\r\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\r\n if len(small_regions) == 0:\r\n return mask, False\r\n fill_labels = [0] + small_regions\r\n if not correct_holes:\r\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\r\n # If every region is below threshold, keep largest\r\n if len(fill_labels) == 0:\r\n fill_labels = [int(np.argmax(sizes)) + 1]\r\n mask = np.isin(regions, fill_labels)\r\n return mask, True\r"
},
{
"identifier": "rle_to_mask",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\r\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\r\n h, w = rle[\"size\"]\r\n mask = np.empty(h * w, dtype=bool)\r\n idx = 0\r\n parity = False\r\n for count in rle[\"counts\"]:\r\n mask[idx : idx + count] = parity\r\n idx += count\r\n parity ^= True\r\n mask = mask.reshape(w, h)\r\n return mask.transpose() # Put in C order\r"
},
{
"identifier": "uncrop_boxes_xyxy",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\r\n x0, y0, _, _ = crop_box\r\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\r\n # Check if boxes has a channel dimension\r\n if len(boxes.shape) == 3:\r\n offset = offset.unsqueeze(1)\r\n return boxes + offset\r"
},
{
"identifier": "uncrop_masks",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def uncrop_masks(\r\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\r\n) -> torch.Tensor:\r\n x0, y0, x1, y1 = crop_box\r\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\r\n return masks\r\n # Coordinate transform masks\r\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\r\n pad = (x0, pad_x - x0, y0, pad_y - y0)\r\n return torch.nn.functional.pad(masks, pad, value=0)\r"
},
{
"identifier": "uncrop_points",
"path": "sam/segment_anything/utils/amg.py",
"snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\r\n x0, y0, _, _ = crop_box\r\n offset = torch.tensor([[x0, y0]], device=points.device)\r\n # Check if points has a channel dimension\r\n if len(points.shape) == 3:\r\n offset = offset.unsqueeze(1)\r\n return points + offset\r"
}
] | import numpy as np
import torch
import cv2 # type: ignore # noqa: F401
from torchvision.ops.boxes import batched_nms, box_area # type: ignore
from typing import Any, Dict, List, Optional, Tuple
from .modeling import Sam
from .predictor import SamPredictor
from .utils.amg import (
MaskData,
area_from_rle,
batch_iterator,
batched_mask_to_box,
box_xyxy_to_xywh,
build_all_layer_point_grids,
calculate_stability_score,
coco_encode_rle,
generate_crop_boxes,
is_box_near_crop_edge,
mask_to_rle_pytorch,
remove_small_regions,
rle_to_mask,
uncrop_boxes_xyxy,
uncrop_masks,
uncrop_points,
)
from pycocotools import mask as mask_utils # type: ignore # noqa: F401
| 8,572 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class SamAutomaticMaskGenerator:
def __init__(
self,
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class SamAutomaticMaskGenerator:
def __init__(
self,
| model: Sam,
| 0 | 2023-10-14 13:45:54+00:00 | 12k |
zhaoyizhou1123/mbrcsl | examples/roboverse/run_dt_roboverse.py | [
{
"identifier": "DecisionTransformer",
"path": "offlinerlkit/policy/decision_transformer/decision_transformer.py",
"snippet": "class DecisionTransformer(TrajectoryModel):\n\n \"\"\"\n This model uses GPT to model (Return_1, state_1, action_1, Return_2, state_2, ...)\n \"\"\"\n\n def __init__(\n self,\n state_dim,\n act_dim,\n hidden_size,\n max_length=None,\n max_ep_len=4096,\n action_tanh=True,\n **kwargs\n ):\n super().__init__(state_dim, act_dim, max_length=max_length)\n\n self.hidden_size = hidden_size\n config = transformers.GPT2Config(\n vocab_size=1, # doesn't matter -- we don't use the vocab\n n_embd=hidden_size,\n **kwargs\n )\n\n # note: the only difference between this GPT2Model and the default Huggingface version\n # is that the positional embeddings are removed (since we'll add those ourselves)\n self.transformer = GPT2Model(config)\n\n self.embed_timestep = nn.Embedding(max_ep_len, hidden_size)\n self.embed_return = torch.nn.Linear(1, hidden_size)\n self.embed_state = torch.nn.Linear(self.state_dim, hidden_size)\n self.embed_action = torch.nn.Linear(self.act_dim, hidden_size)\n\n self.embed_ln = nn.LayerNorm(hidden_size)\n\n # note: we don't predict states or returns for the paper\n self.predict_state = torch.nn.Linear(hidden_size, self.state_dim)\n self.predict_action = nn.Sequential(\n *([nn.Linear(hidden_size, self.act_dim)] + ([nn.Tanh()] if action_tanh else []))\n )\n self.predict_return = torch.nn.Linear(hidden_size, 1)\n\n def forward(self, states, actions, returns_to_go, timesteps, attention_mask=None):\n\n batch_size, seq_length = states.shape[0], states.shape[1]\n\n if attention_mask is None:\n # attention mask for GPT: 1 if can be attended to, 0 if not\n attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long)\n\n # embed each modality with a different head\n state_embeddings = self.embed_state(states)\n action_embeddings = self.embed_action(actions)\n returns_embeddings = self.embed_return(returns_to_go)\n time_embeddings = self.embed_timestep(timesteps)\n\n # time embeddings are treated similar to positional embeddings\n state_embeddings = state_embeddings + time_embeddings\n action_embeddings = action_embeddings + time_embeddings\n returns_embeddings = returns_embeddings + time_embeddings\n\n # this makes the sequence look like (R_1, s_1, a_1, R_2, s_2, a_2, ...)\n # which works nice in an autoregressive sense since states predict actions\n stacked_inputs = torch.stack(\n (returns_embeddings, state_embeddings, action_embeddings), dim=1\n ).permute(0, 2, 1, 3).reshape(batch_size, 3*seq_length, self.hidden_size)\n stacked_inputs = self.embed_ln(stacked_inputs)\n\n # to make the attention mask fit the stacked inputs, have to stack it as well\n stacked_attention_mask = torch.stack(\n (attention_mask, attention_mask, attention_mask), dim=1\n ).permute(0, 2, 1).reshape(batch_size, 3*seq_length)\n\n # we feed in the input embeddings (not word indices as in NLP) to the model\n transformer_outputs = self.transformer(\n inputs_embeds=stacked_inputs,\n attention_mask=stacked_attention_mask,\n )\n x = transformer_outputs['last_hidden_state']\n\n # reshape x so that the second dimension corresponds to the original\n # returns (0), states (1), or actions (2); i.e. 
x[:,1,t] is the token for s_t\n x = x.reshape(batch_size, seq_length, 3, self.hidden_size).permute(0, 2, 1, 3)\n\n # get predictions\n return_preds = self.predict_return(x[:,2]) # predict next return given state and action\n state_preds = self.predict_state(x[:,2]) # predict next state given state and action\n action_preds = self.predict_action(x[:,1]) # predict next action given state\n\n return state_preds, action_preds, return_preds\n\n def get_action(self, states, actions, returns_to_go, timesteps, **kwargs):\n # we don't care about the past rewards in this model\n\n states = states.reshape(1, -1, self.state_dim)\n actions = actions.reshape(1, -1, self.act_dim)\n returns_to_go = returns_to_go.reshape(1, -1, 1)\n timesteps = timesteps.reshape(1, -1)\n\n if self.max_length is not None:\n states = states[:,-self.max_length:]\n actions = actions[:,-self.max_length:]\n returns_to_go = returns_to_go[:,-self.max_length:]\n timesteps = timesteps[:,-self.max_length:]\n\n # pad all tokens to sequence length\n attention_mask = torch.cat([torch.zeros(self.max_length-states.shape[1]), torch.ones(states.shape[1])])\n attention_mask = attention_mask.to(dtype=torch.long, device=states.device).reshape(1, -1)\n states = torch.cat(\n [torch.zeros((states.shape[0], self.max_length-states.shape[1], self.state_dim), device=states.device), states],\n dim=1).to(dtype=torch.float32)\n actions = torch.cat(\n [torch.zeros((actions.shape[0], self.max_length - actions.shape[1], self.act_dim),\n device=actions.device), actions],\n dim=1).to(dtype=torch.float32)\n returns_to_go = torch.cat(\n [torch.zeros((returns_to_go.shape[0], self.max_length-returns_to_go.shape[1], 1), device=returns_to_go.device), returns_to_go],\n dim=1).to(dtype=torch.float32)\n timesteps = torch.cat(\n [torch.zeros((timesteps.shape[0], self.max_length-timesteps.shape[1]), device=timesteps.device), timesteps],\n dim=1\n ).to(dtype=torch.long)\n else:\n attention_mask = None\n\n _, action_preds, return_preds = self.forward(\n states, actions, returns_to_go, timesteps, attention_mask=attention_mask, **kwargs)\n\n return action_preds[0,-1]\n\n def select_action(self, states, actions, returns_to_go, timesteps, **kwargs):\n # returns (batch, action_dim)\n batch = states.shape[0]\n states = states.reshape(batch, -1, self.state_dim)\n actions = actions.reshape(batch, -1, self.act_dim)\n returns_to_go = returns_to_go.reshape(batch, -1, 1)\n timesteps = timesteps.reshape(batch, -1)\n\n if self.max_length is not None:\n states = states[:,-self.max_length:]\n actions = actions[:,-self.max_length:]\n returns_to_go = returns_to_go[:,-self.max_length:]\n timesteps = timesteps[:,-self.max_length:]\n\n # pad all tokens to sequence length\n attention_mask = torch.cat([torch.zeros(batch, self.max_length-states.shape[1]), torch.ones(batch, states.shape[1])], axis=1)\n attention_mask = attention_mask.to(dtype=torch.long, device=states.device).reshape(batch, -1)\n states = torch.cat(\n [torch.zeros((states.shape[0], self.max_length-states.shape[1], self.state_dim), device=states.device), states],\n dim=1).to(dtype=torch.float32)\n actions = torch.cat(\n [torch.zeros((actions.shape[0], self.max_length - actions.shape[1], self.act_dim),\n device=actions.device), actions],\n dim=1).to(dtype=torch.float32)\n returns_to_go = torch.cat(\n [torch.zeros((returns_to_go.shape[0], self.max_length-returns_to_go.shape[1], 1), device=returns_to_go.device), returns_to_go],\n dim=1).to(dtype=torch.float32)\n timesteps = torch.cat(\n [torch.zeros((timesteps.shape[0], 
self.max_length-timesteps.shape[1]), device=timesteps.device), timesteps],\n dim=1\n ).to(dtype=torch.long)\n else:\n attention_mask = None\n\n _, action_preds, return_preds = self.forward(\n states, actions, returns_to_go, timesteps, attention_mask=attention_mask, **kwargs)\n\n return action_preds[:,-1, :]"
},
{
"identifier": "SequenceTrainer",
"path": "offlinerlkit/policy_trainer/dt_policy_trainer.py",
"snippet": "class SequenceTrainer:\n def __init__(self, config: TrainerConfig, model: DecisionTransformer, offline_dataset, rollout_dataset = None, is_gym = False):\n '''\n offline_trajs / rollout_trajs: List[Trajectory]\n config members:\n - batch_size\n - lr\n - device\n '''\n self.config = config\n self.device = self.config.device\n self.model = model.to(self.device)\n self.batch_size = config.batch_size\n self.diagnostics = dict()\n self.offline_dataset = offline_dataset\n self.rollout_dataset = rollout_dataset\n\n warmup_steps = 10000\n self.optimizer = torch.optim.AdamW(\n model.parameters(),\n lr=config.lr,\n weight_decay=1e-4,\n )\n self.scheduler = torch.optim.lr_scheduler.LambdaLR(\n self.optimizer,\n lambda steps: min((steps+1)/warmup_steps, 1)\n )\n\n self.logger = config.logger\n self.is_gym = is_gym\n\n def loss_fn(self, pred_action, true_action):\n '''\n Compute the MSE loss.\n - pred_action: (batch, action_dim), logits of the predicted action (don't do softmax)\n - true_action: (batch, action_dim), the true action in 1-dim representation\n Return: scalar tensor. The mean of each loss\n '''\n\n return F.mse_loss(pred_action, true_action)\n\n def eval(self, desired_rtg, train_epoch):\n '''\n state_mean/std: Used for state normalization. Get from offline_dataset only currently\n '''\n state_mean, state_std = self.offline_dataset.get_normalize_coef()\n self.model.train(False)\n rets = [] # list of returns achieved in each epoch\n env = self.config.env\n action_dim = env.action_space.shape[0]\n for epoch in range(self.config.eval_repeat):\n if self.is_gym:\n states = env.reset()\n else:\n states, _ = env.reset()\n if hasattr(env, 'get_true_observation'): # For pointmaze\n states = env.get_true_observation(states)\n states = torch.from_numpy(states)\n states = states.type(torch.float32).to(self.device).unsqueeze(0).unsqueeze(0) # (1,1,state_dim)\n rtgs = torch.Tensor([[[desired_rtg]]]).to(self.device) # (1,1,1)\n timesteps = torch.Tensor([[0]]).to(self.device) # (1,1)\n \n # Initialize action\n actions = torch.empty((1,0,action_dim)).to(self.device) # Actions are represented in one-hot\n\n ret = 0 # total return \n for h in range(self.config.horizon):\n # Get action\n pred_action = self.model.get_action((states - state_mean) / state_std,\n actions.type(torch.float32),\n rtgs.type(torch.float32),\n timesteps.type(torch.float32)) # (act_dim)\n\n # Observe next states, rewards,\n if self.is_gym:\n next_state, reward, terminated, _ = env.step(pred_action.detach().cpu().numpy()) # (state_dim), scalar\n else:\n next_state, reward, terminated, _, _ = env.step(pred_action.detach().cpu().numpy()) # (state_dim), scalar\n if hasattr(env, 'get_true_observation'): # For pointmaze\n next_state = env.get_true_observation(next_state)\n if epoch == 0 and self.config.debug:\n print(f\"Step {h+1}, action is {pred_action.detach().cpu()}, observed next state {next_state}, reward {reward}\") \n next_state = torch.from_numpy(next_state)\n # Calculate return\n ret += reward\n \n # Update states, actions, rtgs, timesteps\n next_state = next_state.unsqueeze(0).unsqueeze(0).to(self.device) # (1,1,state_dim)\n states = torch.cat([states, next_state], dim=1)\n states = states[:, -self.config.ctx: , :] # truncate to ctx_length\n\n pred_action = pred_action.unsqueeze(0).unsqueeze(0).to(self.device) # (1, 1, action_dim)\n \n if self.config.ctx > 1:\n actions = torch.cat([actions, pred_action], dim=1)\n actions = actions[:, -self.config.ctx+1: , :] # actions length is ctx-1\n\n next_rtg = rtgs[0,0,-1] - reward\n 
next_rtg = next_rtg * torch.ones(1,1,1).to(self.device) # (1,1,1)\n rtgs = torch.cat([rtgs, next_rtg], dim=1)\n rtgs = rtgs[:, -self.config.ctx: , :]\n\n # Update timesteps\n timesteps = torch.cat([timesteps, (h+1)*torch.ones(1,1).to(self.device)], dim = 1) \n timesteps = timesteps[:, -self.config.ctx: ]\n\n # Add the ret to list\n rets.append(ret)\n\n ep_reward_mean, ep_reward_std = np.mean(rets), np.std(rets)\n\n # logging\n self.logger.logkv(\"epoch\", train_epoch + 1)\n self.logger.logkv(\"eval/target_return\", desired_rtg)\n self.logger.logkv(\"eval/episode_return\", ep_reward_mean)\n self.logger.logkv(\"eval/episode_return_std\", ep_reward_std) \n\n # Set the model back to training mode\n self.model.train(True)\n return ep_reward_mean\n \n def _run_epoch(self, epoch_num):\n '''\n Run one epoch in the training process \\n\n Epoch_num: int, epoch number, used to display in progress bar. \\n\n During training, we convert action to one_hot_hash\n '''\n if self.rollout_dataset is None: # Only offline dataset, don't differ\n dataset = self.offline_dataset\n else:\n if epoch_num < self.config.pre_epochs:\n dataset = self.offline_dataset\n if self.config.debug:\n print(f\"Pretraining\") \n else:\n dataset = self.rollout_dataset\n if self.config.debug:\n print(f\"Training on rollout data\")\n loader = DataLoader(dataset, shuffle=True, pin_memory=True,\n batch_size= self.config.batch_size,\n num_workers= self.config.num_workers)\n \n # losses = []\n pbar = tqdm(enumerate(loader), total=len(loader))\n losses = []\n for it, (states, actions, _, rtgs, timesteps, attention_mask) in pbar:\n '''\n states, (batch, ctx, state_dim)\n actions, (batch, ctx, action_dim)\n rtgs, (batch, ctx, 1)\n timesteps, (batch, ctx)\n attention_mask, (batch, ctx)\n ''' \n\n states = states.type(torch.float32).to(self.device)\n actions = actions.type(torch.float32).to(self.device)\n rtgs = rtgs.type(torch.float32).to(self.device)\n timesteps = timesteps.to(self.device).long()\n attention_mask = attention_mask.to(self.device)\n\n action_target = torch.clone(actions)\n\n # forward the model\n state_preds, action_preds, reward_preds = self.model.forward(\n states, actions, rtgs, timesteps, attention_mask=attention_mask,\n )\n\n act_dim = action_preds.shape[2]\n action_preds = action_preds.reshape(-1, act_dim)[attention_mask.reshape(-1) > 0]\n action_target = action_target.reshape(-1, act_dim)[attention_mask.reshape(-1) > 0]\n\n loss = self.loss_fn(\n action_preds,\n action_target\n )\n\n losses.append(loss.item())\n \n self.model.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.grad_norm_clip)\n self.optimizer.step()\n\n self.logger.logkv_mean(\"loss\", loss.item())\n pbar.set_description(f\"Epoch {epoch_num+1}, iter {it}: train loss {loss.item():.5f}.\")\n \n\n def train(self):\n start_time = time.time()\n for epoch in range(self.config.max_epochs):\n self._run_epoch(epoch)\n if self.config.last_eval and epoch < self.config.max_epochs - 1:\n pass\n else:\n self.eval(self.config.desired_rtg, train_epoch=epoch)\n self.logger.dumpkvs(exclude=[\"dynamics_training_progress\"])\n self.logger.log(\"total time: {:.2f}s\".format(time.time() - start_time))\n self._save_checkpoint(os.path.join(self.logger.model_dir, \"policy_final.pth\"))\n\n def _save_checkpoint(self, ckpt_path):\n '''\n ckpt_path: str, path of storing the model\n '''\n # DataParallel wrappers keep raw model object in .module attribute\n raw_model = self.model.module if hasattr(self.model, \"module\") else 
self.model\n torch.save(raw_model, ckpt_path)"
},
{
"identifier": "TrainerConfig",
"path": "offlinerlkit/policy_trainer/dt_policy_trainer.py",
"snippet": "class TrainerConfig:\n grad_norm_clip = 1.0\n weight_decay = 0.1 # only applied on matmul weights\n ckpt_path = None\n num_workers = 1 # for DataLoader\n tb_log = None\n log_to_wandb = False\n\n def __init__(self, **kwargs):\n for k,v in kwargs.items():\n setattr(self, k, v)"
},
{
"identifier": "set_up_seed",
"path": "offlinerlkit/utils/set_up_seed.py",
"snippet": "def set_up_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True"
},
{
"identifier": "PickPlaceObsWrapper",
"path": "offlinerlkit/utils/roboverse_utils.py",
"snippet": "class PickPlaceObsWrapper(gym.ObservationWrapper):\n '''\n Wrap pick place environment to return desired obs\n '''\n def __init__(self, env):\n super().__init__(env)\n # Get observation space\n tmp_obs = env.reset()\n\n tmp_true_obs = get_pickplace_obs(tmp_obs)\n low = env.observation_space['state'].low[0]\n high = env.observation_space['state'].high[0]\n self.observation_space = Box(shape = tmp_true_obs.shape, low = low, high = high)\n\n def observation(self, observation: Dict[str, np.ndarray]) -> np.ndarray:\n return get_pickplace_obs(observation)\n\n def reset(self, seed = None):\n if seed is not None:\n np.random.seed(seed) # controls env seed\n return self.observation(self.env.reset())"
},
{
"identifier": "DoubleDrawerObsWrapper",
"path": "offlinerlkit/utils/roboverse_utils.py",
"snippet": "class DoubleDrawerObsWrapper(gym.Wrapper):\n '''\n Wrap pick place environment to return desired obs\n '''\n def __init__(self, env):\n super().__init__(env)\n # Get observation space\n tmp_obs = env.reset()\n info = env.get_info()\n\n tmp_true_obs = get_doubledrawer_obs(tmp_obs, info)\n low = env.observation_space['state'].low[0]\n high = env.observation_space['state'].high[0]\n self.observation_space = Box(shape = tmp_true_obs.shape, low = low, high = high)\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n obs = get_doubledrawer_obs(obs, info)\n return obs, reward, done, info\n\n def reset(self, seed = None):\n if seed is not None:\n np.random.seed(seed) # controls env seed\n obs = self.env.reset()\n info = self.env.get_info()\n return get_doubledrawer_obs(obs, info)"
},
{
"identifier": "get_pickplace_dataset_dt",
"path": "offlinerlkit/utils/roboverse_utils.py",
"snippet": "def get_pickplace_dataset_dt(\n prior_data_path: str, \n task_data_path: str,\n prior_weight: float =1., \n task_weight: float = 1., \n set_type: str = 'full', \n sample_ratio: float = 1.) -> Tuple[Dict, np.ndarray]:\n '''\n Concatenate prior_data and task_data\n prior_weight and task_weight: weight of data point\n\n Args:\n set_type: 'prior', 'task', 'full'\n sample_ratio: Ratio of trajectories sampled. Sometimes we want to train on a smaller dataset.\n\n Return:\n dataset: list trajs: namedtuple with keys \"observations\", \"actions\", \"rewards\", \"returns\", \"timesteps\" \n '''\n SimpleTrajectory = namedtuple(\n \"SimpleTrajectory\", [\"observations\", \"actions\", \"rewards\", \"returns\", \"timesteps\"])\n with open(prior_data_path, \"rb\") as fp:\n prior_data = np.load(fp, allow_pickle=True)\n with open(task_data_path, \"rb\") as ft:\n task_data = np.load(ft, allow_pickle=True)\n set_weight(prior_data, prior_weight)\n set_weight(task_data, task_weight)\n\n # Sample trajectories\n num_trajs_prior = int(len(prior_data) * sample_ratio)\n idxs_prior = np.random.choice(len(prior_data), size=(num_trajs_prior), replace = False)\n prior_data = prior_data[idxs_prior]\n\n num_trajs_task = int(len(task_data) * sample_ratio)\n idxs_task = np.random.choice(len(task_data), size=(num_trajs_task), replace = False)\n task_data = task_data[idxs_task]\n\n if set_type == 'full':\n full_data = np.concatenate([prior_data, task_data], axis=0) # list of dict\n elif set_type == 'prior':\n full_data = prior_data\n elif set_type =='task':\n full_data = task_data\n\n trajs = []\n for traj in full_data:\n last_reward = traj['rewards'][-1]\n rewards = traj['rewards']\n simple_traj = SimpleTrajectory(\n observations= [get_pickplace_obs(obs) for obs in traj['observations']],\n actions = traj['actions'],\n rewards = rewards,\n returns = [last_reward for _ in range(len(rewards))],\n timesteps= list(range(len(rewards)))\n )\n trajs.append(simple_traj)\n return trajs"
},
{
"identifier": "get_doubledrawer_dataset_dt",
"path": "offlinerlkit/utils/roboverse_utils.py",
"snippet": "def get_doubledrawer_dataset_dt(\n prior_data_path: str, \n task_data_path: str,\n prior_weight: float =1., \n task_weight: float = 1., \n set_type: str = 'full', \n sample_ratio: float = 1.) -> Tuple[Dict, np.ndarray]:\n '''\n Concatenate prior_data and task_data\n prior_weight and task_weight: weight of data point\n\n Args:\n set_type: 'prior', 'task', 'full'\n sample_ratio: Ratio of trajectories sampled. Sometimes we want to train on a smaller dataset.\n\n Return:\n dataset: list trajs: namedtuple with keys \"observations\", \"actions\", \"rewards\", \"returns\", \"timesteps\" \n init_obss: np.ndarray (num_traj, obs_dim)\n '''\n SimpleTrajectory = namedtuple(\n \"SimpleTrajectory\", [\"observations\", \"actions\", \"rewards\", \"returns\", \"timesteps\"])\n with open(prior_data_path, \"rb\") as fp:\n prior_data = np.load(fp, allow_pickle=True)\n with open(task_data_path, \"rb\") as ft:\n task_data = np.load(ft, allow_pickle=True)\n set_weight(prior_data, prior_weight)\n set_weight(task_data, task_weight)\n\n # Sample trajectories\n num_trajs_prior = int(len(prior_data) * sample_ratio)\n idxs_prior = np.random.choice(len(prior_data), size=(num_trajs_prior), replace = False)\n prior_data = prior_data[idxs_prior]\n\n num_trajs_task = int(len(task_data) * sample_ratio)\n idxs_task = np.random.choice(len(task_data), size=(num_trajs_task), replace = False)\n task_data = task_data[idxs_task]\n\n if set_type == 'full':\n full_data = np.concatenate([prior_data, task_data], axis=0) # list of dict\n elif set_type == 'prior':\n full_data = prior_data\n elif set_type =='task':\n full_data = task_data\n\n trajs = []\n for traj in full_data:\n last_reward = traj['rewards'][-1]\n rewards = traj['rewards']\n info_list = traj['env_infos']\n obs_list = traj['observations']\n simple_traj = SimpleTrajectory(\n observations= [get_doubledrawer_obs(obs,info) for obs,info in zip(obs_list, [info_list[0]] + info_list[:-1])],\n actions = traj['actions'],\n rewards = rewards,\n returns = [last_reward for _ in range(len(rewards))],\n timesteps= list(range(len(rewards)))\n )\n trajs.append(simple_traj)\n return trajs"
},
{
"identifier": "none_or_str",
"path": "offlinerlkit/utils/none_or_str.py",
"snippet": "def none_or_str(value):\n if value == 'None':\n return None\n return value"
},
{
"identifier": "TrajCtxFloatLengthDataset",
"path": "offlinerlkit/utils/dataset.py",
"snippet": "class TrajCtxFloatLengthDataset(Dataset):\n '''\n Son of the pytorch Dataset class\n Provides context length, no next state.\n Trajectory length is uncertain\n '''\n\n def __init__(self, trajs, ctx = 1, single_timestep = False, keep_ctx = True, with_mask=False, state_normalize=False): \n '''\n trajs: list(traj), namedtuple with attributes \"observations\", \"actions\", \"rewards\", \"returns\", \"timesteps\" \\n\n single_timestep: bool. If true, timestep only keep initial step; Else (ctx,) \\n\n keep_ctx: If False, ctx must be set 1, and we will not keep ctx dimension.\n with_mask: If true, also return attention mask. For DT\n state_normalize: If true, normalize states\n Note: Each traj must have same number of timesteps\n ''' \n self._trajs = trajs\n self._trajectory_num = len(self._trajs)\n self._horizon = len(self._trajs[0].observations)\n self.keep_ctx = keep_ctx\n self.with_mask = with_mask\n\n if not keep_ctx:\n assert ctx == 1, f\"When keep_ctx = False, ctx must be 1\"\n\n self.ctx = ctx\n self.single_timestep = single_timestep\n\n self.state_normalize = state_normalize\n\n if state_normalize:\n states_list = []\n for traj in trajs:\n states_list += traj.observations\n states = np.concatenate(states_list, axis = 0)\n self.state_mean, self.state_std = np.mean(states, axis=0), np.std(states, axis=0) + 1e-6\n else:\n self.state_mean = 0\n self.state_std = 1\n\n self.traj_start_idxs = [] # The index of each traj's start\n cnt = 0\n self.traj_idx_list = [] # maintain the traj_idx of each idx\n for i,traj in enumerate(trajs):\n self.traj_start_idxs.append(cnt)\n traj_len = len(traj.rewards)\n self.traj_idx_list += [i for _ in range(traj_len)]\n cnt += traj_len\n self.traj_start_idxs.append(cnt) # Last idx is the total number of data\n \n def __len__(self):\n return self.traj_start_idxs[-1]\n \n def len(self):\n return self.__len__()\n\n def __getitem__(self, idx):\n '''\n Update: Also train incomplete contexts. Incomplete contexts pad 0.\n Input: idx, int, index to get an RTG trajectory slice from dataset \\n\n Return: An RTG trajectory slice with length ctx_length \\n\n - states: Tensor of size [ctx_length, state_space_size]\n - actions: Tensor of size [ctx_length, action_dim], here action is converted to one-hot representation\n - rewards: Tensor of size [ctx_length, 1]\n - rtgs: Tensor of size [ctx_length, 1]\n - timesteps: (ctx_length) if single_timestep=False; else (1,), only keep the first timestep\n Note: if keep_ctx = False, all returns above will remove the first dim. 
In particular, timesteps becomes scalar.\n '''\n\n ctx = self.ctx # context length\n trajectory_idx = self.traj_idx_list[idx]\n res_idx = idx - self.traj_start_idxs[trajectory_idx]\n\n # Test whether it is full context length\n if res_idx - ctx + 1 < 0:\n start_idx = 0\n pad_len = ctx - res_idx - 1 # number of zeros to pad\n else:\n start_idx = res_idx - ctx + 1\n pad_len = 0\n\n traj = self._trajs[trajectory_idx]\n states_slice = torch.from_numpy(np.array(traj.observations)[start_idx : res_idx + 1, :])\n states_slice = (states_slice - self.state_mean) / self.state_std\n\n actions_slice = torch.from_numpy(np.array(traj.actions)[start_idx : res_idx + 1, :])\n rewards_slice = torch.from_numpy(np.array(traj.rewards)[start_idx : res_idx + 1]).unsqueeze(-1) # (T,1)\n rtgs_slice = torch.from_numpy(np.array(traj.returns)[start_idx : res_idx + 1]).unsqueeze(-1) # (T,1)\n\n # pad 0\n states_slice = torch.cat([torch.zeros(pad_len, states_slice.shape[-1]), states_slice], dim = 0)\n actions_slice = torch.cat([torch.zeros(pad_len, actions_slice.shape[-1]), actions_slice], dim = 0)\n rewards_slice = torch.cat([torch.zeros(pad_len, rewards_slice.shape[-1]), rewards_slice], dim = 0)\n rtgs_slice = torch.cat([torch.zeros(pad_len, rtgs_slice.shape[-1]), rtgs_slice], dim = 0)\n\n if self.single_timestep: # take the last step\n timesteps_slice = torch.from_numpy(np.array(traj.timesteps)[res_idx : res_idx + 1]) # (1,)\n else: \n timesteps_slice = torch.from_numpy(np.array(traj.timesteps)[start_idx : res_idx + 1]) #(real_ctx_len, )\n timesteps_slice = torch.cat([torch.zeros(pad_len), timesteps_slice], dim = 0)\n\n if not self.keep_ctx:\n states_slice = states_slice[0,:]\n actions_slice = actions_slice[0,:]\n rewards_slice = rewards_slice[0,:]\n rtgs_slice = rtgs_slice[0,:]\n timesteps_slice = timesteps_slice[0]\n\n assert states_slice.shape[0] != 0, f\"{idx}, {states_slice.shape}\"\n if self.with_mask:\n attn_mask = torch.cat([torch.zeros((pad_len)), torch.ones((ctx-pad_len))], dim=-1)\n return states_slice, actions_slice, rewards_slice, rtgs_slice, timesteps_slice, attn_mask\n else:\n return states_slice, actions_slice, rewards_slice, rtgs_slice, timesteps_slice\n \n def getitem(self, idx):\n if self.with_mask:\n states_slice, actions_slice, rewards_slice, rtgs_slice, timesteps_slice, attn_mask = self.__getitem__(idx)\n return states_slice, actions_slice, rewards_slice, rtgs_slice, timesteps_slice, attn_mask\n else:\n states_slice, actions_slice, rewards_slice, rtgs_slice, timesteps_slice = self.__getitem__(idx)\n return states_slice, actions_slice, rewards_slice, rtgs_slice, timesteps_slice \n\n \n def get_max_return(self):\n traj_rets = [traj.returns[0] for traj in self._trajs]\n return max(traj_rets)\n \n def get_normalize_coef(self):\n '''\n Get state normalization mean and std\n '''\n return self.state_mean, self.state_std"
},
{
"identifier": "Logger",
"path": "offlinerlkit/utils/logger.py",
"snippet": "class Logger(object):\n def __init__(self, dir: str, ouput_config: Dict) -> None:\n self._dir = dir\n self._init_dirs()\n self._init_ouput_handlers(ouput_config)\n self._name2val = defaultdict(float)\n self._name2cnt = defaultdict(int)\n self._level = INFO\n self._timestep = 0\n \n def _init_dirs(self) -> None:\n self._record_dir = os.path.join(self._dir, \"record\")\n self._checkpoint_dir = os.path.join(self._dir, \"checkpoint\")\n self._model_dir = os.path.join(self._dir, \"model\")\n self._result_dir = os.path.join(self._dir, \"result\")\n os.mkdir(self._record_dir)\n os.mkdir(self._checkpoint_dir)\n os.mkdir(self._model_dir)\n os.mkdir(self._result_dir)\n \n def _init_ouput_handlers(self, output_config: Dict) -> None:\n self._output_handlers = []\n for file_name, fmt in output_config.items():\n try:\n self._output_handlers.append(HANDLER[fmt](os.path.join(self._record_dir, file_name)))\n except KeyError:\n warnings.warn(\"Invalid output type, Valid types: stdout, csv, tensorboard\", DeprecationWarning)\n # default output to console\n self._output_handlers.append(StandardOutputHandler(sys.stdout))\n \n def log_hyperparameters(self, hyper_param: Dict) -> None:\n json_output_handler = JSONOutputHandler(os.path.join(self._record_dir, \"hyper_param\"))\n json_output_handler.writekvs(hyper_param)\n json_output_handler.close()\n for handler in self._output_handlers:\n if isinstance(handler, TensorBoardOutputHandler):\n handler.add_hyper_params_to_tb(hyper_param)\n\n def logkv(self, key: Any, val: Any) -> None:\n \"\"\"\n Log a value of some diagnostic\n Call this once for each diagnostic quantity, each iteration\n If called many times, last value will be used.\n \"\"\"\n self._name2val[key] = val\n\n def logkv_mean(self, key: Any, val: Number) -> None:\n \"\"\"\n The same as logkv(), but if called many times, values averaged.\n \"\"\"\n oldval, cnt = self._name2val[key], self._name2cnt[key]\n self._name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)\n self._name2cnt[key] = cnt + 1\n\n def dumpkvs(self, exclude:Optional[Union[str, Tuple[str, ...]]]=None) -> None:\n # log timestep\n self.logkv(DEFAULT_X_NAME, self._timestep)\n for handler in self._output_handlers:\n if isinstance(handler, KVWriter):\n if exclude is not None and handler.handler_name in exclude:\n continue\n handler.writekvs(self._name2val)\n self._name2val.clear()\n self._name2cnt.clear()\n\n def log(self, s: str, level=INFO) -> None:\n for handler in self._output_handlers:\n if isinstance(handler, StandardOutputHandler):\n handler.writestr(s)\n \n def set_timestep(self, timestep: int) -> None:\n self._timestep = timestep\n for handler in self._output_handlers:\n if isinstance(handler, TensorBoardOutputHandler):\n handler.set_step(timestep)\n\n def set_level(self, level) -> None:\n self._level = level\n\n @property\n def record_dir(self) -> str:\n return self._record_dir\n \n @property\n def checkpoint_dir(self) -> str:\n return self._checkpoint_dir\n\n @property\n def model_dir(self) -> str:\n return self._model_dir\n \n @property\n def result_dir(self) -> str:\n return self._result_dir\n \n def close(self) -> None:\n for handler in self._output_handlers:\n handler.close()"
},
{
"identifier": "make_log_dirs",
"path": "offlinerlkit/utils/logger.py",
"snippet": "def make_log_dirs(\n task_name: str,\n algo_name: str,\n exp_name: str,\n args: Dict,\n part: Optional[str] = None,\n record_params: Optional[List]=None\n) -> str:\n if record_params is not None:\n for param_name in record_params:\n algo_name += f\"&{param_name}={args[param_name]}\"\n\n if part is not None:\n log_dirs = os.path.join(ROOT_DIR, task_name, algo_name, exp_name, part)\n else:\n log_dirs = os.path.join(ROOT_DIR, task_name, algo_name, exp_name)\n os.makedirs(log_dirs)\n return log_dirs"
}
] | import numpy as np
import torch
import os
import argparse
import roboverse
import datetime
from offlinerlkit.policy import DecisionTransformer
from offlinerlkit.policy_trainer import SequenceTrainer, TrainerConfig
from offlinerlkit.utils.set_up_seed import set_up_seed
from offlinerlkit.utils.roboverse_utils import PickPlaceObsWrapper, DoubleDrawerObsWrapper, get_pickplace_dataset_dt, get_doubledrawer_dataset_dt
from offlinerlkit.utils.none_or_str import none_or_str
from offlinerlkit.utils.dataset import TrajCtxFloatLengthDataset
from offlinerlkit.utils.logger import Logger, make_log_dirs | 9,809 |
'''
Recommended hyperparameters:
pickplace, horizon=40
doubledraweropen, horizon=50
doubledrawercloseopen, horizon=80
doubledrawerpickplaceopen, horizon=80
'''
def get_args():
parser = argparse.ArgumentParser()
# general
parser.add_argument("--algo-name", type=str, default="dt")
parser.add_argument("--task", type=str, default="pickplace", help="task name")
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--num_workers", type=int, default=1, help="Dataloader workers, align with cpu number")
parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu")
parser.add_argument("--last_eval", action="store_false")
# env config
parser.add_argument('--data_dir', type=str, required=True)
parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace")
# transformer mode
parser.add_argument("--n_layer", type=int, default=4)
parser.add_argument("--n_head", type=int, default=4)
parser.add_argument("--n_embd", type=int, default=32)
parser.add_argument('--ctx', type=int, default=10)
parser.add_argument('--embed_dim', type=int, default=128, help="dt token embedding dimension")
# Train
parser.add_argument("--epoch", type=int, default=100)
parser.add_argument("--eval_episodes", type=int, default=100)
parser.add_argument("--batch_size", type=int, default=256)
parser.add_argument('--lr', type=float, default=6e-3, help="learning rate of Trainer" )
parser.add_argument('--goal_mul', type=float, default=1., help="goal = max_dataset_return * goal_mul")
parser.add_argument('--sample', action='store_false', help="Sample action by probs, or choose the largest prob")
return parser.parse_args()
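# Example invocation (illustrative only; the script's file name is not shown in this excerpt):
#   python <this_script>.py --task pickplace --horizon 40 --data_dir ./data --ctx 10
# Note that --last_eval and --sample are declared with action='store_false',
# so passing either flag turns the corresponding behaviour off rather than on.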
def discount_cumsum(x, gamma):
discount_cumsum = np.zeros_like(x)
discount_cumsum[-1] = x[-1]
for t in reversed(range(x.shape[0]-1)):
discount_cumsum[t] = x[t] + gamma * discount_cumsum[t+1]
return discount_cumsum
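# Worked example (added for clarity, not part of the original script):
# discount_cumsum(np.array([1., 0., 2.]), gamma=0.5) fills the array backwards:
# dc[2] = 2.0, dc[1] = 0 + 0.5 * 2.0 = 1.0, dc[0] = 1 + 0.5 * 1.0 = 1.5,
# returning [1.5, 1.0, 2.0]. With gamma = 1 this is the return-to-go at every
# timestep, the quantity Decision Transformer-style training conditions on.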
def train(args = get_args()):
set_up_seed(args.seed)
# create env and dataset
if args.task == 'pickplace':
env = roboverse.make('Widow250PickTray-v0')
env = PickPlaceObsWrapper(env)
obs_space = env.observation_space
args.obs_shape = obs_space.shape
obs_dim = np.prod(args.obs_shape)
args.action_shape = env.action_space.shape
action_dim = np.prod(args.action_shape)
prior_data_path = os.path.join(args.data_dir, "pickplace_prior.npy")
task_data_path = os.path.join(args.data_dir, "pickplace_task.npy")
|
'''
Recommended hyperparameters:
pickplace, horizon=40
doubledraweropen, horizon=50
doubledrawercloseopen, horizon=80
doubledrawerpickplaceopen, horizon=80
'''
def get_args():
parser = argparse.ArgumentParser()
# general
parser.add_argument("--algo-name", type=str, default="dt")
parser.add_argument("--task", type=str, default="pickplace", help="task name")
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--num_workers", type=int, default=1, help="Dataloader workers, align with cpu number")
parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu")
parser.add_argument("--last_eval", action="store_false")
# env config
parser.add_argument('--data_dir', type=str, required=True)
parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace")
# transformer mode
parser.add_argument("--n_layer", type=int, default=4)
parser.add_argument("--n_head", type=int, default=4)
parser.add_argument("--n_embd", type=int, default=32)
parser.add_argument('--ctx', type=int, default=10)
parser.add_argument('--embed_dim', type=int, default=128, help="dt token embedding dimension")
# Train
parser.add_argument("--epoch", type=int, default=100)
parser.add_argument("--eval_episodes", type=int, default=100)
parser.add_argument("--batch_size", type=int, default=256)
parser.add_argument('--lr', type=float, default=6e-3, help="learning rate of Trainer" )
parser.add_argument('--goal_mul', type=float, default=1., help="goal = max_dataset_return * goal_mul")
parser.add_argument('--sample', action='store_false', help="Sample action by probs, or choose the largest prob")
return parser.parse_args()
def discount_cumsum(x, gamma):
discount_cumsum = np.zeros_like(x)
discount_cumsum[-1] = x[-1]
for t in reversed(range(x.shape[0]-1)):
discount_cumsum[t] = x[t] + gamma * discount_cumsum[t+1]
return discount_cumsum
def train(args = get_args()):
set_up_seed(args.seed)
# create env and dataset
if args.task == 'pickplace':
env = roboverse.make('Widow250PickTray-v0')
env = PickPlaceObsWrapper(env)
obs_space = env.observation_space
args.obs_shape = obs_space.shape
obs_dim = np.prod(args.obs_shape)
args.action_shape = env.action_space.shape
action_dim = np.prod(args.action_shape)
prior_data_path = os.path.join(args.data_dir, "pickplace_prior.npy")
task_data_path = os.path.join(args.data_dir, "pickplace_task.npy")
| trajs = get_pickplace_dataset_dt( | 6 | 2023-10-11 08:36:06+00:00 | 12k |
parklab/Salamander | src/salamander/nmf_framework/corrnmf.py | [
{
"identifier": "match_signatures_pair",
"path": "src/salamander/utils.py",
"snippet": "def match_signatures_pair(\n signatures1: pd.DataFrame, signatures2: pd.DataFrame, metric=\"cosine\"\n):\n \"\"\"\n Match a pair of signature catalogs using their pairwise column distances,\n see https://en.wikipedia.org/wiki/Assignment_problem.\n\n Output:\n ------\n reordered_indices: np.ndarray\n The list of column indices such that reordering signatures2 using this list\n minimizes the sum of the pairwise column distances between\n signatures1 and signatures2.\n \"\"\"\n if signatures1.shape != signatures2.shape:\n raise ValueError(\"The signatures must be of the same shape.\")\n\n pdist = pairwise_distances(signatures1.T, signatures2.T, metric=metric)\n reordered_indices = linear_sum_assignment(pdist)[1]\n\n return reordered_indices"
},
{
"identifier": "shape_checker",
"path": "src/salamander/utils.py",
"snippet": "def shape_checker(arg_name: str, arg, allowed_shape):\n \"\"\"\n A helper function to test the shape of a numpy ndarray or pandas dataframe.\n\n Input:\n ------\n arg_name: str\n The name of the argument\n arg:\n The actual value of the argument\n allowed_shape:\n The expected shape of 'arg'\n \"\"\"\n type_checker(arg_name, arg, [np.ndarray, pd.DataFrame])\n\n if arg.shape != allowed_shape:\n raise ValueError(f\"The shape of '{arg_name}' has to be {allowed_shape}.\")"
},
{
"identifier": "type_checker",
"path": "src/salamander/utils.py",
"snippet": "def type_checker(arg_name: str, arg, allowed_types):\n \"\"\"\n A helper function to test the type of an argument.\n\n Input:\n ------\n arg_name: str\n The name of the argument\n arg:\n The actual value of the argument\n allowed_types: a type or list of types\n The type or list of types allowed for 'arg'\n \"\"\"\n if isinstance(allowed_types, type):\n allowed_types = [allowed_types]\n\n if type(arg) not in allowed_types:\n raise TypeError(f\"The type of '{arg_name}' has to be one of {allowed_types}.\")"
},
{
"identifier": "kl_divergence",
"path": "src/salamander/nmf_framework/_utils_klnmf.py",
"snippet": "@njit(fastmath=True)\ndef kl_divergence(X: np.ndarray, W: np.ndarray, H: np.ndarray, weights=None) -> float:\n r\"\"\"\n The generalized Kullback-Leibler divergence\n D_KL(X || WH) = \\sum_vd X_vd * ln(X_vd / (WH)_vd) - \\sum_vd X_vd + \\sum_vd (WH)_vd.\n\n Parameters\n ----------\n X : np.ndarray of shape (n_features, n_samples)\n data matrix\n\n W : np.ndarray of shape (n_features, n_signatures)\n signature matrix\n\n H : np.ndarray of shape (n_signatures, n_samples)\n exposure matrix\n\n weights : np.ndarray of shape (n_samples,)\n per sample weights\n\n Returns\n -------\n result : float\n \"\"\"\n V, D = X.shape\n WH = W @ H\n result = 0.0\n\n for d in range(D):\n summand_sample = 0.0\n\n for v in range(V):\n if X[v, d] != 0:\n summand_sample += X[v, d] * np.log(X[v, d] / WH[v, d])\n summand_sample -= X[v, d]\n summand_sample += WH[v, d]\n\n if weights is not None:\n summand_sample *= weights[d]\n\n result += summand_sample\n\n return result"
},
{
"identifier": "poisson_llh",
"path": "src/salamander/nmf_framework/_utils_klnmf.py",
"snippet": "def poisson_llh(X: np.ndarray, W: np.ndarray, H: np.ndarray) -> float:\n \"\"\"\n The Poisson log-likelihood generalized to X, W and H having\n non-negative real numbers.\n\n Parameters\n ----------\n X : np.ndarray of shape (n_features, n_samples)\n data matrix\n\n W : np.ndarray of shape (n_features, n_signatures)\n signature matrix\n\n H : np.ndarray of shape (n_signatures, n_samples)\n exposure matrix\n\n Returns\n -------\n result : float\n \"\"\"\n result = _poisson_llh_wo_factorial(X, W, H)\n result -= np.sum(gammaln(1 + X))\n\n return result"
},
{
"identifier": "samplewise_kl_divergence",
"path": "src/salamander/nmf_framework/_utils_klnmf.py",
"snippet": "def samplewise_kl_divergence(\n X: np.ndarray, W: np.ndarray, H: np.ndarray, weights=None\n) -> np.ndarray:\n \"\"\"\n Per sample (weighted) generalized Kullback-Leibler divergence D_KL(x || Wh).\n\n Parameters\n ----------\n X : np.ndarray of shape (n_features, n_samples)\n data matrix\n\n W : np.ndarray of shape (n_features, n_signatures)\n signature matrix\n\n H : np.ndarray of shape (n_signatures, n_samples)\n exposure matrix\n\n weights : np.ndarray of shape (n_samples,)\n per sample weights\n\n Returns\n -------\n errors : np.ndarray of shape (n_samples,)\n \"\"\"\n X_data = np.copy(X).astype(float)\n indices = X == 0\n X_data[indices] = EPSILON\n WH_data = W @ H\n WH_data[indices] = EPSILON\n\n s1 = np.einsum(\"vd,vd->d\", X_data, np.log(X_data / WH_data))\n s2 = -np.sum(X, axis=0)\n s3 = np.dot(H.T, np.sum(W, axis=0))\n\n errors = s1 + s2 + s3\n\n if weights is not None:\n errors *= weights\n\n return errors"
},
{
"identifier": "initialize",
"path": "src/salamander/nmf_framework/initialization.py",
"snippet": "def initialize(\n X: np.ndarray,\n n_signatures: int,\n init_method=\"nndsvd\",\n given_signatures=None,\n **kwargs,\n):\n \"\"\"\n Initialize the signature and exposure matrices.\n\n Parameters\n ----------\n X : np.ndarray\n count matrix\n\n n_signatures : int\n number of signatures\n\n init_method : str\n initialization method. One of 'custom', 'flat', 'hierarchical_cluster',\n 'nndsvd', 'nndsvda', 'nndsvdar', 'random', 'separableNMF'\n\n given_signatures : pd.Dataframe, default=None\n At most 'n_signatures' many signatures can be provided to\n overwrite some of the initialized signatures. This does not\n change the initialized exposurse.\n\n kwargs : dict\n Any keyword arguments to be passed to the initialization method.\n This includes, for example, a possible 'seed' keyword argument\n for all stochastic methods.\n\n Returns\n -------\n W : np.ndarray\n signature matrix\n\n H : np.ndarray\n exposure matrix\n\n signature_names : list\n The signature names. By default, the signatures are named\n 'Sigk', where 'k' is one plus the index of the signature.\n If 'given_signatures' are provided, the names are adjusted\n accordingly.\n \"\"\"\n value_checker(\"init_method\", init_method, INIT_METHODS)\n\n if init_method == \"custom\":\n W, H = init_custom(X, n_signatures, **kwargs)\n\n elif init_method == \"flat\":\n W, H = init_flat(X, n_signatures)\n\n elif init_method in [\"nndsvd\", \"nndsvda\", \"nndsvdar\"]:\n W, H = init_nndsvd(X, n_signatures, init=init_method, **kwargs)\n\n elif init_method == \"random\":\n W, H = init_random(X, n_signatures, **kwargs)\n\n else:\n W, H = init_separableNMF(X, n_signatures, **kwargs)\n\n if given_signatures is not None:\n n_given_signatures = len(given_signatures.columns)\n W[:, :n_given_signatures] = given_signatures.copy().values\n given_signatures_names = given_signatures.columns.to_numpy(dtype=str)\n n_new_signatures = n_signatures - n_given_signatures\n new_signatures_names = np.array([f\"Sig{k+1}\" for k in range(n_new_signatures)])\n signature_names = np.concatenate([given_signatures_names, new_signatures_names])\n else:\n signature_names = np.array([f\"Sig{k+1}\" for k in range(n_signatures)])\n\n W, H = normalize_WH(W, H)\n W, H = W.clip(EPSILON), H.clip(EPSILON)\n return W, H, signature_names"
},
{
"identifier": "SignatureNMF",
"path": "src/salamander/nmf_framework/signature_nmf.py",
"snippet": "class SignatureNMF(ABC):\n \"\"\"\n The abstract class SignatureNMF unifies the structure of\n multiple NMF algorithms used for signature analysis.\n\n Common properties and methods of all algorithms are indicated,\n i.e. have to be implemented by child classes, or implemented. Overview:\n\n Every child class has to implement the following attributes:\n\n - signatures: pd.DataFrame\n The signature matrix including mutation type names and signature names\n\n - exposures: pd.DataFrames\n The exposure matrix including the signature names and sample names\n\n - _n_parameters: int\n The number of parameters fitted by the NMF algorithm.\n This is needed to compute the Bayesian Information Criterion (BIC)\n\n - reconstruction_error: float\n The reconstruction error between the count matrix and\n the reconstructed count matrix.\n\n - samplewise_reconstruction_error: np.ndarray\n The samplewise reconstruction error between the sample counts and\n the reconstructed sample counts.\n\n - objective: str\n \"minimize\" or \"maximize\". Whether the NMF algorithm maximizes or\n minimizes the objective function. Some algorithms maximize a likelihood,\n others minimize a distance. The distinction is useful for filtering NMF runs\n based on the fitted objective function value downstream.\n\n - corr_signatures: pd.DataFrame\n The signature correlation matrix\n\n - corr_samples: pd.DataFrame\n The sample correlation matrix\n\n\n Every child class has to implement the following methods:\n\n - objective_fuction:\n The objective function to optimize when running the algorithm\n\n - loglikelihood:\n The loglikelihood of the underyling generative model\n\n - _initialize:\n A method to initialize all model parameters before fitting\n\n - fit:\n Run the NMF algorithm for a given mutation count data. 
Every\n fit method should also implement a version that allows fixing\n arbitrary many a priori known signatures.\n\n - _get_embedding_data:\n A helper function for the embedding plot\n\n - _get_default_embedding_annotations:\n A helper function for the embedding plot\n\n\n The following attributes and methods are implemented in SignatureNMF:\n\n - data_reconstructed: pd.DataFrame\n The recovered mutation count data given\n the current signatures and exposures.\n\n - X_reconstructed: np.ndarray\n The recovered mutation count matrix given\n the current signatures and exposures\n\n - bic: float\n The value of the Bayesian Information Criterion (BIC)\n\n - _setup_data_parameters:\n Perform parameter checks on the input data and add attributes\n\n - plot_history:\n Plot the history of the objective function values after fitting the model\n\n - plot_signatures:\n Plot the signatures using the signatures_plot function implemented in\n the plot module\n\n - plot_correlation:\n Plot the correlation of either the signatures or exposures\n using the corr_plot function implemented in the plot module\n\n - plot_embeddings:\n Plot the sample (and potentially the signature) embeddings in 2D\n using PCA, tSNE or UMAP\n \"\"\"\n\n def __init__(\n self,\n n_signatures=1,\n init_method=\"nndsvd\",\n min_iterations=500,\n max_iterations=10000,\n conv_test_freq=10,\n tol=1e-7,\n ):\n \"\"\"\n Input:\n ------\n n_signatures: int\n The number of underlying signatures that are assumed to\n have generated the mutation count data\n\n init_method: str\n The initialization method for the NMF algorithm\n\n min_iterations: int\n The minimum number of iterations to perform by the NMF algorithm\n\n max_iterations: int\n The maximum number of iterations to perform by the NMF algorithm\n\n conv_test_freq: int\n The frequency at which the algorithm is tested for convergence.\n The objective function value is only computed every 'conv_test_freq'\n many iterations, which also affects a potentially saved history of\n the objective function values.\n\n tol: float\n The NMF algorithm is converged when the relative change of\n the objective function of one iteration is smaller\n than the tolerance 'tol'.\n \"\"\"\n init_methods = [\n \"custom\",\n \"flat\",\n \"hierarchical_cluster\",\n \"nndsvd\",\n \"nndsvda\",\n \"nndsvdar\",\n \"random\",\n \"separableNMF\",\n ]\n value_checker(\"init_method\", init_method, init_methods)\n\n self.n_signatures = n_signatures\n self.signature_names = None\n self.init_method = init_method\n self.min_iterations = min_iterations\n self.max_iterations = max_iterations\n self.conv_test_freq = conv_test_freq\n self.tol = tol\n\n # initialize data/fitting dependent attributes\n self.X = None\n self.n_features = 0\n self.n_given_signatures = 0\n self.n_samples = 0\n self.mutation_types = np.empty(0, dtype=str)\n self.sample_names = np.empty(0, dtype=str)\n self.history = {}\n\n @property\n @abstractmethod\n def signatures(self) -> pd.DataFrame:\n \"\"\"\n Extract the mutational signatures as a pandas dataframe.\n \"\"\"\n pass\n\n @property\n @abstractmethod\n def exposures(self) -> pd.DataFrame:\n \"\"\"\n Extract the signature exposures of samples as a pandas dataframe.\n \"\"\"\n pass\n\n @property\n def data_reconstructed(self) -> pd.DataFrame:\n return (self.signatures @ self.exposures).astype(int)\n\n @property\n def X_reconstructed(self) -> np.ndarray:\n return self.data_reconstructed.values\n\n @property\n @abstractmethod\n def reconstruction_error(self) -> float:\n \"\"\"\n The reconstruction 
error between the count matrix and\n the reconstructed count matrix.\n \"\"\"\n pass\n\n @property\n @abstractmethod\n def samplewise_reconstruction_error(self) -> np.ndarray:\n \"\"\"\n The samplewise reconstruction error between the sample counts and\n the reconstructed sample counts.\n \"\"\"\n pass\n\n @abstractmethod\n def objective_function(self) -> float:\n \"\"\"\n The objective function to be optimized during fitting.\n \"\"\"\n pass\n\n @abstractmethod\n def loglikelihood(self) -> float:\n \"\"\"\n The log-likelihood of the underlying generative model.\n \"\"\"\n pass\n\n @property\n @abstractmethod\n def _n_parameters(self) -> int:\n \"\"\"\n Every child class has to implement a function returning\n the number of parameters estimated by the respective model.\n This is allows to, for example, implement the BIC\n (Bayesian information criterion). The BIC can be used to\n estimate the optimal number of signatures.\n \"\"\"\n pass\n\n @property\n def bic(self) -> float:\n \"\"\"\n Bayesian information criterion (BIC).\n Can only be called after the _setup_parameters_fitting function as it\n requires the number of samples be an attribute.\n \"\"\"\n return self._n_parameters * np.log(self.n_samples) - 2 * self.loglikelihood()\n\n def _check_given_signatures(self, given_signatures: pd.DataFrame):\n \"\"\"\n Check if the given signatures are compatible with the\n number of signatures of the algorithm and the\n mutation types of the input data.\n\n given_signatures: pd.DataFrame\n Known signatures that should be fixed by the algorithm.\n The number of known signatures can be less or equal to the\n number of signatures specified in the algorithm instance.\n \"\"\"\n type_checker(\"given_signatures\", given_signatures, pd.DataFrame)\n given_mutation_types = given_signatures.index.to_numpy(dtype=str)\n compatible = (\n np.array_equal(given_mutation_types, self.mutation_types)\n and given_signatures.shape[1] <= self.n_signatures\n )\n\n if not compatible:\n raise ValueError(\n f\"You have to provide at most {self.n_signatures} signatures with \"\n f\"mutation types matching to your data.\"\n )\n\n @abstractmethod\n def _initialize(self):\n \"\"\"\n Initialize model parameters and attributes before fitting.\n Enforcing the existence of _initialize unifies the implementation of\n the NMF algorithms.\n\n Example:\n\n Before running the Lee & Seung NMF multiplicative update rules to\n decompose the mutation count matrix X into a signature matrix W and\n an exposure matrix H, both W and H have to be initialized.\n \"\"\"\n\n def _setup_data_parameters(self, data: pd.DataFrame):\n \"\"\"\n Perform parameter checks before running the fit method.\n\n Input:\n ------\n data: pd.DataFrame\n The mutation count pandas dataframe with indices and column names.\n Samples are expected to corresponding to columns.\n \"\"\"\n type_checker(\"data\", data, pd.DataFrame)\n self.X = data.values.clip(EPSILON)\n self.n_features, self.n_samples = data.shape\n self.mutation_types = data.index.values.astype(str)\n self.sample_names = data.columns.values.astype(str)\n\n @abstractmethod\n def fit(self, data: pd.DataFrame, given_signatures=None):\n \"\"\"\n Fit the model parameters. Child classes are expected to handle\n 'given_signatures' appropriately.\n\n Input:\n ------\n data: pd.DataFrame\n The named mutation count data of shape (n_features, n_samples).\n\n given_signatures: pd.DataFrame, by default None\n A priori known signatures. 
The number of given signatures has\n to be less or equal to the number of signatures of NMF\n algorithm instance, and the mutation type names have to match\n the mutation types of the count data.\n \"\"\"\n\n def plot_history(self, ax=None, min_iteration=0, outfile=None, **kwargs):\n if not self.history:\n raise ValueError(\n \"No history available, the model has to be fitted first. \"\n \"Remember to set 'history' to 'True' when calling 'fit()'.\"\n )\n\n history_plot(\n self.history[\"objective_function\"],\n self.conv_test_freq,\n min_iteration=min_iteration,\n ax=ax,\n **kwargs,\n )\n\n if outfile is not None:\n plt.savefig(outfile, bbox_inches=\"tight\")\n\n return ax\n\n def plot_signatures(\n self,\n catalog=None,\n colors=None,\n annotate_mutation_types=False,\n axes=None,\n outfile=None,\n **kwargs,\n ):\n \"\"\"\n Plot the signatures, see plot.py for the implementation of signatures_plot.\n \"\"\"\n axes = signatures_plot(\n self.signatures,\n catalog=catalog,\n colors=colors,\n annotate_mutation_types=annotate_mutation_types,\n axes=axes,\n **kwargs,\n )\n\n if outfile is not None:\n plt.savefig(outfile, bbox_inches=\"tight\")\n\n return axes\n\n def plot_exposures(\n self,\n sample_order=None,\n reorder_signatures=True,\n annotate_samples=True,\n colors=None,\n ncol_legend=1,\n ax=None,\n outfile=None,\n **kwargs,\n ):\n \"\"\"\n Visualize the exposures as a stacked bar chart,\n see plot.py for the implementation.\n \"\"\"\n ax = exposures_plot(\n exposures=self.exposures,\n sample_order=sample_order,\n reorder_signatures=reorder_signatures,\n annotate_samples=annotate_samples,\n colors=colors,\n ncol_legend=ncol_legend,\n ax=ax,\n **kwargs,\n )\n if outfile is not None:\n plt.savefig(outfile, bbox_inches=\"tight\")\n\n return ax\n\n @property\n @abstractmethod\n def corr_signatures(self) -> pd.DataFrame:\n \"\"\"\n Every child class of SignatureNMF has to implement a function that\n returns the signature correlation matrix as a pandas dataframe.\n \"\"\"\n\n @property\n @abstractmethod\n def corr_samples(self) -> pd.DataFrame:\n \"\"\"\n Every child class of SignatureNMF has to implement a function that\n returns the sample correlation matrix as a pandas dataframe.\n \"\"\"\n\n def plot_correlation(self, data=\"signatures\", annot=False, outfile=None, **kwargs):\n \"\"\"\n Plot the correlation matrix of the signatures or samples.\n See plot.py for the implementation of corr_plot.\n\n Input:\n ------\n *args, **kwargs:\n arguments to be passed to corr_plot\n \"\"\"\n value_checker(\"data\", data, [\"signatures\", \"samples\"])\n\n if data == \"signatures\":\n corr = self.corr_signatures\n\n else:\n corr = self.corr_samples\n\n clustergrid = corr_plot(corr, annot=annot, **kwargs)\n\n if outfile is not None:\n plt.savefig(outfile, bbox_inches=\"tight\")\n\n return clustergrid\n\n @abstractmethod\n def _get_embedding_data(self) -> np.ndarray:\n \"\"\"\n Get the data points for the dimensionality reduction / embedding plot.\n One data point corresponds to a row of the embedding data.\n Usually, these are the transposed exposures.\n \"\"\"\n\n @abstractmethod\n def _get_default_embedding_annotations(self) -> np.ndarray:\n \"\"\"\n Get the annotations of the data points in the embedding plot.\n \"\"\"\n\n def plot_embeddings(self, annotations=None, outfile=None, **kwargs):\n \"\"\"\n Plot a dimensionality reduction of the exposure representation.\n In most NMF algorithms, this is just the exposures of the samples.\n In CorrNMF, the exposures matrix is refactored, and there are both\n 
sample and signature embeddings in a shared embedding space.\n\n If the embedding dimension is one or two, the embeddings are be plotted\n directly, ignoring the chosen method.\n See plot.py for the implementation of 'embeddings_plot'.\n\n Parameters\n ----------\n annotations : list[str], default=None\n Annotations per data point, e.g. the sample names. If None,\n the algorithm-specific default annotations are used.\n For example, CorrNMF annotates the signature embeddings by default.\n Note that there are 'n_signatures' + 'n_samples' data points in CorrNMF,\n i.e. the first 'n_signatures' elements in 'annotations'\n are the signature annotations, not any sample annotations.\n\n outfile : str, default=None\n If not None, the figure will be saved in the specified file path.\n\n **kwargs :\n keyword arguments to pass to seaborn's scatterplot\n\n Returns\n -------\n ax : matplotlib.axes.Axes\n The matplotlib axes containing the plot.\n \"\"\"\n # one data point corresponds to a row of embedding_data\n embedding_data = self._get_embedding_data()\n\n if annotations is None:\n annotations = self._get_default_embedding_annotations()\n\n ax = embeddings_plot(data=embedding_data, annotations=annotations, **kwargs)\n\n if outfile is not None:\n plt.savefig(outfile, bbox_inches=\"tight\")\n\n return ax"
}
] | from abc import abstractmethod
from scipy.spatial.distance import squareform
from scipy.special import gammaln
from ..utils import match_signatures_pair, shape_checker, type_checker
from ._utils_klnmf import kl_divergence, poisson_llh, samplewise_kl_divergence
from .initialization import initialize
from .signature_nmf import SignatureNMF
import numpy as np
import pandas as pd | 8,050 | * self.dim_embeddings
* self.n_samples
* np.log(2 * np.pi * self.sigma_sq)
)
elbo -= np.sum(self.U**2) / (2 * self.sigma_sq)
return elbo
@property
def objective(self) -> str:
return "maximize"
def _surrogate_objective_function(self, penalize_sample_embeddings=True) -> float:
"""
The surrogate lower bound of the ELBO.
"""
p = self._update_p()
exposures = self.exposures.values
aux = np.log(self.W)[:, :, None] + np.log(exposures)[None, :, :] - np.log(p)
sof_value = np.einsum("VD,VKD,VKD->", self.X, p, aux, optimize="greedy").item()
sof_value -= np.sum(gammaln(1 + self.X))
sof_value -= np.sum(exposures)
sof_value -= (
0.5
* self.dim_embeddings
* self.n_signatures
* np.log(2 * np.pi * self.sigma_sq)
)
sof_value -= np.sum(self.L**2) / (2 * self.sigma_sq)
if penalize_sample_embeddings:
sof_value -= (
0.5
* self.dim_embeddings
* self.n_samples
* np.log(2 * np.pi * self.sigma_sq)
)
sof_value -= np.sum(self.U**2) / (2 * self.sigma_sq)
return sof_value
def loglikelihood(self):
return self.objective_function()
@property
def _n_parameters(self):
"""
There are n_features * n_signatures parameters corresponding to
the signature matrix, each embedding corresponds to dim_embeddings parameters,
and each signature & sample has a real valued bias.
Finally, the model variance is a single positive real number.
"""
n_parameters_signatures = self.n_features * self.n_signatures
n_parameters_embeddings = self.dim_embeddings * (
self.n_signatures + self.n_samples
)
n_parameters_biases = self.n_samples + self.n_signatures
n_parameters_exposures = n_parameters_embeddings + n_parameters_biases
n_parameters = n_parameters_signatures + n_parameters_exposures + 1
return n_parameters
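# Worked example with illustrative numbers (not from the original code):
# n_features = 96, n_signatures = 5, n_samples = 100, dim_embeddings = 3 gives
# 96 * 5 = 480 signature parameters, 3 * (5 + 100) = 315 embedding parameters,
# 5 + 100 = 105 bias parameters and 1 variance parameter,
# i.e. n_parameters = 480 + 315 + 105 + 1 = 901.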
@abstractmethod
def _update_alpha(self):
pass
@abstractmethod
def _update_sigma_sq(self):
pass
@abstractmethod
def _update_W(self):
pass
@abstractmethod
def _update_p(self):
pass
@abstractmethod
def _update_l(self, index, aux_row, outer_prods_U):
r"""
Input:
------
index: int
The index of the signature whose embedding is updated
aux_row: np.ndarray
Row of the following matrix:
aux_kd = \sum_v X_vd * p_vkd.
This auxiliary matrix is used for updating the signatures
and the sample embeddings. The aux_row argument
is the k-th row of aux, where k is equal to 'index'.
outer_prods_U: np.ndarray
All outer products of the sample embeddings.
shape: (n_samples, dim_embeddings, dim_embeddings)
"""
@abstractmethod
def _update_u(self, index, aux_col, outer_prods_L):
r"""
Input:
------
index: int
The index of the sample whose embedding is updated
aux_col: np.ndarray
Column of the following matrix:
aux_kd = \sum_v X_vd * p_vkd.
This auxiliary matrix is used for updating the signatures
and the sample embeddings. The aux_col argument
is the d-th column of aux, where d is equal to 'index'.
outer_prods_L: np.ndarray
All outer products of the signature embeddings.
shape: (n_signatures, dim_embeddings, dim_embeddings)
"""
def _check_given_biases(self, given_biases, expected_n_biases, name):
type_checker(name, given_biases, np.ndarray)
|
EPSILON = np.finfo(np.float32).eps
class CorrNMF(SignatureNMF):
r"""
The abstract class CorrNMF unifies the structure of deterministic and
stochastic algorithms to fit the parameters of correlated NMF (CorrNMF).
The model parameters are the signature and sample biases, the variance, and the
signature matrix. The latent variables are the signature and sample embeddings.
Overview:
Every child class has to implement the following methods:
- _update_alpha:
update the sample exposure biases \alpha
- _update_beta:
update the signature exposure biases \beta
- _update_sigma_sq:
update the embedding distribution variance \sigma^2
- _update_W:
update the signature matrix W
- _update_p:
update the auxiliary parameters p
- _update_l:
update a single signature embedding l
- _update_u:
update a single sample embedding u
- fit:
Run CorrNMF on given mutation count data.
The following attributes are implemented in CorrNMF:
- signatures: pd.DataFrame
The signature matrix including mutation type names and signature names
- exposures: pd.DataFrame
The exposure matrix including the signature names and sample names
- reconstruction_error: float
The reconstruction error between the count matrix
and the reconstructed count matrix.
- samplewise_reconstruction_error: np.ndarray
The samplewise reconstruction error between the sample counts
and the reconstructed sample counts.
- _n_parameters:
The number of parameters fitted in CorrNMF
- objective: str
"minimize" or "maximize". CorrNMF maximizes the objective function.
- corr_signatures: pd.DataFrame
The signature correlation matrix induced by the signature embeddings
- corr_samples: pd.DataFrame
The sample correlation matrix induced by the sample embeddings
The following methods are implemented in CorrNMF:
- objective_function:
The evidence lower bound (ELBO) of the log-likelihood.
Note: The ELBO is sometimes called the variational lower bound.
- _surrogate_objective_function:
A surrogate lower bound of the ELBO after introducing the
auxiliary parameters p. In contrast to the original objective_function,
the surrogate is strictly convex in the signature and sample embeddings
- loglikelihood:
The loglikelihood of the underlying generative model
- _initialize:
Initialize all model parameters and latent variables depending on the
initialization method chosen
- _get_embedding_data:
A helper function for the embedding plot that returns the signature
and sample embeddings
- _get_default_embedding_annotations:
A helper function for the embedding plot that returns the signature names
More specific docstrings are written for the respective attributes and methods.
"""
def __init__(
self,
n_signatures=1,
dim_embeddings=None,
init_method="nndsvd",
min_iterations=500,
max_iterations=10000,
conv_test_freq=10,
tol=1e-7,
):
"""
Input:
------
n_signatures: int
The number of underlying signatures that are assumed to
have generated the mutation count data
dim_embeddings: int
The assumed dimension of the signature and sample embeddings.
Should be smaller than or equal to the number of signatures, as a dimension
equal to the number of signatures covers the case of independent
signatures. The smaller the embedding dimension, the stronger the
enforced correlation structure on both signatures and samples.
init_method: str
One of "custom", "flat", "hierarchical_cluster", "nndsvd",
"nndsvda", "nndsvdar" "random" and "separableNMF".
See the initialization module for further details.
min_iterations: int
The minimum number of iterations to perform during inference
max_iterations: int
The maximum number of iterations to perform during inference
conv_test_freq: int
The frequency at which the algorithm is tested for convergence.
The objective function value is only computed every 'conv_test_freq'
many iterations, which also affects a potentially saved history of
the objective function values.
tol: float
The CorrNMF algorithm is considered converged when the relative change of the
surrogate objective function of one iteration is smaller
than the tolerance 'tol'.
"""
super().__init__(
n_signatures,
init_method,
min_iterations,
max_iterations,
conv_test_freq,
tol,
)
if dim_embeddings is None:
dim_embeddings = n_signatures
self.dim_embeddings = dim_embeddings
# initialize data/fitting-dependent attributes
self.W = None
self.alpha = None
self.beta = None
self.L = None
self.U = None
self.sigma_sq = None
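# Usage sketch (illustrative; CorrNMF is abstract, so in practice a concrete
# subclass is instantiated -- the class name below is hypothetical):
#   model = SomeCorrNMFSubclass(n_signatures=5, dim_embeddings=2, init_method="nndsvd")
#   model.fit(mutation_count_dataframe)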
@property
def signatures(self) -> pd.DataFrame:
signatures = pd.DataFrame(
self.W, index=self.mutation_types, columns=self.signature_names
)
return signatures
@property
def exposures(self) -> pd.DataFrame:
"""
In contrast to the classical NMF framework, the exposure matrix is
restructured and determined by the signature & sample biases and
embeddings.
"""
exposures = pd.DataFrame(
np.exp(self.alpha + self.beta[:, np.newaxis] + self.L.T @ self.U),
index=self.signature_names,
columns=self.sample_names,
)
return exposures
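# Elementwise form of the matrix built above (added for clarity): assuming alpha
# has shape (n_samples,), beta has shape (n_signatures,), L has shape
# (dim_embeddings, n_signatures) and U has shape (dim_embeddings, n_samples),
#   exposures[k, d] = exp(alpha[d] + beta[k] + L[:, k] @ U[:, d])
# i.e. a per-sample bias, a per-signature bias and an embedding inner product,
# pushed through exp().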
@property
def reconstruction_error(self):
return kl_divergence(self.X, self.W, self.exposures.values)
@property
def samplewise_reconstruction_error(self):
return samplewise_kl_divergence(self.X, self.W, self.exposures.values)
def objective_function(self, penalize_sample_embeddings=True) -> float:
"""
The evidence lower bound (ELBO)
"""
elbo = poisson_llh(self.X, self.signatures.values, self.exposures.values)
elbo -= (
0.5
* self.dim_embeddings
* self.n_signatures
* np.log(2 * np.pi * self.sigma_sq)
)
elbo -= np.sum(self.L**2) / (2 * self.sigma_sq)
if penalize_sample_embeddings:
elbo -= (
0.5
* self.dim_embeddings
* self.n_samples
* np.log(2 * np.pi * self.sigma_sq)
)
elbo -= np.sum(self.U**2) / (2 * self.sigma_sq)
return elbo
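# Sketch of the quantity computed above (added for clarity): writing the
# exposure matrix as E,
#   ELBO = log Poisson(X | W @ E)
#          - (dim_embeddings * n_signatures / 2) * log(2 * pi * sigma_sq) - ||L||_F^2 / (2 * sigma_sq)
#          - (dim_embeddings * n_samples / 2) * log(2 * pi * sigma_sq) - ||U||_F^2 / (2 * sigma_sq)
# where the last line is only included if penalize_sample_embeddings is True.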
@property
def objective(self) -> str:
return "maximize"
def _surrogate_objective_function(self, penalize_sample_embeddings=True) -> float:
"""
The surrogate lower bound of the ELBO.
"""
p = self._update_p()
exposures = self.exposures.values
aux = np.log(self.W)[:, :, None] + np.log(exposures)[None, :, :] - np.log(p)
sof_value = np.einsum("VD,VKD,VKD->", self.X, p, aux, optimize="greedy").item()
sof_value -= np.sum(gammaln(1 + self.X))
sof_value -= np.sum(exposures)
sof_value -= (
0.5
* self.dim_embeddings
* self.n_signatures
* np.log(2 * np.pi * self.sigma_sq)
)
sof_value -= np.sum(self.L**2) / (2 * self.sigma_sq)
if penalize_sample_embeddings:
sof_value -= (
0.5
* self.dim_embeddings
* self.n_samples
* np.log(2 * np.pi * self.sigma_sq)
)
sof_value -= np.sum(self.U**2) / (2 * self.sigma_sq)
return sof_value
def loglikelihood(self):
return self.objective_function()
@property
def _n_parameters(self):
"""
There are n_features * n_signatures parameters corresponding to
the signature matrix, each embedding corresponds to dim_embeddings parameters,
and each signature & sample has a real valued bias.
Finally, the model variance is a single positive real number.
"""
n_parameters_signatures = self.n_features * self.n_signatures
n_parameters_embeddings = self.dim_embeddings * (
self.n_signatures + self.n_samples
)
n_parameters_biases = self.n_samples + self.n_signatures
n_parameters_exposures = n_parameters_embeddings + n_parameters_biases
n_parameters = n_parameters_signatures + n_parameters_exposures + 1
return n_parameters
@abstractmethod
def _update_alpha(self):
pass
@abstractmethod
def _update_sigma_sq(self):
pass
@abstractmethod
def _update_W(self):
pass
@abstractmethod
def _update_p(self):
pass
@abstractmethod
def _update_l(self, index, aux_row, outer_prods_U):
r"""
Input:
------
index: int
The index of the signature whose embedding is updated
aux_row: np.ndarray
Row of the following matrix:
aux_kd = \sum_v X_vd * p_vkd.
This auxiliary matrix is used for updating the signatures
and the sample embeddings. The aux_row argument
is the k-th row of aux, where k is equal to 'index'.
outer_prods_U: np.ndarray
All outer products of the sample embeddings.
shape: (n_samples, dim_embeddings, dim_embeddings)
"""
@abstractmethod
def _update_u(self, index, aux_col, outer_prods_L):
r"""
Input:
------
index: int
The index of the sample whose embedding is updated
aux_col: np.ndarray
Column of the following matrix:
aux_kd = \sum_v X_vd * p_vkd.
This auxiliary matrix is used for updating the signatures
and the sample embeddings. The aux_col argument
is the d-th column of aux, where d is equal to 'index'.
outer_prods_L: np.ndarray
All outer products of the signature embeddings.
shape: (n_signatures, dim_embeddings, dim_embeddings)
"""
def _check_given_biases(self, given_biases, expected_n_biases, name):
type_checker(name, given_biases, np.ndarray) | shape_checker(name, given_biases, (expected_n_biases,)) | 1 | 2023-10-08 04:29:42+00:00 | 12k |
shadlc/FreeKill-Web-Panel | src/v1.py | [
{
"identifier": "restful",
"path": "src/utils.py",
"snippet": "def restful(code: int, msg: str = '', data: dict = {}) -> None:\n retcode = 1\n if code == 200:\n retcode = 0\n return jsonify({'code': code,\n 'retcode': retcode,\n 'msg': msg,\n 'data': data\n }), code"
},
{
"identifier": "isPortBusy",
"path": "src/utils.py",
"snippet": "def isPortBusy(port: int) -> bool:\n for conn in psutil.net_connections():\n if conn.status == 'LISTEN' and conn.laddr.port == port:\n return True\n return False"
},
{
"identifier": "startGameServer",
"path": "src/utils.py",
"snippet": "def startGameServer(name: str, port: int, path: str, session_type: str) -> int:\n if session_type == 'tmux':\n command = f''' cd {path}; tmux new -d -s \"{name}\" \"./FreeKill -s {port} 2>&1 | tee ./{config.log_file}\" '''\n else:\n name = name.split(\".\", 1).pop()\n command = f''' cd {path}; screen -dmS \"{name}\" bash -c \"./FreeKill -s {port} 2>&1 | tee ./{config.log_file}\" '''\n logging.debug(f' >>> 独立进程 执行指令 {command}')\n subprocess.Popen([command], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()\n time.sleep(0.5)\n try:\n for process in psutil.process_iter():\n cmd = process.cmdline()\n if './FreeKill' in cmd and '-s' in cmd and f'{port}' in cmd:\n return process.pid\n except psutil.NoSuchProcess:...\n return 0"
},
{
"identifier": "stopGameServer",
"path": "src/utils.py",
"snippet": "def stopGameServer(name: str, session_type: str) -> bool:\n if session_type == 'tmux':\n command = f''' tmux send-keys -t \"{name}\" C-d '''\n else:\n command = f''' screen -S {name} -X stuff \"\\004\\004\" '''\n result = runCmd(command)\n if result != '':\n return True\n return False"
},
{
"identifier": "deleteGameServer",
"path": "src/utils.py",
"snippet": "def deleteGameServer(server_name: str) -> str:\n server_dict = getServerFromConfig()\n del_name = ''\n for name in server_dict:\n if name == server_name:\n del_name = name\n if del_name:\n server_dict.pop(del_name)\n return saveServerToConfig(server_dict)\n return '服务器已经不存在'"
},
{
"identifier": "updateGameServer",
"path": "src/utils.py",
"snippet": "def updateGameServer(server_name: str) -> str:\n server_path = ''\n server_dict = getServerFromConfig()\n for name in server_dict:\n if name == server_name:\n server_path = server_dict[name][1]\n update_cmd = f'''\n cd {server_path} \\\n && echo \"正在读取最新版本...\\n\" \\\n && git reset --hard 2>&1 \\\n && git pull --tags origin master 2>&1 \\\n && latest_tag=$(git describe --tags `git rev-list --tags --max-count=1`) 2>&1 \\\n && git checkout $latest_tag 2>&1 \\\n && echo \"\\n正在编译...\\n\" \\\n && ([ -f include/lua.h ] || cp -r /usr/include/lua5.4/* include) \\\n && ([ -d build ] || mkdir build) \\\n && cd build \\\n && cmake .. \\\n && make \\\n && cd .. \\\n && ([ -f FreeKill ] || ln -s build/FreeKill)\n '''\n logging.debug(f' >>> 独立进程 执行指令' + update_cmd.replace('\\n', '').replace(' ',''))\n process = subprocess.Popen(\n update_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n universal_newlines=True\n )\n while True:\n output = process.stdout.readline()\n if output:\n yield f'event: message\\ndata: {output}\\n\\n'\n elif process.poll() is not None:\n if process.poll() == 0:\n yield f'event: message\\ndata: <span class=\"green\">服务器更新成功</span>\\n\\n'\n else:\n yield f'event: message\\ndata: <span class=\"red\">服务器更新失败,错误码:{process.poll()}</span><br>\\n\\n'\n return"
},
{
"identifier": "backupGameServer",
"path": "src/utils.py",
"snippet": "def backupGameServer(server_path: str) -> [bool, str]:\n try:\n backup_dir = config.backup_directory\n ignore_list: list = [backup_dir] + config.backup_ignore\n ignore_list = [os.path.join(server_path, i) for i in ignore_list]\n backup_dir_path = os.path.join(server_path, backup_dir) if backup_dir[0] != '/' else backup_dir\n os.makedirs(backup_dir_path, exist_ok=True)\n backup_zip = os.path.join(backup_dir_path, f'backup-{time.strftime(\"%Y%m%d-%H-%M-%S\", time.localtime())}.zip')\n with zipfile.ZipFile(backup_zip, 'w', zipfile.ZIP_DEFLATED) as zip:\n for root, dirs, files in os.walk(server_path):\n if len([i for i in ignore_list if i in root]):\n continue\n for file in files:\n file_path = os.path.join(root, file)\n if file_path in ignore_list:\n continue\n zip.write(file_path, os.path.relpath(file_path, server_path))\n backup_size = os.path.getsize(backup_zip) / (1024 * 1024)\n return True, f'备份包路径:[{backup_zip}]\\n备份包大小[{round(backup_size, 2)}MB]'\n except PermissionError as e:\n return False, f'无权限在该路径保存备份,请修改配置文件\\n{e}'\n except Exception as e:\n return False, f'失败原因:{e}'"
},
{
"identifier": "getGameServerStat",
"path": "src/utils.py",
"snippet": "def getGameServerStat(server_path: str) -> [bool, str]:\n try:\n db_file = os.path.join(server_path, 'server/users.db')\n logging.debug(f'读取数据库{db_file}')\n conn = sqlite3.connect(db_file)\n cursor = conn.cursor()\n # 查询每日日活\n cursor.execute(\"SELECT count(*) FROM usergameinfo WHERE strftime('%Y%m%d', lastLoginTime, 'unixepoch', 'localtime') = strftime('%Y%m%d', 'now', 'localtime');\")\n daily_active_result = cursor.fetchone()\n daily_active = daily_active_result[0] if len(daily_active_result) else 0\n # 查询每月月活\n cursor.execute(\"SELECT count(*) FROM usergameinfo WHERE strftime('%Y%m', lastLoginTime, 'unixepoch', 'localtime') = strftime('%Y%m', 'now', 'localtime');\")\n month_active_result = cursor.fetchone()\n month_active = month_active_result[0] if len(month_active_result) else 0\n # 查询玩家胜率\n cursor.execute('SELECT * FROM playerWinRate;')\n player_win_rate_result = cursor.fetchall()\n player_win_rate = {\"0_all\": {}}\n for item in player_win_rate_result:\n id, player, mode, win, lose, draw, total, win_rate = item\n if mode not in player_win_rate:\n player_win_rate[mode] = {}\n player_win_rate[mode][player] = [win_rate, win, lose, draw, total]\n if player in player_win_rate[\"0_all\"]:\n des = [win_rate, win, lose, draw, total]\n sou = player_win_rate[\"0_all\"][player]\n player_win_rate[\"0_all\"][player] = [x + y for x, y in zip(sou, des)]\n else:\n player_win_rate[\"0_all\"][player] = [win_rate, win, lose, draw, total]\n for player in player_win_rate[\"0_all\"]:\n data = player_win_rate[\"0_all\"][player]\n player_win_rate[\"0_all\"][player][0] = round(data[1] / data[4] * 100, 2)\n # 查询角色胜率\n cursor.execute('SELECT * FROM generalWinRate;')\n general_win_rate_result = cursor.fetchall()\n general_win_rate = {\"0_all\": {}}\n for item in general_win_rate_result:\n general, mode, win, lose, draw, total, win_rate = item\n if mode not in general_win_rate:\n general_win_rate[mode] = {}\n general_win_rate[mode][general] = [win_rate, win, lose, draw, total]\n if general in general_win_rate[\"0_all\"]:\n des = [win_rate, win, lose, draw, total]\n sou = general_win_rate[\"0_all\"][general]\n general_win_rate[\"0_all\"][general] = [x + y for x, y in zip(sou, des)]\n else:\n general_win_rate[\"0_all\"][general] = [win_rate, win, lose, draw, total]\n for general in general_win_rate[\"0_all\"]:\n data = general_win_rate[\"0_all\"][general]\n general_win_rate[\"0_all\"][general][0] = round(data[1] / data[4] * 100, 2)\n cursor.close()\n conn.close()\n\n statistics_dict = {\"daily_active\": daily_active, \"month_active\": month_active, \"player_win_rate\": player_win_rate, \"general_win_rate\": general_win_rate}\n return True, statistics_dict\n except Exception as e:\n logging.error(f'读取数据库{db_file}发生错误:{e}')\n return False, f'{e}'"
},
{
"identifier": "getGameTransTable",
"path": "src/utils.py",
"snippet": "def getGameTransTable(directory: str, raw: str = False) -> dict:\n directory = os.path.join(directory, 'packages')\n root_path, pack_dir = os.path.split(directory.rstrip('/'))\n pack_path_list = [f.path for f in os.scandir(directory) if f.is_dir()]\n trans_table = config.custom_trans\n for pack_path in pack_path_list:\n pack_name = os.path.basename(pack_path)\n init_file = os.path.join(pack_dir, pack_name, 'init.lua')\n _, _, trans_dict = extractExtension(root_path, init_file)\n if raw:\n trans_table.update(trans_dict)\n else:\n trans_table.update({key: value for key, value in trans_dict.items() if not key.startswith(('~', '@', '#', '$', '^', ':'))})\n return trans_table"
},
{
"identifier": "readGameConfig",
"path": "src/utils.py",
"snippet": "def readGameConfig(path: str) -> [bool, str]:\n try:\n with open(f'{path}/freekill.server.config.json') as f:\n config_text = f.read()\n return True, config_text\n except Exception as e:\n return False, str(e)"
},
{
"identifier": "writeGameConfig",
"path": "src/utils.py",
"snippet": "def writeGameConfig(path: str, config: dict | str) -> str | None:\n try:\n if type(config) == str:\n open(f'{path}/freekill.server.config.json', 'w').write(config)\n return\n config_json = json.load(open(f'{path}/freekill.server.config.json'))\n for key in config:\n if config[key] != None:\n config_json[key] = config[key]\n json.dump(config_json, open(f'{path}/freekill.server.config.json', 'w'), ensure_ascii=False, indent=2)\n except Exception as e:\n logging.error(e)\n return e"
},
{
"identifier": "isFileExists",
"path": "src/utils.py",
"snippet": "def isFileExists(path: str) -> bool:\n try: open(path)\n except: return False\n return True"
},
{
"identifier": "runTmuxCmd",
"path": "src/utils.py",
"snippet": "def runTmuxCmd(name: str, cmd: str) -> str:\n command = f' tmux send-keys -t {name} \"{cmd}\" Enter;sleep 0.1;tmux capture-pane -peS - -t {name} 2>&1'\n result = runCmd(command)\n return result"
},
{
"identifier": "runScreenCmd",
"path": "src/utils.py",
"snippet": "def runScreenCmd(name: str, cmd: str, path: str='') -> str:\n command = command = f' screen -S {name} -X stuff \"{cmd}\\n\" '\n if not path:\n return runCmd(command)\n log_file = os.path.join(path, config.log_file)\n with open(log_file) as f:\n f.seek(0, 2)\n runCmd(command)\n time.sleep(0.1)\n result = rmSpecialChar(f.read())\n return result"
},
{
"identifier": "appendFile",
"path": "src/utils.py",
"snippet": "def appendFile(path: str, content: str) -> str | None:\n try:\n open(path, mode='a').write(content)\n except Exception as e:\n return f'写入错误:{e}'"
},
{
"identifier": "runCmdCorrect",
"path": "src/utils.py",
"snippet": "def runCmdCorrect(cmd: str, log=True) -> bool:\n stime = time.time()\n try:\n result = subprocess.run(f'{cmd}', shell=True, capture_output=True, text=True)\n etime = time.time()\n if log:\n logging.debug(f' >>> 耗时({round(etime - stime, 3)})执行指令 {cmd}')\n if result.returncode != 0:\n raise EOFError(result.stderr)\n return True\n except Exception as e:\n logging.debug(f'执行外部指令不成功:{e}')\n return False"
},
{
"identifier": "getSessionPid",
"path": "src/utils.py",
"snippet": "def getSessionPid(pid: int, recursion: bool=True) -> int:\n if pid == 1 or pid == 0:\n return 0\n try:\n for process in psutil.process_iter():\n if pid == process.pid:\n cmd = process.cmdline()\n if 'SCREEN' in cmd:\n return process.pid\n elif 'bash' in cmd or '-bash' in cmd:\n session_pid = getSessionPid(process.ppid(), False)\n if session_pid:\n return session_pid\n return process.pid\n elif recursion:\n return getSessionPid(process.ppid())\n except psutil.NoSuchProcess:...\n return 0"
},
{
"identifier": "getGitTree",
"path": "src/utils.py",
"snippet": "def getGitTree(url: str) -> list:\n content = ''\n try:\n git_url = url.replace('.git', '')\n repo = '/'.join(git_url.split('/')[-2:])\n if 'gitee.com' in git_url:\n commit_url = f'https://gitee.com/api/v5/repos/{repo}/commits?per_page=100'\n branch_url = f'https://gitee.com/api/v5/repos/{repo}/branches'\n elif 'github.com' in git_url:\n commit_url = f'https://api.github.com/repos/{repo}/commits?per_page=100'\n branch_url = f'https://api.github.com/repos/{repo}/branches'\n else:\n return False, '不支持此站点的解析'\n\n branch_response = requests.get(branch_url, timeout=10)\n logging.debug(f'请求外部地址: {branch_url}')\n commit_response = requests.get(commit_url, timeout=20)\n logging.debug(f'请求外部地址: {commit_url}')\n if branch_response.status_code in [200, 304]:\n branches = branch_response.json()\n for branch in branches:\n name = branch['name']\n sha = branch['commit']['sha']\n tree[name] = {'sha': sha, 'commits': []}\n if commit_response.status_code in [200, 304]:\n commits = commit_response.json()\n for commit in commits:\n sha = commit['sha']\n message = commit['commit']['message']\n author = commit['commit']['author']['name']\n parents = [i['sha'] for i in commit['parents']]\n for branch in tree:\n if (sha == tree[branch]['sha']\n or sha in [i for i in tree[branch]['commits'][-1].get('parents', '')]):\n tree[branch]['commits'].append({\n 'sha': sha,\n 'message': message,\n 'author': author,\n 'parents': parents,\n })\n return True, tree\n\n return False, commit_response.text\n return False, branch_response.text\n\n except Exception as e:\n logging.error(e)\n return False, str(e)"
},
{
"identifier": "setPackVersionForServer",
"path": "src/utils.py",
"snippet": "def setPackVersionForServer(server_path: str, pack_code: str, pack_branch: str, pack_hash: str) -> str:\n try:\n pack_path = os.path.join(server_path, 'packages', pack_code)\n db_file = os.path.join(server_path, 'packages/packages.db')\n logging.debug(f'读取数据库 {db_file}')\n conn = sqlite3.connect(db_file)\n cursor = conn.cursor()\n cursor.execute('SELECT * FROM packages')\n pack_list: list[tuple] = cursor.fetchall()\n db_pack_dict = {pack[0]: pack[1:] for pack in pack_list}\n if pack_code in db_pack_dict:\n now_hash = db_pack_dict[pack_code][1]\n if now_hash == pack_hash:\n cursor.close()\n conn.close()\n yield f'event: message\\ndata: <span class=\"red\">切换失败,无法切换到当前版本</span>\\n\\n'\n return\n checkout_cmd = \\\n f'cd {pack_path} && git reset --hard 2>&1 && git checkout {pack_branch} 2>&1' \\\n + f' && git fetch 2>&1 && git -c advice.detachedHead=false checkout {pack_hash} 2>&1'\n logging.debug(f' >>> 独立进程 执行指令' + checkout_cmd)\n process = subprocess.Popen(\n checkout_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n universal_newlines=True\n )\n while True:\n output = process.stdout.readline()\n if output:\n yield f'event: message\\ndata: {output}\\n\\n'\n elif process.poll() is not None:\n if process.poll() == 0:\n cursor.execute(f'''UPDATE packages SET hash='{pack_hash}' WHERE name='{pack_code}'; ''')\n conn.commit()\n yield f'event: message\\ndata: <br>切换成功,刷新此页面更新展示,重启服务器生效\\n\\n'\n else:\n yield f'event: message\\ndata: <span class=\"red\">服务器更新失败,错误码:{process.poll()}</span><br>\\n\\n'\n cursor.close()\n conn.close()\n return\n except Exception as e:\n logging.error(f'读取拓展包数据库发生错误:{e}')\n yield f'event: message\\ndata: <span class=\"red\">切换失败,读取拓展包数据库发生错误:{e}</span><br>\\n\\n'"
},
{
"identifier": "Server",
"path": "src/game_server.py",
"snippet": "class Server:\n def __init__(self) -> None:\n self.name = ''\n self.port = 0\n self.pid = 0\n self.path = ''\n\n self.ban_words = []\n self.desc = ''\n self.icon_url = ''\n self.capacity = 0\n self.temp_ban_time = 0\n self.motd = ''\n self.hidden_packs = []\n self.enable_bots = True\n\n self.players = 0\n self.status = '初始化'\n self.version = 'v0.0.0'\n\n self.player_dict = {}\n self.room_dict = {}\n self.pack_dict = {}\n self.handled = False\n self.session_type = ''\n\n def init(self, name:str, port: int, pid: int = 0, path: str = '', session_type = '') -> None:\n if name == '' or port == '':\n return\n self.name = name\n self.port = port\n self.pid = pid\n self.session_type = session_type\n if pid:\n self.path = getProcPathByPid(self.pid)\n elif path:\n self.path = path\n if not self.readConfig():\n return\n if not self.pid or getProcessUptime(self.pid) == '0':\n self.status = '未运行'\n self.version = getVersionFromPath(self.path)\n\n def start(self) -> str | None:\n session_type = self.session_type if self.session_type else 'tmux'\n if pid := startGameServer(self.name, self.port, self.path, session_type):\n self.pid = pid\n if self.session_type == 'screen':\n self.name = f'{getSessionPid(self.pid)}.{self.name.split(\".\", 1).pop()}'\n return\n return '服务器启动失败,该端口可能已被占用'\n\n def info(self, server_list: list) -> dict:\n uptime = '0'\n if not isPortBusy(self.port):\n self.status = '已停止'\n else:\n self.status = '运行中'\n for server_info in server_list:\n server_name = server_info[0] if len(server_info) else ''\n server_pid = int(server_info[1]) if len(server_info) >=2 else self.pid\n if self.name == server_name:\n self.pid = server_pid\n uptime = getProcessUptime(self.pid)\n break\n info = getServerInfo(self.name, self.port)\n if info:\n [self.version,\n self.icon_url,\n self.desc,\n self.capacity,\n self.players,\n self.ip] = info\n\n return {\n 'name': self.name,\n 'port': self.port,\n 'desc': self.desc,\n 'icon': getImgBase64FromURL(self.icon_url),\n 'capacity': self.capacity,\n 'players': self.players,\n 'status': self.status,\n 'version': getVersionFromPath(self.path),\n 'uptime': uptime,\n 'pid': self.pid,\n 'session_type': self.session_type,\n }\n\n def details(self, server_list: list) -> dict:\n self.readConfig()\n self.readPacks()\n self.handled = isHandledByPid(self.pid)\n info_dict = self.info(server_list)\n info_dict = {\n **info_dict,\n 'ban_words': self.ban_words,\n 'motd': self.motd,\n 'temp_ban_time': self.temp_ban_time,\n 'hidden_packs': self.hidden_packs,\n 'enable_bots': self.enable_bots,\n 'pack_list': self.pack_dict,\n 'room_list': self.room_dict,\n 'player_list': self.player_dict,\n 'session_type': self.session_type,\n 'handled': self.handled,\n }\n return info_dict\n \n def getPlayerList(self) -> dict:\n if isPortBusy(self.port):\n self.readPlayers()\n else:\n self.player_dict = {}\n return self.player_dict\n \n def getRoomList(self) -> dict:\n if isPortBusy(self.port):\n self.readRooms()\n else:\n self.room_dict = {}\n return self.room_dict\n\n def readConfig(self) -> bool:\n try:\n json_data : dict = json.load(open(f'{self.path}/freekill.server.config.json'))\n self.ban_words = json_data.get('banwords', [])\n self.desc = json_data.get('description', '')\n self.icon_url = json_data.get('iconUrl', '')\n self.capacity = json_data.get('capacity', 0)\n self.temp_ban_time = json_data.get('tempBanTime', 0)\n self.motd = json_data.get('motd', '')\n self.hidden_packs = json_data.get('hiddenPacks', [])\n self.enable_bots = json_data.get('enableBots', True)\n 
self.status = '运行中' if isPortBusy(self.port) else '已停止'\n return True\n except Exception as e:\n self.status = '配置读取异常'\n return False\n\n def readPlayers(self) -> None:\n self.player_dict = getPlayerList(self.name, self.session_type, self.path)\n self.players = len(self.player_dict)\n\n def readRooms(self) -> None:\n self.room_dict = getRoomList(self.name, self.session_type, self.path)\n\n def readPacks(self) -> None:\n self.pack_dict = getPackList(self.path)"
},
{
"identifier": "Controller",
"path": "src/controller.py",
"snippet": "class Controller:\n def __init__(self) -> None:\n self.server_list = []\n self.server_dict = {}\n self.list: list[Server | None] = []\n self.connection: Connection | None\n self.latest_fk_version = ''\n self.version_check_timestamp = 0\n\n self.refreshRunning()\n self.server_dict = getServerFromConfig()\n for server_name in self.server_dict:\n server_port = self.server_dict[server_name][0]\n server_path = self.server_dict[server_name][1]\n session_type = self.server_dict[server_name][2] if len(self.server_dict[server_name]) > 2 else 'tmux'\n\n if server_name not in [server.name for server in self.list]:\n server = Server()\n server.init(server_name, server_port, path=server_path, session_type=session_type)\n self.list.append(server)\n\n def refreshRunning(self) -> None:\n self.server_list = getServerList()\n del_server_list = []\n for server_info in self.server_list:\n server_name = server_info[0]\n server_pid = server_info[1]\n server_port = server_info[2]\n server_type = server_info[3]\n\n if server_name and server_name not in [server.name for server in self.list]:\n if del_server := [server for server in self.list if server.port == server_port]:\n del_server_list.append(del_server[0].name)\n self.list.remove(del_server[0])\n server = Server()\n server.init(server_name, server_port, server_pid, session_type=server_type)\n self.list.append(server)\n\n for server in self.list:\n if not isPortBusy(server.port) and server.name not in self.server_dict:\n self.list.remove(server)\n\n for server_name in del_server_list:\n if server_name in self.server_dict:\n self.server_dict.pop(server_name)\n saveServerToConfig(self.server_dict)\n\n def refreshConfig(self) -> None:\n self.server_dict = getServerFromConfig()\n\n def getList(self) -> list[Server]:\n self.refreshRunning()\n return self.list\n\n def add(self, server: Server) -> None:\n self.list.append(server)\n for server_name in [i for i in self.server_dict if self.server_dict[i][0] == server.port]:\n self.server_dict.pop(server_name)\n self.server_dict[server.name] = [server.port, server.path, server.session_type]\n saveServerToConfig(self.server_dict)\n\n def remove(self, server: Server) -> None:\n self.list.remove(server)\n\n def getDict(self) -> dict:\n self.refreshRunning()\n return self.server_dict\n\n def modifyDict(self, name, key, value) -> None:\n if key == 'port':\n self.server_dict[name][0] = value\n elif key == 'path':\n self.server_dict[name][1] = value\n self.saveDict()\n\n def saveDict(self) -> bool:\n return saveServerToConfig(self.server_dict)\n\n def checkFKVersion(self) -> str:\n if not self.latest_fk_version or time.time() - self.version_check_timestamp > 600:\n self.latest_fk_version = getFKVersion()\n self.version_check_timestamp = int(time.time())\n return self.latest_fk_version"
},
{
"identifier": "config",
"path": "src/utils.py",
"snippet": "def getImgBase64FromURL(url: str) -> str:\ndef getFKVersion() -> str | None:\ndef getGitTree(url: str) -> list:\ndef getVersionFromPath(path: str) -> str:\ndef runCmd(cmd: str, log=True) -> str:\ndef runCmdCorrect(cmd: str, log=True) -> bool:\ndef getProcessUptime(pid: int) -> str:\ndef getServerList() -> list[str]:\ndef getSessionPid(pid: int, recursion: bool=True) -> int:\ndef isHandledByPid(pid: int) -> bool:\ndef getProcPathByPid(pid: int) -> str:\ndef getProcPortByPid(pid: int) -> int:\ndef isPortBusy(port: int) -> bool:\ndef isFileExists(path: str) -> bool:\ndef getServerFromConfig() -> dict:\ndef saveServerToConfig(server_dict: list[str]) -> str:\ndef restful(code: int, msg: str = '', data: dict = {}) -> None:\ndef startGameServer(name: str, port: int, path: str, session_type: str) -> int:\ndef stopGameServer(name: str, session_type: str) -> bool:\ndef deleteGameServer(server_name: str) -> str:\ndef updateGameServer(server_name: str) -> str:\ndef backupGameServer(server_path: str) -> [bool, str]:\ndef getGameServerStat(server_path: str) -> [bool, str]:\ndef readGameConfig(path: str) -> [bool, str]:\ndef writeGameConfig(path: str, config: dict | str) -> str | None:\ndef runScreenCmd(name: str, cmd: str, path: str='') -> str:\ndef runTmuxCmd(name: str, cmd: str) -> str:\ndef getServerInfo(name: str, port : int) -> list:\ndef getPlayerList(name: str, session_type: str, path: str) -> dict:\ndef getRoomList(name: str, session_type: str, path: str) -> dict:\ndef getPackList(path: str) -> dict:\ndef banFromServer(server_name: str, player_name: str, session_type: str, path: str) -> bool:\ndef sendMsgTo(name: str, msg: str, session_type: str, path: str) -> bool:\ndef rmSpecialChar(text: str) -> str:\ndef tailLogNum(file_path: str, num: int) -> str:\ndef tailLog(conn: Connection, sid: str) -> None:\ndef appendFile(path: str, content: str) -> str | None:\ndef queryPerf(conn: Connection, sid: str) -> None:\ndef getPerfByPid(pid: int) -> list:\ndef getGameTransTable(directory: str, raw: str = False) -> dict:\ndef getPackListFromDir(directory: str) -> dict:\ndef extractExtension(root_path: str, lua_file: str) -> tuple:\ndef setPackVersionForServer(server_path: str, pack_code: str, pack_branch: str, pack_hash: str) -> str:"
}
] | import os
import re
import json
import time
from flask import Response
from flask_classful import FlaskView, route, request
from src.utils import restful, isPortBusy, startGameServer, stopGameServer, deleteGameServer, updateGameServer, backupGameServer, getGameServerStat, getGameTransTable, readGameConfig, writeGameConfig, isFileExists, runTmuxCmd, runScreenCmd, appendFile, runCmdCorrect, getSessionPid, getGitTree, setPackVersionForServer
from src.game_server import Server
from src.controller import Controller
from src.utils import config | 8,975 | info_dict = server.details(self.controller.server_list)
return restful(200, '', info_dict)
return restful(404, '未找到该服务器')
@route('player_list', methods=['GET'])
def player_list(self):
name = request.args.get('name', '')
for server in self.controller.list:
if server.name == name:
info_dict = server.getPlayerList()
return restful(200, '', info_dict)
return restful(404, '未找到该服务器')
@route('room_list', methods=['GET'])
def room_list(self):
name = request.args.get('name', '')
for server in self.controller.list:
if server.name == name:
info_dict = server.getRoomList()
return restful(200, '', info_dict)
return restful(404, '未找到该服务器')
@route('trans_table', methods=['GET'])
def trans_table(self):
name = request.args.get('name', '')
raw = request.args.get('raw', False)
for server in self.controller.list:
if server.name == name:
trans_table = getGameTransTable(server.path, raw)
return restful(200, '', trans_table)
return restful(404, '未找到该服务器')
@route('execute', methods=['POST'])
def execute(self):
name = request.json.get('name', '')
cmd = request.json.get('cmd', '')
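# escape characters ('`', '"', '$', '\x01') that would otherwise be interpreted once the command is quoted and forwarded to the tmux/screen session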
for char in ['`', '"', '$', '\x01']:
cmd = cmd.replace(char, f'\\{char}')
server_list = self.controller.getList()
for server in server_list:
if server.name == name:
is_port_busy = isPortBusy(server.port)
if cmd == 'start' and not is_port_busy:
appendFile(f'{server.path}/{config.log_file}', '\x01')
time.sleep(0.1)
error = server.start()
if error:
return restful(400, error)
self.controller.connection.set(server.name, 'path', server.path)
self.controller.connection.set(server.name, 'pid', server.pid)
return restful(200, '服务器启动成功')
elif not is_port_busy:
return restful(405, '服务器未启动,请先启动')
else:
if server.session_type == 'tmux':
runTmuxCmd(name, cmd)
elif server.handled:
runScreenCmd(name, cmd)
else:
return restful(403, '无法与终端交互,请关闭服务器后由本程序接管启动')
return restful(200, '')
return restful(404, '未找到该服务器')
@route('add_server', methods=['POST'])
def add_server(self):
name = request.json.get('name', None)
port = int(request.json.get('port')) if request.json.get('port', '').isdigit() else None
path = request.json.get('path', None)
desc = request.json.get('desc', None)
icon = request.json.get('icon', None)
capacity = int(request.json.get('capacity')) if request.json.get('capacity', '').isdigit() else None
temp_ban_time = int(request.json.get('temp_ban_time')) if request.json.get('temp_ban_time', '').isdigit() else None
motd = request.json.get('motd', None)
enable_bots = request.json.get('enable_bots', None)
if enable_bots is not None:
enable_bots = bool(enable_bots)
session_type = request.json.get('session_type', None)
server_list = self.controller.getList()
if not name:
return restful(405, f'服务器名称不能为空')
elif not port:
return restful(405, f'服务器端口无效')
elif not path:
return restful(405, f'服务器启动路径不能为空')
elif name in [server.name for server in server_list]:
return restful(409, f'该服务器名称重名:{name}')
elif match := re.search(r'([<>:;"/\\\|\?\*\x00-\x1F\x7F\'\`\s])', name):
result = match.groups()[0]
return restful(409, f'该服务器名称存在不可用字符:<{result}>')
elif isPortBusy(port):
return restful(409, f'该端口已被占用:{port}')
elif port < 1025 or port > 65535:
return restful(409, f'该端口不可用:{port}')
elif not isFileExists(os.path.join(path,'FreeKill')):
return restful(409, f'该路径无效\n确保该路径下存在可执行的“FreeKill”文件')
elif match := re.search(r'([<>:;"\\|\?\*\x00-\x1F\x7F\'\`\s])', path):
result = match.groups()[0]
return restful(409, f'该服务器路径存在不可用字符:<{result}>')
elif path in [server.path for server in server_list]:
return restful(409, f'该路径已经启动了一个服务器')
elif session_type not in ['tmux', 'screen']:
return restful(409, f'本程序仅支持启动tmux或screen服')
elif session_type == 'tmux' and not runCmdCorrect('tmux -V'):
return restful(409, f'服务器未安装tmux,无法以此方式启动')
elif session_type == 'screen' and not runCmdCorrect('screen -v'):
return restful(409, f'服务器未安装screen,无法以此方式启动')
if e := writeGameConfig(path, {
"description": desc,
"iconUrl": icon,
"capacity": capacity,
"tempBanTime": temp_ban_time,
"motd": motd,
"enableBots": enable_bots,
}):
return restful(400, f'服务器配置写入错误,启动失败:\n{e}')
pid = startGameServer(name, port, path, session_type)
if pid == 0:
return restful(400, '服务器启动失败,请联系管理员')
|
class V1API(FlaskView):
def __init__(self):
super().__init__()
self.controller : Controller
@route('/')
def index(self):
return 'V1 API'
@route('servers', methods=['GET'])
def servers(self):
server_dict_list = []
server_list = self.controller.getList()
for server in server_list:
server_dict_list.append(server.info(self.controller.server_list))
return restful(200, '', {'list': server_dict_list})
@route('details', methods=['GET'])
def details(self):
name = request.args.get('name', '')
server_list = self.controller.getList()
for server in server_list:
if server.name == name:
info_dict = server.details(self.controller.server_list)
return restful(200, '', info_dict)
return restful(404, '未找到该服务器')
@route('player_list', methods=['GET'])
def player_list(self):
name = request.args.get('name', '')
for server in self.controller.list:
if server.name == name:
info_dict = server.getPlayerList()
return restful(200, '', info_dict)
return restful(404, '未找到该服务器')
@route('room_list', methods=['GET'])
def room_list(self):
name = request.args.get('name', '')
for server in self.controller.list:
if server.name == name:
info_dict = server.getRoomList()
return restful(200, '', info_dict)
return restful(404, '未找到该服务器')
@route('trans_table', methods=['GET'])
def trans_table(self):
name = request.args.get('name', '')
raw = request.args.get('raw', False)
for server in self.controller.list:
if server.name == name:
trans_table = getGameTransTable(server.path, raw)
return restful(200, '', trans_table)
return restful(404, '未找到该服务器')
@route('execute', methods=['POST'])
def execute(self):
name = request.json.get('name', '')
cmd = request.json.get('cmd', '')
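# escape characters ('`', '"', '$', '\x01') that would otherwise be interpreted once the command is quoted and forwarded to the tmux/screen session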
for char in ['`', '"', '$', '\x01']:
cmd = cmd.replace(char, f'\\{char}')
server_list = self.controller.getList()
for server in server_list:
if server.name == name:
is_port_busy = isPortBusy(server.port)
if cmd == 'start' and not is_port_busy:
appendFile(f'{server.path}/{config.log_file}', '\x01')
time.sleep(0.1)
error = server.start()
if error:
return restful(400, error)
self.controller.connection.set(server.name, 'path', server.path)
self.controller.connection.set(server.name, 'pid', server.pid)
return restful(200, '服务器启动成功')
elif not is_port_busy:
return restful(405, '服务器未启动,请先启动')
else:
if server.session_type == 'tmux':
runTmuxCmd(name, cmd)
elif server.handled:
runScreenCmd(name, cmd)
else:
return restful(403, '无法与终端交互,请关闭服务器后由本程序接管启动')
return restful(200, '')
return restful(404, '未找到该服务器')
@route('add_server', methods=['POST'])
def add_server(self):
name = request.json.get('name', None)
port = int(request.json.get('port')) if request.json.get('port', '').isdigit() else None
path = request.json.get('path', None)
desc = request.json.get('desc', None)
icon = request.json.get('icon', None)
capacity = int(request.json.get('capacity')) if request.json.get('capacity', '').isdigit() else None
temp_ban_time = int(request.json.get('temp_ban_time')) if request.json.get('temp_ban_time', '').isdigit() else None
motd = request.json.get('motd', None)
enable_bots = request.json.get('enable_bots', None)
if enable_bots is not None:
enable_bots = bool(enable_bots)
session_type = request.json.get('session_type', None)
server_list = self.controller.getList()
if not name:
return restful(405, f'服务器名称不能为空')
elif not port:
return restful(405, f'服务器端口无效')
elif not path:
return restful(405, f'服务器启动路径不能为空')
elif name in [server.name for server in server_list]:
return restful(409, f'该服务器名称重名:{name}')
elif match := re.search(r'([<>:;"/\\\|\?\*\x00-\x1F\x7F\'\`\s])', name):
result = match.groups()[0]
return restful(409, f'该服务器名称存在不可用字符:<{result}>')
elif isPortBusy(port):
return restful(409, f'该端口已被占用:{port}')
elif port < 1025 or port > 65535:
return restful(409, f'该端口不可用:{port}')
elif not isFileExists(os.path.join(path,'FreeKill')):
return restful(409, f'该路径无效\n确保该路径下存在可执行的“FreeKill”文件')
elif match := re.search(r'([<>:;"\\|\?\*\x00-\x1F\x7F\'\`\s])', path):
result = match.groups()[0]
return restful(409, f'该服务器路径存在不可用字符:<{result}>')
elif path in [server.path for server in server_list]:
return restful(409, f'该路径已经启动了一个服务器')
elif session_type not in ['tmux', 'screen']:
return restful(409, f'本程序仅支持启动tmux或screen服')
elif session_type == 'tmux' and not runCmdCorrect('tmux -V'):
return restful(409, f'服务器未安装tmux,无法以此方式启动')
elif session_type == 'screen' and not runCmdCorrect('screen -v'):
return restful(409, f'服务器未安装screen,无法以此方式启动')
if e := writeGameConfig(path, {
"description": desc,
"iconUrl": icon,
"capacity": capacity,
"tempBanTime": temp_ban_time,
"motd": motd,
"enableBots": enable_bots,
}):
return restful(400, f'服务器配置写入错误,启动失败:\n{e}')
pid = startGameServer(name, port, path, session_type)
if pid == 0:
return restful(400, '服务器启动失败,请联系管理员') | server = Server() | 19 | 2023-10-14 12:34:08+00:00 | 12k |
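(Illustrative aside, not part of the dataset rows.) The row above drives a game server through tmux: runTmuxCmd types a command into a named session and then captures the pane. A minimal sketch of that same send-and-capture pattern with plain subprocess calls, assuming a hypothetical session name, looks like this:
import subprocess
import time

def send_and_capture(session: str, cmd: str) -> str:
    # Type the command into the target tmux session and press Enter.
    subprocess.run(["tmux", "send-keys", "-t", session, cmd, "Enter"], check=True)
    time.sleep(0.1)  # short pause so the command has produced output
    # -p prints the pane, -e keeps escape sequences, -S - starts from the top of the history.
    captured = subprocess.run(
        ["tmux", "capture-pane", "-p", "-e", "-S", "-", "-t", session],
        capture_output=True,
        text=True,
        check=True,
    )
    return captured.stdout

# Usage (hypothetical session name): send_and_capture("fk-server", "help")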
wilhelmagren/finq | finq/portfolio.py | [
{
"identifier": "Asset",
"path": "finq/asset.py",
"snippet": "class Asset(object):\n \"\"\" \"\"\"\n\n def __init__(\n self,\n data: pd.Series,\n name: str,\n *,\n market: Optional[str] = None,\n index_name: Optional[str] = None,\n price_type: str = \"Close\",\n pre_compute: bool = True,\n ):\n \"\"\" \"\"\"\n\n self._data = data\n self._name = name\n self._market = market\n self._index_name = index_name\n self._price_type = price_type\n self._pre_compute = pre_compute\n self._metrics = {}\n\n if pre_compute:\n log.info(\"pre-computing some common metrics...\")\n self.compute_common_metrics()\n log.info(\"OK!\")\n\n def __eq__(self, other: Any) -> bool:\n \"\"\"\n Compare self with the other object. If ``other`` is of instance class\n ``Asset`` then compare their hashes. Otherwise ``False``.\n\n Parameters\n ----------\n other : Any\n The other object to compare equality against.\n\n Returns\n -------\n bool\n Whether or not they objects are equal.\n\n \"\"\"\n if isinstance(other, self.__class__):\n return hash(self) == hash(other)\n return False\n\n def __hash__(self) -> int:\n \"\"\"\n Compute a hash from the following attributes of the ``Asset`` object:\n (`_name`, `_market_`, `_index_name`, `_price_type`).\n\n NOTE: the ``Asset`` object is mutable, thus, the hash functionality\n can have unknown side effects... Use responsibly.\n\n Returns\n -------\n int\n The computed hash value.\n\n \"\"\"\n return hash(\n (\n len(self._data),\n self._data.mean(),\n self._data.std(),\n self._name,\n self._market,\n self._index_name,\n self._price_type,\n )\n )\n\n def __str__(self) -> str:\n \"\"\" \"\"\"\n\n format = f\"<{self.__class__.__name__} called `{self._name}`\"\n if self._market:\n format += f\" on {self._market}\"\n if self._index_name:\n format += f\" in {self._index_name}\"\n\n format += f\" (price type: {self._price_type})\"\n format += f\"\\n-- num samples:\\t\\t\\t{self._data.shape[0]}\"\n\n drm = self._metrics.get(\"daily_returns_mean\", None)\n if drm:\n format += f\"\\n-- daily returns mean:\\t\\t{drm:.5f}\"\n\n yrm = self._metrics.get(\"yearly_returns_mean\", None)\n if yrm:\n format += f\"\\n-- yearly returns mean:\\t\\t{yrm:.5f}\"\n\n yv = self._metrics.get(\"yearly_volatility\", None)\n if yv:\n format += f\"\\n-- yearly volatility:\\t\\t{yv:.5f}\"\n\n skew = self._metrics.get(\"skewness\", None)\n if skew:\n format += f\"\\n-- unbiased skewness:\\t\\t{self._metrics['skewness']:.5f}\"\n\n format += f\"\\nobject located at {hex(id(self))}>\"\n\n return format\n\n def compute_common_metrics(self):\n \"\"\" \"\"\"\n self._metrics[\"daily_returns\"] = self.period_returns(period=1)\n self._metrics[\"daily_returns_mean\"] = self.period_returns_mean(period=1)\n self._metrics[\"yearly_returns_mean\"] = self.period_returns_mean(period=252)\n self._metrics[\"yearly_volatility\"] = self.volatility(period=1, trading_days=252)\n self._metrics[\"skewness\"] = self.skewness()\n\n def period_returns(self, period: int = 1) -> pd.Series:\n \"\"\" \"\"\"\n return self._data.pct_change(periods=period)\n\n def period_returns_mean(self, period: int = 1) -> np.typing.DTypeLike:\n \"\"\" \"\"\"\n return self.period_returns(period=period).mean(axis=0)\n\n def volatility(\n self, period: int = 1, trading_days: int = 252\n ) -> np.typing.DTypeLike:\n \"\"\" \"\"\"\n return self.period_returns(period=period).std() * np.sqrt(trading_days)\n\n def skewness(self) -> np.float32:\n \"\"\"\n Computes the skewness of the saved data. Uses the ``Adjusted Fisher-Pearson\n standardized moment coefficient`` formula without bias [1, 2]. 
Skewness is a\n measure of the asymmetry of the probability distribution for a real-valued\n random variable around its mean.\n\n Returns\n -------\n np.float32\n The skewness measure for the saved historical price data.\n\n References\n ----------\n [1] Skewness calculation on scipy.\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.skew.html\n [2] Moment calculation on scipy.\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.moment.html\n\n \"\"\"\n return self._data.skew().astype(np.float32)\n\n @property\n def data(self) -> pd.Series:\n \"\"\"\n Return the saved data by accessing it as a property of the ``Asset`` object.\n\n Returns\n -------\n pd.Series\n A ``pd.Series`` copy of the saved data.\n\n \"\"\"\n return self._data\n\n @data.setter\n def data(self, data: pd.Series):\n \"\"\"\n Set the value of the data attribute for the ``Asset`` object.\n\n Parameters\n ----------\n data : pd.Series\n The new ``pd.Series`` to set as data attribute for the object.\n\n \"\"\"\n self._data = data\n\n @property\n def name(self) -> str:\n \"\"\"\n Get the name property of the ``Asset`` object.\n\n Returns\n -------\n str\n The name of the ``Asset``.\n\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name: str):\n \"\"\"\n Set the value of the name property for the ``Asset`` object.\n\n Parameters\n ----------\n name : str\n The new ``str`` to set as name attribute for the object.\n\n \"\"\"\n self._name = name\n\n def as_numpy(self, dtype: np.typing.DTypeLike = np.float32) -> np.ndarray:\n \"\"\"\n Return the saved data as an numpy array. It will have the shape (n_samples, ).\n\n Parameters\n ----------\n dtype : np.typing.DTypeLike\n The data type to create the new ``np.ndarray`` as.\n Defaults to ``np.float32``.\n\n Returns\n -------\n np.ndarray\n A new ``np.ndarray`` from the ``pd.Series`` data.\n\n \"\"\"\n return self._data.to_numpy().astype(dtype)"
},
{
"identifier": "Dataset",
"path": "finq/datasets/dataset.py",
"snippet": "class Dataset(object):\n \"\"\"\n A collection of ticker symbols and their historical price data. Fetches information\n and prices from Yahoo! Finance and optionally saves them to a local path for later\n use. Supports fixing missing values by interpolating ``NaN`` and verifying the\n integrity of the fetched data.\n\n Parameters\n ----------\n names : list | None\n The names of the financial assets to create a dataset with.\n symbols : list | None\n The ticker symbols corresponding to the names of the financial assets.\n market : str\n The name of the market to fetch the historical price data from.\n Defaults to ``OMX``.\n index_name : str | None\n The name of the financial index to get ticker symbols and names from.\n proxy : str | None\n The name of the proxy url to use for REST requests.\n cache_name: Path | str\n The name of the path to the file which stores the cache.\n Defaults to ``/home/.finq/http_cache``.\n n_requests : int\n The max number of requests to perform per ``t_interval``. Defaults to ``5``.\n t_interval : int\n The time interval (in seconds) to use with the ``CachedRateLimiter``.\n Defaults to ``1``.\n save : bool\n Wether or not to save the fetched data to a local file path.\n save_path : Path | str\n The local file path to potentially save any fetched data to.\n Defaults to ``.data/dataset/``.\n dataset_name : str\n The name of the ``Dataset`` class instance.\n separator : str\n The csv separator to use when loading and saving any ``pd.DataFrame``.\n Defaults to ``;``.\n\n \"\"\"\n\n def __init__(\n self,\n names: Optional[List[str]] = None,\n symbols: Optional[List[str]] = None,\n *,\n market: str = \"OMX\",\n index_name: Optional[str] = None,\n proxy: Optional[str] = None,\n cache_name: Union[Path, str] = default_finq_cache_path(),\n n_requests: int = 5,\n t_interval: int = 1,\n save: bool = False,\n save_path: Union[Path, str] = default_finq_save_path(),\n dataset_name: str = \"dataset\",\n separator: str = \";\",\n filter_symbols: Callable = lambda s: s,\n ) -> Optional[InvalidCombinationOfArgumentsError]:\n \"\"\" \"\"\"\n\n log.info(\n \"creating cached rate-limited session with \"\n f\"{n_requests} requests per {t_interval} seconds\"\n )\n\n # We combine a cache with rate-limiting to avoid triggering\n # Yahoo! Finance's rate-limiter that can otherwise corrupt data.\n # We specify a maximum number of requests N per X seconds.\n session = CachedRateLimiter(\n cache_name=cache_name,\n limiter=Limiter(\n RequestRate(\n n_requests,\n Duration.SECOND * t_interval,\n ),\n ),\n )\n\n if proxy:\n session.proxies.update(\n {\n \"https\": proxy,\n }\n )\n\n self._proxy = proxy\n self._session = session\n self._n_requests = n_requests\n self._t_interval = t_interval\n\n if (not names or not symbols) and isinstance(index_name, str):\n if market == \"OMX\":\n\n def filter_symbols(s):\n return s.replace(\" \", \"-\") + \".ST\"\n\n names, symbols = fetch_names_and_symbols(\n index_name,\n market=market,\n session=session,\n filter_symbols=filter_symbols,\n )\n\n if not names or not symbols:\n raise InvalidCombinationOfArgumentsError(\n \"You did not pass in a list of names and symbols, and if you \"\n \"passed in an index name to fetch, the request failed since \"\n f\"`{names=}` and `{symbols=}`. 
Did you pass in a valid index name?\"\n )\n\n if not (len(names) == len(symbols)):\n raise InvalidCombinationOfArgumentsError(\n \"Number of names does not match the number of ticker symbols, \"\n f\"{len(names)} != {len(symbols)}.\\n{names=}\\n{symbols=}\"\n )\n\n self._data = None\n self._info = None\n\n self._names = names\n self._symbols = symbols\n self._market = market\n self._index_name = index_name\n\n self._save = save\n self._save_path = Path(save_path) / dataset_name\n self._dataset_name = dataset_name\n self._separator = separator\n\n def __getitem__(self, key: str) -> Optional[pd.DataFrame]:\n \"\"\"\n Get the ``pd.DataFrame`` from the locally stored dictionary which maps ticker\n symbols to their corresponding historical price data.\n\n Parameters\n ----------\n key : str\n The dictionary key to get data for.\n\n Returns\n -------\n pd.DataFrame\n The data that is associated with the provided ticker key.\n\n \"\"\"\n return self._data.get(key, None)\n\n def __len__(self) -> int:\n \"\"\"\n Get the number of names in the dataset.\n\n Returns\n -------\n int\n The number of names.\n\n \"\"\"\n return len(self._symbols)\n\n @staticmethod\n def _save_data(data: pd.DataFrame, path: Union[Path, str], separator: str):\n \"\"\"\n Save the historical price data for a ticker to a local csv file.\n\n Parameters\n ----------\n data : pd.DataFrame\n The ``pd.DataFrame`` to save as a csv file.\n path : Path | str\n The local file name to save the csv to.\n separator : str\n The csv separator to use when saving the data. Defaults to ``;``.\n\n \"\"\"\n data.to_csv(\n path,\n sep=separator,\n header=True,\n )\n\n @staticmethod\n def _save_info(info: dict, path: Union[Path, str]):\n \"\"\"\n Save the ticker information dictionary to a local file as a ``json`` object.\n\n Parameters\n ----------\n info : dict\n The ticker information dictionary to save as a ``json`` file.\n path : Path | str\n The local file name to save the dictionary to.\n\n \"\"\"\n with open(path, \"w\") as f:\n json.dump(info, f)\n\n @staticmethod\n def _load_data(path: Union[Path, str], separator: str) -> pd.DataFrame:\n \"\"\"\n Create a new ``pd.DataFrame`` from data that is stored locally as a ``csv``.\n\n Parameters\n ----------\n path : Path | str\n The local file path to read the csv from.\n separator : str\n The separator to use for parsing the csv.\n\n Returns\n -------\n pd.DataFrame\n The data that was stored in the csv.\n\n \"\"\"\n return pd.read_csv(path, sep=separator, index_col=\"Date\")\n\n @staticmethod\n def _load_info(path: Union[Path, str]) -> dict:\n \"\"\"\n Parameters\n ----------\n path : Path | str\n The local file path to read the json object from.\n\n Returns\n -------\n dict\n A dictionary containing the information for the ticker.\n\n \"\"\"\n with open(path, \"r\") as f:\n return json.load(f)\n\n @staticmethod\n def _extract_dates_from_data(data: pd.DataFrame) -> Tuple[List, Dict]:\n \"\"\"\n Extract the ``Date`` column from a ``pd.DataFrame`` and produce a sorted list of\n unique dates for the ticker.\n\n Parameters\n ----------\n data : pd.DataFrame\n The data to extract ``Date`` column from.\n\n Returns\n -------\n tuple\n A list of the unique dates (sorted in ascending order) and a dictionary\n containing all ticker dates as key: ``str`` and value: ``list``.\n\n \"\"\"\n dates = {}\n all_dates = []\n\n for ticker, df in data.items():\n dates[ticker] = df.index.to_list()\n all_dates.extend(dates[ticker])\n\n unique_dates = sorted(list(set(all_dates)), reverse=False)\n\n return 
(unique_dates, dates)\n\n def _save_tickers_data(self):\n \"\"\" \"\"\"\n\n log.info(f\"saving fetched tickers data to {self._save_path}...\")\n\n for ticker in self._symbols:\n self._save_data(\n self._data[ticker],\n self._save_path / \"data\" / f\"{ticker}.csv\",\n separator=self._separator,\n )\n\n log.info(\"OK!\")\n\n def _save_tickers_info(self):\n \"\"\" \"\"\"\n\n log.info(f\"saving fetched tickers info to {self._save_path}...\")\n\n for ticker in self._symbols:\n self._save_info(\n self._info[ticker],\n self._save_path / \"info\" / f\"{ticker}.json\",\n )\n\n log.info(\"OK!\")\n\n def _save_data_and_info(self):\n \"\"\"\n Saves the info and data objects to a local file path.\n\n \"\"\"\n\n self._save_tickers_data()\n self._save_tickers_info()\n\n def _fetch_tickers_data(\n self,\n period: str,\n cols: List[str],\n ):\n \"\"\" \"\"\"\n\n data = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Fetching ticker {ticker} data from Yahoo! Finance\")\n\n yf_ticker = yf.Ticker(ticker, session=self._session)\n data[ticker] = yf_ticker.history(\n period=period,\n proxy=self._proxy,\n )[\n cols\n ].tz_localize(None)\n\n all_dates, dates = self._extract_dates_from_data(data)\n\n self._data = data\n self._dates = dates\n self._all_dates = all_dates\n\n def _fetch_tickers_info(self):\n \"\"\" \"\"\"\n\n info = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Fetching ticker {ticker} info from Yahoo! Finance\")\n\n yf_ticker = yf.Ticker(ticker, session=self._session)\n info[ticker] = yf_ticker.get_info(proxy=self._proxy)\n\n self._info = info\n\n def _fetch_tickers_data_and_info(\n self,\n period: str,\n cols: List[str],\n ):\n \"\"\"\n Use the `yfinance` library to fetch historical ticker data for the specified time\n period. The performance of the REST requests is highly dependent on three things:\n the config of your `CachedRateLimiter`, the amount of tickers you want to fetch,\n and the multi-threading support of your CPU.\n\n Parameters\n ----------\n period : str\n The time period to try and fetch data from.\n cols : list\n The columns of the fetched ticker data to collect.\n\n \"\"\"\n\n self._fetch_tickers_data(period, cols)\n self._fetch_tickers_info()\n\n def load_local_data_files(self) -> Optional[DirectoryNotFoundError]:\n \"\"\" \"\"\"\n\n path = Path(self._save_path)\n data_path = path / \"data\"\n\n if not path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {path} does not exist. Perhaps you haven't yet \"\n \"tried fetching any data? To do that run `dataset.fetch_data(..)`.\"\n )\n\n if not data_path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {data_path} does not exist. Perhaps you haven't \"\n \"yet tried fetching any data? 
To do that run `dataset.fetch_data(..)`.\"\n )\n\n data = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Loading ticker {ticker} data from local path {path}\")\n\n data[ticker] = self._load_data(\n data_path / f\"{ticker}.csv\",\n separator=self._separator,\n )\n\n if not isinstance(data[ticker].index, pd.DatetimeIndex):\n data[ticker].index = pd.to_datetime(data[ticker].index)\n\n all_dates, dates = self._extract_dates_from_data(data)\n\n self._data = data\n self._dates = dates\n self._all_dates = all_dates\n\n def load_local_info_files(self) -> Optional[DirectoryNotFoundError]:\n \"\"\" \"\"\"\n path = Path(self._save_path)\n info_path = path / \"info\"\n\n if not path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {path} does not exist. Perhaps you haven't yet \"\n \"tried fetching any data? To do that run `dataset.fetch_data(..)`.\"\n )\n\n if not info_path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {info_path} does not exist. Perhaps you haven't \"\n \"yet tried fetching any data? To do that run `dataset.fetch_data(..)`.\"\n )\n\n info = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Loading ticker {ticker} data from local path {path}\")\n\n info[ticker] = self._load_info(\n info_path / f\"{ticker}.json\",\n )\n\n self._info = info\n\n def load_local_files(self):\n \"\"\"\n Load the locally saved info and data files. The info is read from file as a\n ``json`` and the data is read from ``csv`` as a ``pd.DataFrame``.\n\n Raises\n ------\n DirectoryNotFoundError\n When either of the paths to the saved ``info`` and ``data`` is not a directory.\n\n \"\"\"\n\n self.load_local_data_files()\n self.load_local_info_files()\n\n def fetch_data(\n self,\n period: str,\n *,\n cols: List[str] = [\"Open\", \"High\", \"Low\", \"Close\"],\n ) -> Dataset:\n \"\"\"\n Fetch the historical ticker data for the specified time period. If there exists\n locally saved files for all tickers, will try and load them instead of fetching\n from Yahoo! Finance. Saves the fetched files if ``save=True`` was specified in\n the class constructor.\n\n Parameters\n ----------\n period : str\n The time period to try and fetch data from. Valid values are (``1d``,\n ``5d``, ``1mo``, ``3mo``, ``6mo``, ``1y``, ``2y``, ``5y``, ``10y``,\n ``ytd``, ``max``).\n cols : list\n The columns of the fetched ticker data to collect. 
Defaults to\n (``Date``, ``Open``, ``High``, ``Low``, ``Close``).\n\n Returns\n -------\n Dataset\n The initialized instance of ``self`` with ticker data loaded or fetched.\n\n \"\"\"\n\n if all_tickers_data_saved(self._save_path, self._symbols):\n log.info(\n f\"found existing local data files for {self.__class__.__name__}, \"\n \"attempting local load of data files...\"\n )\n\n try:\n self.load_local_data_files()\n log.info(\"OK!\")\n return self\n\n except DirectoryNotFoundError:\n log.warning(\"failed to load local data files, attempting new fetch...\")\n\n self._fetch_tickers_data(period, cols)\n\n if self._save:\n setup_finq_save_data_path(self._save_path)\n self._save_tickers_data()\n\n return self\n\n def fetch_info(\n self,\n ) -> Dataset:\n \"\"\" \"\"\"\n\n if all_tickers_info_saved(self._save_path, self._symbols):\n log.info(\n f\"found existing local info files for {self.__class__.__name__}, \"\n \"attempting local load of info files...\"\n )\n\n try:\n self.load_local_info_files()\n log.info(\"OK!\")\n return self\n\n except DirectoryNotFoundError:\n log.warning(\"failed to load local info files, attempting new fetch...\")\n\n self._fetch_tickers_info()\n\n if self._save:\n setup_finq_save_info_path(self._save_path)\n\n return self\n\n def fetch_data_and_info(\n self,\n period: str,\n *,\n cols: List[str] = [\"Open\", \"High\", \"Low\", \"Close\"],\n ) -> Dataset:\n \"\"\" \"\"\"\n self = self.fetch_data(period, cols=cols)\n self = self.fetch_info()\n return self\n\n def fix_missing_data(\n self,\n *,\n cols: List[str] = [\"Open\", \"High\", \"Low\", \"Close\"],\n resave: bool = True,\n ) -> Dataset:\n \"\"\"\n Compares each tickers dates in their corresponding ``pd.DataFrame`` and compares\n to the known set of dates collected. If there are any missing values, will add\n the missing dates to the dataframe and then use ``df.interpolate()`` to fix them.\n Default interpolation strategy is ``linear``.\n\n Parameters\n ----------\n cols : list\n The columns of the ``pd.DataFrame`` to consider when looking for missing data\n to interpolate. 
Defaults to (``Open``, ``High``, ``Low``, ``Close``).\n resave : bool\n Whether or not to resave the data to local path after fixing missing values.\n Defaults to ``True`` but will onlyesave if there existed missing data.\n\n Returns\n -------\n Dataset\n The initialized instance of ``self``.\n\n \"\"\"\n\n log.info(\"attempting to fix any missing data...\")\n\n n_missing_data = 0\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Fixing ticker {ticker} potential missing values\")\n\n df = self._data[ticker]\n diff = set(self._all_dates) - set(self._dates[ticker])\n\n if diff:\n n_missing_data += 1\n\n df_missed = pd.DataFrame(index=list(diff))\n df_missed.index.name = \"Date\"\n\n df_fixed = pd.concat((df, df_missed)).sort_index(inplace=False)\n df_fixed[cols] = df_fixed[cols].interpolate()\n\n if df_fixed[df_fixed.isnull().any(axis=1)].index.values.size:\n log.error(\n f\"failed to interpolate missing prices for ticker {ticker}!\"\n )\n\n self._data[ticker] = df_fixed\n self._dates[ticker] = self._all_dates\n\n if n_missing_data and resave:\n log.info(f\"fixed {n_missing_data} tickers with missing data\")\n if self._save:\n log.info(f\"saving fixed data to {self._save_path}...\")\n self._save_tickers_data()\n\n log.info(\"OK!\")\n return self\n\n def verify_data(self) -> Union[ValueError, Dataset]:\n \"\"\"\n Tries to verify that the stored data does not contain any missing values.\n This is performed by comparing the dates in each ticker ``pd.DataFrame``\n with the known set of all fetched dates.\n\n Returns\n -------\n Dataset\n The initialized instance of ``self``.\n\n Raises\n ------\n ValueError\n If there exists missing values in any stored ``pd.DataFrame``.\n\n \"\"\"\n\n log.info(\"verifying that stored data has no missing values...\")\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Verifying ticker {ticker} data\")\n\n diff = set(self._all_dates) - set(self._dates[ticker])\n if diff:\n raise ValueError(\n f\"There is a difference in dates for symbol {ticker}, have you \"\n \"tried fixing missing values prior to verifying? To do that, run \"\n \"dataset.fix_missing_data() with your initialized Dataset class.\"\n )\n\n log.info(\"OK!\")\n return self\n\n def run(self, period: str = \"1y\") -> Dataset:\n \"\"\"\n Call the three core methods for the ``Dataset`` class which fetches data,\n tries to fix missing values, and lastly verifies that there is no missing data.\n\n Parameters\n ----------\n period : str\n The time period to try and fetch data from. Valid values are (``1d``,\n ``5d``, ``1mo``, ``3mo``, ``6mo``, ``1y``, ``2y``, ``5y``, ``10y``,\n ``ytd``, ``max``). 
Defaults to ``1y``.\n\n Returns\n -------\n Dataset\n The intialized instance of ``self``.\n\n \"\"\"\n return self.fetch_data(period).fix_missing_data().verify_data()\n\n def visualize_ticker(\n self,\n ticker: str,\n **kwargs: Dict[str, Any],\n ):\n \"\"\" \"\"\"\n\n if kwargs.get(\"title\", None) is None:\n kwargs[\"title\"] = f\"{ticker} historical OHLC prices [{self._market}]\"\n\n mpf.plot(\n self._data[ticker],\n **kwargs,\n )\n\n def visualize(\n self,\n *,\n title: str = \"Historical stock data\",\n xlabel: str = \"Dates\",\n ylabel: str = \"Closing price [$]\",\n ticks_rotation: int = 70,\n legend_loc: str = \"best\",\n log_scale: bool = False,\n save_path: Optional[str] = None,\n price_type: str = \"Close\",\n show: bool = True,\n block: bool = True,\n ):\n \"\"\"\n Plot the historical ticker price data over time.\n\n Parameters\n ----------\n title : str\n The header title to set on the generated plot.\n xlabel : str\n The label to use for the x-axis.\n ylabel : str\n The label to use for the y-axis.\n ticks_rotation : int\n The amount of degrees to rotate the x-axis ticks with. Defaults to ``70``.\n legend_loc : str\n The location of the legend. Some possible values are (``best``, ``center``,\n ``upper left``, ``upper right``, ``lower left``, ``lower right``).\n Defaults to ``best``.\n log_scale : bool\n ``True`` if the historical data should be log scaled, otherwise ``False``.\n save_path : str | None\n The local file to save the generated plot to. Does not save the plot if\n the argument is ``None``.\n price_type : str\n The price type of the historical data to plot. Has to be one\n of (``Open``, ``High``, ``Low``, ``Close``). Defaults to ``Close``.\n show : bool\n ``True`` if the generated plot should be shown on the screen, otherwise\n ``False``. Defaults to ``True``.\n block : bool\n Whether to wait for all figures to be closed before returning. When ``False``\n the figure windows will be displayed and returned immediately. Defaults to\n ``True``.\n\n \"\"\"\n\n for ticker, data in self._data.items():\n plt.plot(\n np.log(data[price_type]) if log_scale else data[price_type],\n label=ticker,\n )\n\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.xticks(rotation=ticks_rotation)\n plt.legend(loc=legend_loc)\n\n if save_path:\n log.info(f\"saving plot to path {save_path}\")\n plt.savefig(save_path)\n log.info(\"OK!\")\n\n if show:\n plt.show(block=block)\n plt.close()\n\n def get_tickers(self) -> List[str]:\n \"\"\"\n Return the saved list of ticker symbols.\n\n Returns\n -------\n list\n A list of ``str`` ticker symbols.\n\n \"\"\"\n return self._symbols\n\n def get_data(self) -> Dict[str, pd.DataFrame]:\n \"\"\"\n Return the saved dictionary which maps ticker symbols to their\n corresponding historical data with the following columns:\n (``Date``, ``Open``, ``High``, ``Low``, ``Close``).\n\n Returns\n -------\n dict\n A dictionary with key: ``str`` and value: ``pd.DataFrame``.\n\n \"\"\"\n return self._data\n\n def as_assets(self, price_type: str = \"Close\") -> Dict[str, Asset]:\n \"\"\"\n Create a list of Assets for each ticker and specified price type.\n\n Parameters\n ----------\n price_type : str\n The price type data to create an ``Asset`` object with. Has to be one\n of (``Open``, ``High``, ``Low``, ``Close``). 
Defaults to ``Close``.\n\n Returns\n -------\n dict\n A dictionary of newly created ``Asset`` objects with ticker symbols as keys.\n\n \"\"\"\n return {\n ticker: Asset(\n self._data[ticker][price_type],\n self._names[i],\n market=self._market,\n index_name=self._index_name,\n price_type=price_type,\n pre_compute=False,\n )\n for i, ticker in enumerate(self._symbols)\n }\n\n def as_df(self, price_type: str = \"Close\") -> pd.DataFrame:\n \"\"\"\n Create an aggregated ``pd.DataFrame`` for the specified price type.\n It will have the shape (n_samples, n_tickers).\n\n Parameters\n ----------\n price_type : str\n The price type data to create the ``pd.DataFrame`` object with. Has to\n be one of (``Open``, ``High``, ``Low``, ``Close``). Defaults to ``Close``.\n\n Returns\n -------\n pd.DataFrame\n A new ``pd.DataFrame`` with ticker names as columns.\n\n \"\"\"\n\n return pd.DataFrame(\n {t: d[price_type] for t, d in zip(self._symbols, self._data.values())},\n index=self._all_dates,\n )\n\n def as_numpy(\n self,\n price_type: str = \"Close\",\n *,\n dtype: np.typing.DTypeLike = np.float32,\n ) -> np.ndarray:\n \"\"\"\n Extract the specified price type from stored data as np.ndarray.\n It will have the shape (n_tickers, n_samples).\n\n Parameters\n ----------\n price_type : str\n The price type data to create the ``np.ndarray`` with. Has to be one\n of (``Open``, ``High``, ``Low``, ``Close``). Defaults to ``Close``.\n dtype : np.typing.DTypeLike\n The data type to create the new ``np.ndarray`` as.\n Defaults to ``np.float32``.\n\n Returns\n -------\n np.ndarray\n A new ``np.ndarray`` from the specified price type and dtype.\n\n \"\"\"\n return np.array(\n [d[price_type].to_numpy().astype(dtype) for d in self._data.values()]\n )"
},
{
"identifier": "FinqError",
"path": "finq/exceptions.py",
"snippet": "class FinqError(Exception):\n \"\"\" \"\"\""
},
{
"identifier": "InvalidCombinationOfArgumentsError",
"path": "finq/exceptions.py",
"snippet": "class InvalidCombinationOfArgumentsError(FinqError):\n \"\"\" \"\"\"\n\n pass"
},
{
"identifier": "InvalidPortfolioWeightsError",
"path": "finq/exceptions.py",
"snippet": "class InvalidPortfolioWeightsError(FinqError):\n \"\"\" \"\"\"\n\n pass"
},
{
"identifier": "ObjectiveFunctionError",
"path": "finq/exceptions.py",
"snippet": "class ObjectiveFunctionError(FinqError):\n \"\"\" \"\"\"\n\n pass"
},
{
"identifier": "PortfolioNotYetOptimizedError",
"path": "finq/exceptions.py",
"snippet": "class PortfolioNotYetOptimizedError(FinqError):\n \"\"\" \"\"\"\n\n pass"
},
{
"identifier": "period_returns",
"path": "finq/formulas.py",
"snippet": "def period_returns(x: np.ndarray, period: int = 1) -> np.ndarray:\n \"\"\" \"\"\"\n\n return (x[:, period:] / x[:, :-period]) - 1"
},
{
"identifier": "sharpe_ratio",
"path": "finq/formulas.py",
"snippet": "def sharpe_ratio(\n r: Union[float, np.ndarray],\n v: Union[float, np.ndarray],\n rfr: float,\n) -> Union[float, np.ndarray]:\n \"\"\" \"\"\"\n\n return (r - rfr) / v"
},
{
"identifier": "weighted_returns",
"path": "finq/formulas.py",
"snippet": "def weighted_returns(w: np.ndarray, r: np.ndarray) -> np.ndarray:\n \"\"\" \"\"\"\n\n return np.dot(w, r)"
},
{
"identifier": "weighted_variance",
"path": "finq/formulas.py",
"snippet": "def weighted_variance(w: np.ndarray, cov: np.ndarray) -> np.ndarray:\n \"\"\" \"\"\"\n\n return np.dot(w, np.dot(cov, w.T))"
}
] | import logging
import pandas as pd
import numpy as np
import scipy.optimize as scipyopt
import matplotlib.pyplot as plt
from functools import wraps
from tqdm import tqdm
from finq.asset import Asset
from finq.datasets import Dataset
from finq.exceptions import (
FinqError,
InvalidCombinationOfArgumentsError,
InvalidPortfolioWeightsError,
ObjectiveFunctionError,
PortfolioNotYetOptimizedError,
)
from finq.formulas import (
period_returns,
sharpe_ratio,
weighted_returns,
weighted_variance,
)
from typing import (
Any,
Callable,
List,
Dict,
Tuple,
Union,
Optional,
) | 10,019 | """ """
# For a full list of `scipy` optimization methods and references, see the link below.
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
_supported_optimization_methods = (
"Nelder-Mead",
"Powell",
"CG",
"BFGS",
"Newton-CG",
"L-BFGS-B",
"TNC",
"COBYLA",
"SLSQP",
"trust-constr",
"dogleg",
"trust-ncg",
"trust-exact",
"trust-krylov",
)
_weight_initializations = {
"lognormal": np.random.lognormal,
"normal": np.random.normal,
"uniform": np.random.uniform,
}
def __init__(
self,
data: Union[Dataset, List[Asset], np.ndarray, pd.DataFrame],
*,
weights: Optional[np.ndarray] = None,
names: Optional[Union[Dict[str, str], List[str]]] = None,
symbols: Optional[Union[Dict[str, str], List[str]]] = None,
confidence_level: float = 0.95,
risk_free_rate: float = 5e-3,
n_trading_days: int = 252,
objective_function: Optional[Callable] = None,
objective_function_args: Tuple[Any, ...] = (),
objective_bounds: Optional[List[Tuple[int, ...]]] = None,
objective_constraints: Optional[Tuple[Dict, ...]] = None,
):
""" """
if isinstance(data, Dataset):
assets = data.as_assets()
data = list(assets.values())
symbols = list(assets.keys())
if not isinstance(data, list):
if names is None and symbols is None and not isinstance(data, pd.DataFrame):
raise InvalidCombinationOfArgumentsError(
"You need to provide the names and ticker symbols of each asset that you "
"want to include in your portfolio if the data you provided is neither a "
"`list` of `Asset` objects or a `pd.DataFrame`. You can also try "
"providing only one of the arguments `names` and `symbols`, but then as "
"a dictionary of the form `key=name` `value=symbol`."
)
if isinstance(data, list):
symbols = [a.name for a in data]
data = np.array([a.data for a in data])
if isinstance(data, pd.DataFrame):
symbols = data.columns
data = data.to_numpy().T
if isinstance(names, dict):
symbols = list(names.values())
names = list(names.keys())
if isinstance(symbols, dict):
names = list(symbols.keys())
symbols = list(symbols.values())
self._data = data
self._weights = weights
self._names = names
self._symbols = symbols
self._confidence_level = confidence_level
self._risk_free_rate = risk_free_rate
self._n_trading_days = n_trading_days
self._random_portfolios = None
self._objective_function = objective_function
self._objective_function_args = objective_function_args
self._objective_bounds = objective_bounds
self._objective_constraints = objective_constraints
def weights_are_normalized(self) -> bool:
""" """
return np.allclose(self._weights.sum(), 1.0, rtol=1e-6)
def initialize_random_weights(
self,
distribution: Union[str, Callable],
*args: Tuple[Any, ...],
**kwargs: Dict[str, Any],
):
""" """
if isinstance(distribution, str):
distribution = self._weight_initializations.get(distribution, None)
if distribution is None:
raise ValueError(
"You provided a non valid weight initialization distribution."
)
weights = distribution(*args, **kwargs)
self._weights = weights / weights.sum()
def check_valid_weights(func) -> Callable:
""" """
@wraps(func)
def _check_valid_weights(self, *args, **kwargs) -> Optional[FinqError]:
""" """
if self._weights is None:
| """
MIT License
Copyright (c) 2023 Wilhelm Ågren
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
File created: 2023-10-20
Last updated: 2023-11-10
"""
log = logging.getLogger(__name__)
class Portfolio(object):
""" """
# For a full list of `scipy` optimization methods and references, see the link below.
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
_supported_optimization_methods = (
"Nelder-Mead",
"Powell",
"CG",
"BFGS",
"Newton-CG",
"L-BFGS-B",
"TNC",
"COBYLA",
"SLSQP",
"trust-constr",
"dogleg",
"trust-ncg",
"trust-exact",
"trust-krylov",
)
_weight_initializations = {
"lognormal": np.random.lognormal,
"normal": np.random.normal,
"uniform": np.random.uniform,
}
def __init__(
self,
data: Union[Dataset, List[Asset], np.ndarray, pd.DataFrame],
*,
weights: Optional[np.ndarray] = None,
names: Optional[Union[Dict[str, str], List[str]]] = None,
symbols: Optional[Union[Dict[str, str], List[str]]] = None,
confidence_level: float = 0.95,
risk_free_rate: float = 5e-3,
n_trading_days: int = 252,
objective_function: Optional[Callable] = None,
objective_function_args: Tuple[Any, ...] = (),
objective_bounds: Optional[List[Tuple[int, ...]]] = None,
objective_constraints: Optional[Tuple[Dict, ...]] = None,
):
""" """
if isinstance(data, Dataset):
assets = data.as_assets()
data = list(assets.values())
symbols = list(assets.keys())
if not isinstance(data, list):
if names is None and symbols is None and not isinstance(data, pd.DataFrame):
raise InvalidCombinationOfArgumentsError(
"You need to provide the names and ticker symbols of each asset that you "
"want to include in your portfolio if the data you provided is neither a "
"`list` of `Asset` objects or a `pd.DataFrame`. You can also try "
"providing only one of the arguments `names` and `symbols`, but then as "
"a dictionary of the form `key=name` `value=symbol`."
)
if isinstance(data, list):
symbols = [a.name for a in data]
data = np.array([a.data for a in data])
if isinstance(data, pd.DataFrame):
symbols = data.columns
data = data.to_numpy().T
if isinstance(names, dict):
symbols = list(names.values())
names = list(names.keys())
if isinstance(symbols, dict):
names = list(symbols.keys())
symbols = list(symbols.values())
self._data = data
self._weights = weights
self._names = names
self._symbols = symbols
self._confidence_level = confidence_level
self._risk_free_rate = risk_free_rate
self._n_trading_days = n_trading_days
self._random_portfolios = None
self._objective_function = objective_function
self._objective_function_args = objective_function_args
self._objective_bounds = objective_bounds
self._objective_constraints = objective_constraints
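# Note: after the branches above, `self._data` is expected to be a 2-D ndarray
# with one row per asset (a `pd.DataFrame` is transposed, a list of `Asset`
# objects is stacked), and `names`/`symbols` end up as parallel per-asset lists.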
def weights_are_normalized(self) -> bool:
""" """
return np.allclose(self._weights.sum(), 1.0, rtol=1e-6)
def initialize_random_weights(
self,
distribution: Union[str, Callable],
*args: Tuple[Any, ...],
**kwargs: Dict[str, Any],
):
""" """
if isinstance(distribution, str):
distribution = self._weight_initializations.get(distribution, None)
if distribution is None:
raise ValueError(
"You provided a non valid weight initialization distribution."
)
weights = distribution(*args, **kwargs)
self._weights = weights / weights.sum()
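# Usage sketch (the arguments are forwarded verbatim to the chosen numpy
# sampler, so e.g. `size=` controls how many weights are drawn):
#   portfolio.initialize_random_weights("lognormal", size=len(portfolio._symbols))
# The draw is then rescaled so the weights sum to 1.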
def check_valid_weights(func) -> Callable:
""" """
@wraps(func)
def _check_valid_weights(self, *args, **kwargs) -> Optional[FinqError]:
""" """
if self._weights is None: | raise PortfolioNotYetOptimizedError( | 6 | 2023-10-09 19:02:54+00:00 | 12k |
lmb-freiburg/ldce | ldm/models/diffusion/classifier.py | [
{
"identifier": "EncoderUNetModel",
"path": "ldm/modules/diffusionmodules/openaimodel.py",
"snippet": "class EncoderUNetModel(nn.Module):\n \"\"\"\n The half UNet model with attention and timestep embedding.\n For usage, see UNet.\n \"\"\"\n\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n pool=\"adaptive\",\n *args,\n **kwargs\n ):\n super().__init__()\n\n if num_heads_upsample == -1:\n num_heads_upsample = num_heads\n\n self.in_channels = in_channels\n self.model_channels = model_channels\n self.out_channels = out_channels\n self.num_res_blocks = num_res_blocks\n self.attention_resolutions = attention_resolutions\n self.dropout = dropout\n self.channel_mult = channel_mult\n self.conv_resample = conv_resample\n self.use_checkpoint = use_checkpoint\n self.dtype = th.float16 if use_fp16 else th.float32\n self.num_heads = num_heads\n self.num_head_channels = num_head_channels\n self.num_heads_upsample = num_heads_upsample\n\n time_embed_dim = model_channels * 4\n self.time_embed = nn.Sequential(\n linear(model_channels, time_embed_dim),\n nn.SiLU(),\n linear(time_embed_dim, time_embed_dim),\n )\n\n self.input_blocks = nn.ModuleList(\n [\n TimestepEmbedSequential(\n conv_nd(dims, in_channels, model_channels, 3, padding=1)\n )\n ]\n )\n self._feature_size = model_channels\n input_block_chans = [model_channels]\n ch = model_channels\n ds = 1\n for level, mult in enumerate(channel_mult):\n for _ in range(num_res_blocks):\n layers = [\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=mult * model_channels,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = mult * model_channels\n if ds in attention_resolutions:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=num_head_channels,\n use_new_attention_order=use_new_attention_order,\n )\n )\n self.input_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n input_block_chans.append(ch)\n if level != len(channel_mult) - 1:\n out_ch = ch\n self.input_blocks.append(\n TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n down=True,\n )\n if resblock_updown\n else Downsample(\n ch, conv_resample, dims=dims, out_channels=out_ch\n )\n )\n )\n ch = out_ch\n input_block_chans.append(ch)\n ds *= 2\n self._feature_size += ch\n\n self.middle_block = TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=num_head_channels,\n use_new_attention_order=use_new_attention_order,\n ),\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n )\n self._feature_size += ch\n self.pool = pool\n if pool == \"adaptive\":\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n nn.AdaptiveAvgPool2d((1, 1)),\n zero_module(conv_nd(dims, ch, out_channels, 1)),\n nn.Flatten(),\n )\n elif pool == \"attention\":\n assert num_head_channels != 
-1\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n AttentionPool2d(\n (image_size // ds), ch, num_head_channels, out_channels\n ),\n )\n elif pool == \"spatial\":\n self.out = nn.Sequential(\n nn.Linear(self._feature_size, 2048),\n nn.ReLU(),\n nn.Linear(2048, self.out_channels),\n )\n elif pool == \"spatial_v2\":\n self.out = nn.Sequential(\n nn.Linear(self._feature_size, 2048),\n normalization(2048),\n nn.SiLU(),\n nn.Linear(2048, self.out_channels),\n )\n else:\n raise NotImplementedError(f\"Unexpected {pool} pooling\")\n\n def convert_to_fp16(self):\n \"\"\"\n Convert the torso of the model to float16.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f16)\n self.middle_block.apply(convert_module_to_f16)\n\n def convert_to_fp32(self):\n \"\"\"\n Convert the torso of the model to float32.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f32)\n self.middle_block.apply(convert_module_to_f32)\n\n def forward(self, x, timesteps):\n \"\"\"\n Apply the model to an input batch.\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :return: an [N x K] Tensor of outputs.\n \"\"\"\n emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))\n\n results = []\n h = x.type(self.dtype)\n for module in self.input_blocks:\n h = module(h, emb)\n if self.pool.startswith(\"spatial\"):\n results.append(h.type(x.dtype).mean(dim=(2, 3)))\n h = self.middle_block(h, emb)\n if self.pool.startswith(\"spatial\"):\n results.append(h.type(x.dtype).mean(dim=(2, 3)))\n h = th.cat(results, axis=-1)\n return self.out(h)\n else:\n h = h.type(x.dtype)\n return self.out(h)"
},
{
"identifier": "UNetModel",
"path": "ldm/modules/diffusionmodules/openaimodel.py",
"snippet": "class UNetModel(nn.Module):\n \"\"\"\n The full UNet model with attention and timestep embedding.\n :param in_channels: channels in the input Tensor.\n :param model_channels: base channel count for the model.\n :param out_channels: channels in the output Tensor.\n :param num_res_blocks: number of residual blocks per downsample.\n :param attention_resolutions: a collection of downsample rates at which\n attention will take place. May be a set, list, or tuple.\n For example, if this contains 4, then at 4x downsampling, attention\n will be used.\n :param dropout: the dropout probability.\n :param channel_mult: channel multiplier for each level of the UNet.\n :param conv_resample: if True, use learned convolutions for upsampling and\n downsampling.\n :param dims: determines if the signal is 1D, 2D, or 3D.\n :param num_classes: if specified (as an int), then this model will be\n class-conditional with `num_classes` classes.\n :param use_checkpoint: use gradient checkpointing to reduce memory usage.\n :param num_heads: the number of attention heads in each attention layer.\n :param num_heads_channels: if specified, ignore num_heads and instead use\n a fixed channel width per attention head.\n :param num_heads_upsample: works with num_heads to set a different number\n of heads for upsampling. Deprecated.\n :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.\n :param resblock_updown: use residual blocks for up/downsampling.\n :param use_new_attention_order: use a different attention pattern for potentially\n increased efficiency.\n \"\"\"\n\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n ):\n super().__init__()\n if use_spatial_transformer:\n assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'\n\n if context_dim is not None:\n assert use_spatial_transformer, 'Fool!! 
You forgot to use the spatial transformer for your cross-attention conditioning...'\n from omegaconf.listconfig import ListConfig\n if type(context_dim) == ListConfig:\n context_dim = list(context_dim)\n\n if num_heads_upsample == -1:\n num_heads_upsample = num_heads\n\n if num_heads == -1:\n assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'\n\n if num_head_channels == -1:\n assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'\n\n self.image_size = image_size\n self.in_channels = in_channels\n self.model_channels = model_channels\n self.out_channels = out_channels\n self.num_res_blocks = num_res_blocks\n self.attention_resolutions = attention_resolutions\n self.dropout = dropout\n self.channel_mult = channel_mult\n self.conv_resample = conv_resample\n self.num_classes = num_classes\n self.use_checkpoint = use_checkpoint\n self.dtype = th.float16 if use_fp16 else th.float32\n self.num_heads = num_heads\n self.num_head_channels = num_head_channels\n self.num_heads_upsample = num_heads_upsample\n self.predict_codebook_ids = n_embed is not None\n\n time_embed_dim = model_channels * 4\n self.time_embed = nn.Sequential(\n linear(model_channels, time_embed_dim),\n nn.SiLU(),\n linear(time_embed_dim, time_embed_dim),\n )\n\n if self.num_classes is not None:\n self.label_emb = nn.Embedding(num_classes, time_embed_dim)\n\n self.input_blocks = nn.ModuleList(\n [\n TimestepEmbedSequential(\n conv_nd(dims, in_channels, model_channels, 3, padding=1)\n )\n ]\n )\n self._feature_size = model_channels\n input_block_chans = [model_channels]\n ch = model_channels\n ds = 1\n for level, mult in enumerate(channel_mult):\n for _ in range(num_res_blocks):\n layers = [\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=mult * model_channels,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = mult * model_channels\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, use_checkpoint=use_checkpoint,\n )\n )\n self.input_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n input_block_chans.append(ch)\n if level != len(channel_mult) - 1:\n out_ch = ch\n self.input_blocks.append(\n TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n down=True,\n )\n if resblock_updown\n else Downsample(\n ch, conv_resample, dims=dims, out_channels=out_ch\n )\n )\n )\n ch = out_ch\n input_block_chans.append(ch)\n ds *= 2\n self._feature_size += ch\n\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n self.middle_block = TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n 
use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, use_checkpoint=use_checkpoint,\n ),\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n )\n self._feature_size += ch\n\n self.output_blocks = nn.ModuleList([])\n for level, mult in list(enumerate(channel_mult))[::-1]:\n for i in range(num_res_blocks + 1):\n ich = input_block_chans.pop()\n layers = [\n ResBlock(\n ch + ich,\n time_embed_dim,\n dropout,\n out_channels=model_channels * mult,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = model_channels * mult\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads_upsample,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, use_checkpoint=use_checkpoint,\n )\n )\n if level and i == num_res_blocks:\n out_ch = ch\n layers.append(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n up=True,\n )\n if resblock_updown\n else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)\n )\n ds //= 2\n self.output_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),\n )\n if self.predict_codebook_ids:\n self.id_predictor = nn.Sequential(\n normalization(ch),\n conv_nd(dims, model_channels, n_embed, 1),\n #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits\n )\n\n def convert_to_fp16(self):\n \"\"\"\n Convert the torso of the model to float16.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f16)\n self.middle_block.apply(convert_module_to_f16)\n self.output_blocks.apply(convert_module_to_f16)\n\n def convert_to_fp32(self):\n \"\"\"\n Convert the torso of the model to float32.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f32)\n self.middle_block.apply(convert_module_to_f32)\n self.output_blocks.apply(convert_module_to_f32)\n\n def forward(self, x, timesteps=None, context=None, y=None,**kwargs):\n \"\"\"\n Apply the model to an input batch.\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :param context: conditioning plugged in via crossattn\n :param y: an [N] Tensor of labels, if class-conditional.\n :return: an [N x C x ...] 
Tensor of outputs.\n \"\"\"\n assert (y is not None) == (\n self.num_classes is not None\n ), \"must specify y if and only if the model is class-conditional\"\n hs = []\n t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)\n emb = self.time_embed(t_emb)\n\n if self.num_classes is not None:\n assert y.shape == (x.shape[0],)\n emb = emb + self.label_emb(y)\n\n h = x.type(self.dtype)\n for module in self.input_blocks:\n h = module(h, emb, context)\n hs.append(h)\n h = self.middle_block(h, emb, context)\n for module in self.output_blocks:\n h = th.cat([h, hs.pop()], dim=1)\n h = module(h, emb, context)\n h = h.type(x.dtype)\n if self.predict_codebook_ids:\n return self.id_predictor(h)\n else:\n return self.out(h)"
},
{
"identifier": "log_txt_as_img",
"path": "ldm/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts"
},
{
"identifier": "default",
"path": "ldm/util.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "ismap",
"path": "ldm/util.py",
"snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)"
},
{
"identifier": "instantiate_from_config",
"path": "ldm/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
}
] | import os
import torch
import pytorch_lightning as pl
import kornia.augmentation as K
import torchvision
from torch import nn
from torch import Tensor
from omegaconf import OmegaConf
from torch.nn import functional as F
from torch.nn import Linear
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from copy import deepcopy
from einops import rearrange
from glob import glob
from natsort import natsorted
from torchvision.models import resnet18
from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel
from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config | 8,184 |
if self.label_key == 'segmentation':
targets = rearrange(targets, 'b h w c -> b c h w')
for down in range(self.numd):
h, w = targets.shape[-2:]
targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest')
# targets = rearrange(targets,'b c h w -> b h w c')
return targets
def compute_top_k(self, logits, labels, k, reduction="mean"):
_, top_ks = torch.topk(logits, k, dim=1)
if reduction == "mean":
return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
elif reduction == "none":
return (top_ks == labels[:, None]).float().sum(dim=-1)
def on_train_epoch_start(self):
# save some memory
self.diffusion_model.model.to('cpu')
@torch.no_grad()
def write_logs(self, loss, logits, targets):
log_prefix = 'train' if self.training else 'val'
log = {}
log[f"{log_prefix}/loss"] = loss.mean()
log[f"{log_prefix}/acc@1"] = self.compute_top_k(
logits, targets, k=1, reduction="mean"
)
log[f"{log_prefix}/acc@5"] = self.compute_top_k(
logits, targets, k=5, reduction="mean"
)
self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True)
self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False)
#self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True)
lr = self.optimizers().param_groups[0]['lr']
self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True)
def shared_step(self, batch, t=None):
x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key)
if self.latent_transform is not None and self.trainer.training:
#print("Applying latent transform")
x = self.latent_transform(x)
targets = self.get_conditioning(batch)
if targets.dim() == 4:
targets = targets.argmax(dim=1)
if t is None:
t = torch.randint(0, self.diffusion_model.num_timesteps //self.diffusion_classifier_steps_ratio, (x.shape[0],), device=self.device).long()
else:
t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long()
x_noisy = self.get_x_noisy(x, t)
logits = self(x_noisy, t)
loss = F.cross_entropy(logits, targets, reduction='none')
self.write_logs(loss.detach(), logits.detach(), targets.detach())
loss = loss.mean()
return loss, logits, x_noisy, targets
def training_step(self, batch, batch_idx):
loss, *_ = self.shared_step(batch)
return loss
def reset_noise_accs(self):
self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in
range(0, self.diffusion_model.num_timesteps //self.diffusion_classifier_steps_ratio,
self.diffusion_model.log_every_t // self.diffusion_classifier_steps_ratio )}
def on_validation_start(self):
self.reset_noise_accs()
@torch.no_grad()
def validation_step(self, batch, batch_idx):
loss, *_ = self.shared_step(batch, t=0)
for t in self.noisy_acc:
_, logits, _, targets = self.shared_step(batch, t)
self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean'))
self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean'))
# log per-timestep noisy accuracy
self.log(f'val/noisy_acc@{t}@1', self.noisy_acc[t]['acc@1'][-1], on_step=False, on_epoch=True)
self.log(f'val/noisy_acc@{t}@5', self.noisy_acc[t]['acc@5'][-1], on_step=False, on_epoch=True)
return loss
def configure_optimizers(self):
optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
if self.use_scheduler:
scheduler = instantiate_from_config(self.scheduler_config)
print("Setting up LambdaLR scheduler...")
scheduler = [
{
'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule),
'interval': 'step',
'frequency': 1
}]
return [optimizer], scheduler
return optimizer
@torch.no_grad()
def log_images(self, batch, N=8, *args, **kwargs):
log = dict()
x = self.get_input(batch, self.diffusion_model.first_stage_key)
log['inputs'] = x
# z, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key)
# log["inputs_after_first_stage"] = z
#log image after first stage model
y = self.get_conditioning(batch)
if self.label_key == 'class_label':
|
__models__ = {
'class_label': EncoderUNetModel,
'segmentation': UNetModel
}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
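# Note: `disabled_train` is bound in place of `model.train` (see `load_diffusion`
# below), so later `.train()` / `.train(False)` calls on the frozen diffusion
# model become no-ops and cannot switch it out of eval mode.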
class DataAugmentation(nn.Module):
"""Module to perform data augmentation using Kornia on torch tensors."""
def __init__(self, apply_color_jitter: bool = False, latent = False) -> None:
super().__init__()
self._apply_color_jitter = apply_color_jitter
self._latent = latent
self.transforms = K.AugmentationSequential(
K.RandomErasing(p=0.1, scale=(0.01, 0.33), ratio=(0.3, 3.3), value=0.45),
K.RandomHorizontalFlip(p=0.5),
K.RandomAffine(360, [0.1, 0.1], [0.7, 1.2], [30., 50.], p=0.5),
K.RandomPerspective(0.5, p=0.3),
K.RandomChannelShuffle(p=0.3),
K.RandomThinPlateSpline(p=0.3),
same_on_batch=False,
) if latent else K.AugmentationSequential(
K.RandomErasing(p=0.1, scale=(0.01, 0.33), ratio=(0.3, 3.3), value=0.45),
K.RandomHorizontalFlip(p=0.2),
K.RandomAffine(360, [0.1, 0.1], [0.7, 1.2], [30., 50.], p=0.2),
K.RandomPerspective(0.5, p=0.2),
#K.RandomChannelShuffle(p=0.2),
K.RandomThinPlateSpline(p=0.2),
K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.1),
same_on_batch=False,
)
self.jitter = K.ColorJitter(0.1, 0.1, 0.1, 0.1, p=0.2)
@torch.no_grad() # disable gradients for efficiency
def forward(self, x: Tensor) -> Tensor:
#print("ranges of x before: ", x.min(), x.max())
# convert from [-1, 1] to [0, 1]
x = (x + 1) / 2
x_out = self.transforms(x) # BxCxHxW
if self._apply_color_jitter and not self._latent:
x_out = self.jitter(x_out)
# convert back to -1 to 1
x_out = x_out * 2 - 1
#print("ranges of x after: ", x_out.min(), x_out.max())
return x_out
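# Minimal usage sketch (inputs are assumed to be (B, C, H, W) tensors in
# [-1, 1], matching the rescaling done in `forward` above):
#   aug = DataAugmentation(apply_color_jitter=True, latent=False)
#   x_aug = aug(images)  # augmented batch, returned in [-1, 1]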
class NoisyLatentImageClassifier(pl.LightningModule):
def __init__(self,
diffusion_path,
num_classes,
ckpt_path=None,
pool='attention',
label_key=None,
diffusion_classifier_steps_ratio=10,
diffusion_ckpt_path=None,
scheduler_config=None,
backbone='unet_encoder',
weight_decay=1.e-2,
log_steps=10,
monitor='val/loss',
image_aug = False,
latent_aug = False,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.num_classes = num_classes
self.backbone = backbone
# get latest config of diffusion model
#diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1]
self.diffusion_config = OmegaConf.load(diffusion_path).model
self.diffusion_ckpt_path = diffusion_ckpt_path
self.diffusion_classifier_steps_ratio = diffusion_classifier_steps_ratio
#print("getting diffusion path", self.diffusion_config.params, self.diffusion_ckpt_path)
self.diffusion_config.params.ckpt_path = diffusion_ckpt_path
self.load_diffusion()
self.monitor = monitor
self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1
self.log_time_interval = self.diffusion_model.num_timesteps //self.diffusion_classifier_steps_ratio // log_steps
self.log_steps = log_steps
self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \
else self.diffusion_model.cond_stage_key
assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params'
if self.label_key not in __models__:
raise NotImplementedError()
self.load_classifier(ckpt_path, pool)
self.scheduler_config = scheduler_config
self.use_scheduler = self.scheduler_config is not None
self.weight_decay = weight_decay
self.image_transform = DataAugmentation(apply_color_jitter= True) if image_aug else None
self.latent_transform = DataAugmentation(apply_color_jitter= True, latent = True) if latent_aug else None
def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
sd = torch.load(path, map_location="cpu")
if "state_dict" in list(sd.keys()):
sd = sd["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
sd, strict=False)
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
if len(missing) > 0:
print(f"Missing Keys: {missing}")
if len(unexpected) > 0:
print(f"Unexpected Keys: {unexpected}")
def load_diffusion(self):
model = instantiate_from_config(self.diffusion_config)
self.diffusion_model = model.eval()
self.diffusion_model.train = disabled_train
for param in self.diffusion_model.parameters():
param.requires_grad = False
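# The diffusion model acts purely as a frozen pipeline here: `get_input` encodes
# images into first-stage latents and `q_sample` adds noise to them, while the
# classifier below is the only module that receives gradients.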
def load_classifier(self, ckpt_path, pool):
if self.backbone == 'unet_encoder':
model_config = deepcopy(self.diffusion_config.params.unet_config.params)
model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels
model_config.out_channels = self.num_classes
if self.label_key == 'class_label':
model_config.pool = pool
self.model = __models__[self.label_key](**model_config)
#check if model is in torchvision models
else:
print("Looking for model from torchvision.models")
try:
self.model = getattr(torchvision.models, self.backbone)(pretrained=True)
except:
raise NotImplementedError(f"Model {self.backbone} not implemented")
if hasattr(self.model, 'fc'):
try:
self.model.fc = Linear(self.model.fc.in_features, self.num_classes)
except:
raise NotImplementedError(f"Model {self.backbone} final layer mistmatch")
elif hasattr(self.model, 'classifier'):
try:
print(f"classifier size is {len(self.model.classifier)}")
self.model.classifier[-1] = Linear(self.model.classifier[-1].in_features, self.num_classes)
except:
raise NotImplementedError(f"Model {self.backbone} final layer mistmatch")
else:
raise NotImplementedError(f"Model {self.backbone} final layer structure not recognized")
if ckpt_path is not None:
print('#####################################################################')
print(f'load from ckpt "{ckpt_path}"')
print('#####################################################################')
self.init_from_ckpt(ckpt_path)
@torch.no_grad()
def get_x_noisy(self, x, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x))
continuous_sqrt_alpha_cumprod = None
# if self.diffusion_model.use_continuous_noise:
# continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1)
# # todo: make sure t+1 is correct here
return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise)
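# `q_sample` is the standard DDPM forward-diffusion step of the underlying LDM
# model, i.e. x_t = sqrt(alphas_cumprod[t]) * x_0 + sqrt(1 - alphas_cumprod[t]) * noise,
# so the classifier is trained on latents corrupted to the sampled timestep t.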
def forward(self, x_noisy, t, *args, **kwargs):
return self.model(x_noisy) #, t)
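# Note: `t` is accepted for interface compatibility but is not forwarded to the
# backbone (the `, t` argument is commented out). The timestep-conditioned
# `EncoderUNetModel` backbone would require it, so as written this forward pass
# targets the torchvision-style backbones.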
@torch.no_grad()
def get_input(self, batch, k):
x = batch[k]
if len(x.shape) == 3:
x = x[..., None]
x = rearrange(x, 'b h w c -> b c h w')
x = x.to(memory_format=torch.contiguous_format).float()
if self.image_transform is not None and self.trainer.training:
#print("Applying image transform")
x = self.image_transform(x)
return x
@torch.no_grad()
def get_conditioning(self, batch, k=None):
if k is None:
k = self.label_key
assert k is not None, 'Needs to provide label key'
targets = batch[k].to(self.device)
if self.label_key == 'segmentation':
targets = rearrange(targets, 'b h w c -> b c h w')
for down in range(self.numd):
h, w = targets.shape[-2:]
targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest')
# targets = rearrange(targets,'b c h w -> b h w c')
return targets
def compute_top_k(self, logits, labels, k, reduction="mean"):
_, top_ks = torch.topk(logits, k, dim=1)
if reduction == "mean":
return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
elif reduction == "none":
return (top_ks == labels[:, None]).float().sum(dim=-1)
def on_train_epoch_start(self):
# save some memory
self.diffusion_model.model.to('cpu')
@torch.no_grad()
def write_logs(self, loss, logits, targets):
log_prefix = 'train' if self.training else 'val'
log = {}
log[f"{log_prefix}/loss"] = loss.mean()
log[f"{log_prefix}/acc@1"] = self.compute_top_k(
logits, targets, k=1, reduction="mean"
)
log[f"{log_prefix}/acc@5"] = self.compute_top_k(
logits, targets, k=5, reduction="mean"
)
self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True)
self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False)
#self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True)
lr = self.optimizers().param_groups[0]['lr']
self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True)
def shared_step(self, batch, t=None):
x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key)
if self.latent_transform is not None and self.trainer.training:
#print("Applying latent transform")
x = self.latent_transform(x)
targets = self.get_conditioning(batch)
if targets.dim() == 4:
targets = targets.argmax(dim=1)
if t is None:
t = torch.randint(0, self.diffusion_model.num_timesteps //self.diffusion_classifier_steps_ratio, (x.shape[0],), device=self.device).long()
else:
t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long()
x_noisy = self.get_x_noisy(x, t)
logits = self(x_noisy, t)
loss = F.cross_entropy(logits, targets, reduction='none')
self.write_logs(loss.detach(), logits.detach(), targets.detach())
loss = loss.mean()
return loss, logits, x_noisy, targets
def training_step(self, batch, batch_idx):
loss, *_ = self.shared_step(batch)
return loss
def reset_noise_accs(self):
self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in
range(0, self.diffusion_model.num_timesteps //self.diffusion_classifier_steps_ratio,
self.diffusion_model.log_every_t // self.diffusion_classifier_steps_ratio )}
def on_validation_start(self):
self.reset_noise_accs()
@torch.no_grad()
def validation_step(self, batch, batch_idx):
loss, *_ = self.shared_step(batch, t=0)
for t in self.noisy_acc:
_, logits, _, targets = self.shared_step(batch, t)
self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean'))
self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean'))
# log per-timestep noisy accuracy
self.log(f'val/noisy_acc@{t}@1', self.noisy_acc[t]['acc@1'][-1], on_step=False, on_epoch=True)
self.log(f'val/noisy_acc@{t}@5', self.noisy_acc[t]['acc@5'][-1], on_step=False, on_epoch=True)
return loss
def configure_optimizers(self):
optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
if self.use_scheduler:
scheduler = instantiate_from_config(self.scheduler_config)
print("Setting up LambdaLR scheduler...")
scheduler = [
{
'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule),
'interval': 'step',
'frequency': 1
}]
return [optimizer], scheduler
return optimizer
@torch.no_grad()
def log_images(self, batch, N=8, *args, **kwargs):
log = dict()
x = self.get_input(batch, self.diffusion_model.first_stage_key)
log['inputs'] = x
# z, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key)
# log["inputs_after_first_stage"] = z
#log image after first stage model
y = self.get_conditioning(batch)
if self.label_key == 'class_label': | y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) | 2 | 2023-10-10 09:40:10+00:00 | 12k |
casszhao/PruneHall | summac/train_summac.py | [
{
"identifier": "select_freer_gpu",
"path": "summac/utils_misc.py",
"snippet": "def select_freer_gpu():\n freer_gpu = str(get_freer_gpu())\n print(\"Will use GPU: %s\" % (freer_gpu))\n os.environ['CUDA_LAUNCH_BLOCKING'] = \"1\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"+freer_gpu\n return freer_gpu"
},
{
"identifier": "build_optimizer",
"path": "summac/utils_optim.py",
"snippet": "def build_optimizer(model, optimizer_name=\"adam\", learning_rate=1e-5):\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n if optimizer_name == \"adam\":\n optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate)\n elif optimizer_name == \"sgd\":\n optimizer = SGD(optimizer_grouped_parameters, lr=learning_rate)\n else:\n assert False, \"optimizer_name = '%s' is not `adam` or `lamb`\" % (optimizer_name)\n return optimizer"
},
{
"identifier": "SummaCBenchmark",
"path": "summac/benchmark.py",
"snippet": "class SummaCBenchmark:\n\n def __init__(self, benchmark_folder=\"/home/phillab/data/summac_benchmark/\", dataset_names=[\"cogensum\", \"xsumfaith\", \"polytope\", \"factcc\", \"summeval\", \"frank\"], cut=\"val\"):\n assert cut in [\"val\", \"test\"], \"Unrecognized cut for the Fact Checking Benchmark\"\n if not os.path.exists(benchmark_folder):\n os.makedirs(benchmark_folder)\n\n self.cut = cut\n self.benchmark_folder = benchmark_folder\n self.cnndm_id2reference = None\n self.cnndm = None\n self.xsum = None\n\n self.datasets = []\n for dataset_name in dataset_names:\n if dataset_name == \"cogensum\":\n self.load_cogensumm()\n elif dataset_name == \"xsumfaith\":\n self.load_xsumfaith()\n elif dataset_name == \"polytope\":\n self.load_polytope()\n elif dataset_name == \"factcc\":\n self.load_factcc()\n elif dataset_name == \"summeval\":\n self.load_summeval()\n elif dataset_name == \"frank\":\n self.load_frank()\n else:\n raise ValueError(\"Unrecognized dataset name: %s\" % (dataset_name))\n\n # Underlying dataset loader: CNN/DM and XSum\n def get_cnndm_document(self, aid):\n global CNNDM\n if self.cnndm is None:\n # by cass\n # if CNNDM is None:\n # CNNDM = load_dataset(\"cnn_dailymail\", \"3.0.0\")\n try: CNNDM\n except: CNNDM = load_dataset(\"cnn_dailymail\", \"3.0.0\")\n self.cnndm = CNNDM\n self.cnndm_id2article = {}\n for cut in [\"test\", \"validation\"]:\n self.cnndm_id2article.update({d[\"id\"]: d[\"article\"] for d in self.cnndm[cut]})\n return self.cnndm_id2article[aid]\n\n def get_cnndm_reference(self, aid):\n global CNNDM\n if CNNDM is None:\n CNNDM = load_dataset(\"cnn_dailymail\", \"3.0.0\")\n self.cnndm = CNNDM\n if self.cnndm_id2reference is None:\n self.cnndm_id2reference = {}\n for cut in [\"test\", \"validation\"]:\n self.cnndm_id2reference.update({d[\"id\"]: d[\"highlights\"] for d in self.cnndm[cut]})\n return self.cnndm_id2reference[aid]\n\n\n def get_xsum_document(self, aid):\n if self.xsum is None:\n self.xsum = load_dataset(\"xsum\")[\"test\"]\n self.xsumid2article = {d[\"id\"]: d[\"document\"] for d in self.xsum}\n\n return self.xsumid2article[aid]\n\n # Individual dataset loaders\n def load_cogensumm(self):\n # Correctness of Generated Summaries: https://www.aclweb.org/anthology/P19-1213.pdf\n # CoGenSumm: https://tudatalib.ulb.tu-darmstadt.de/handle/tudatalib/2002\n\n dataset_folder = os.path.join(self.benchmark_folder, \"cogensumm/\")\n if not os.path.exists(dataset_folder):\n print(\"==== CoGenSumm dataset not found, downloading from scratch\")\n os.makedirs(dataset_folder)\n data = requests.get(\"https://tudatalib.ulb.tu-darmstadt.de/bitstream/handle/tudatalib/2002/summary-correctness-v1.0.zip?sequence=3&isAllowed=y\")\n zip_file = os.path.join(dataset_folder, \"summary-correctness-v1.0.zip\")\n with open(zip_file, \"wb\") as f:\n f.write(data.content)\n\n with zipfile.ZipFile(zip_file, \"r\") as zip_ref:\n zip_ref.extractall(dataset_folder)\n os.remove(zip_file)\n\n clean_dataset = []\n for fn in os.listdir(dataset_folder):\n if self.cut not in fn:\n continue\n\n with open(os.path.join(dataset_folder, fn), \"r\") as f:\n dataset = json.load(f)\n\n if \"_org\" in fn or fn == \"test_chen18_reranked.json\":\n for aid in dataset:\n document = self.get_cnndm_document(aid)\n label = 0 if dataset[aid][\"label\"] == \"Incorrect\" else 1\n sents = dataset[aid][\"sents\"]\n summary = \" \".join([sents[str(i)][\"text\"] for i in range(len(sents))])\n clean_dataset.append({\"filename\": fn, \"label\": label, \"document\": document, \"claim\": summary, 
\"cnndm_id\": aid, \"annotations\": [label], \"dataset\": \"cogensumm\", \"origin\": \"cnndm\"})\n elif fn == \"val_reranking.json\":\n for aid in dataset:\n document = self.get_cnndm_document(aid)\n for idx, data in dataset[aid].items():\n label = 0 if data[\"label\"] == \"Incorrect\" else 1\n summary = \" \".join([data[\"sents\"][str(i)][\"text\"] for i in range(len(data[\"sents\"]))])\n clean_dataset.append({\"filename\": fn, \"label\": label, \"document\": document, \"claim\": summary, \"cnndm_id\": aid, \"annotations\": [label], \"dataset\": \"cogensumm\", \"origin\": \"cnndm\"})\n elif fn == \"val_sentence_pairs.json\":\n for d in dataset:\n aid = d[\"article_id\"]\n document = self.get_cnndm_document(aid)\n clean_dataset.append({\"filename\": fn, \"label\": 1, \"document\": document, \"claim\": d[\"correct_sent\"], \"cnndm_id\": aid, \"annotations\": [1], \"dataset\": \"cogensumm\", \"origin\": \"cnndm\"})\n clean_dataset.append({\"filename\": fn, \"label\": 0, \"document\": document, \"claim\": d[\"incorrect_sent\"], \"cnndm_id\": aid, \"annotations\": [0], \"dataset\": \"cogensumm\", \"origin\": \"cnndm\"})\n self.datasets.append({\"name\": \"cogensumm\", \"dataset\": clean_dataset})\n\n def load_xsumfaith(self):\n # On Faithfulness and Factuality in Abstractive Summarization - ACL 2020\n # https://github.com/google-research-datasets/xsum_hallucination_annotations\n # https://aclanthology.org/2020.acl-main.173.pdf\n\n dataset_folder = os.path.join(self.benchmark_folder, \"xsumfaith/\")\n if not os.path.exists(dataset_folder):\n print(\"==== XSum dataset not found, downloading from scratch\")\n os.makedirs(dataset_folder)\n\n csv_file = requests.get(\"https://github.com/google-research-datasets/xsum_hallucination_annotations/raw/master/hallucination_annotations_xsum_summaries.csv\")\n with open(os.path.join(dataset_folder, \"hallucination_annotations_xsum_summaries.csv\"), \"wb\") as f:\n f.write(csv_file.content)\n\n path_to_annotation = os.path.join(dataset_folder, \"hallucination_annotations_xsum_summaries.csv\")\n\n with open(path_to_annotation, \"r\") as f:\n raw_data = list(csv.reader(f))\n dataset = []\n keys = raw_data[0]\n for line in raw_data[1:]:\n dataset.append({k: v for k, v in zip(keys, line)})\n\n groups = {}\n for d in dataset:\n k = (d[\"bbcid\"], d[\"system\"])\n if k not in groups:\n groups[k] = []\n groups[k].append(d)\n\n clean_dataset = []\n for k, vs in groups.items():\n A = vs[0]\n document = self.get_xsum_document(A[\"bbcid\"])\n labels = [v[\"hallucination_type\"] for v in vs]\n annotations = [1 if label == \"NULL\" else 0 for label in labels]\n most_common_label = Counter(labels).most_common(1)[0][0]\n label = 1 if most_common_label == \"NULL\" else 0\n c = \"val\" if len(clean_dataset) % 2 == 0 else \"test\"\n\n clean_dataset.append({\"document\": document, \"claim\": A[\"summary\"], \"bbcid\": A[\"bbcid\"], \"model_name\": A[\"system\"], \"label\": label, \"cut\": c, \"annotations\": annotations, \"dataset\": \"xsumfaith\", \"origin\": \"xsum\"})\n final_dataset = [d for d in clean_dataset if d[\"cut\"]==self.cut]\n self.datasets.append({\"name\": \"xsumfaith\", \"dataset\": final_dataset})\n\n def load_polytope(self, which_label=\"overall\"):\n # What Have We Achieved on Text Summarization? 
[https://arxiv.org/abs/2010.04529]\n # Dataset must be downloaded from the Github repo: https://github.com/hddbang/polytope\n\n assert which_label in [\"overall\", \"omission\", \"addition\", \"duplication\", \"inaccuracy\"], \"Unrecognized `which label`\"\n\n dataset_folder = os.path.join(self.benchmark_folder, \"polytope\")\n if not os.path.exists(dataset_folder):\n print(\"==== Polytope dataset not found, downloading from scratch\")\n os.makedirs(dataset_folder)\n\n for model_name in [\"BART\", \"Bert_Ext\", \"Bert_Ext_Abs\", \"BottomUp\", \"PG\", \"PG_Coverage\", \"Summa\", \"TextRank\", \"seq2seq\"]:\n url = \"https://github.com/hddbang/PolyTope/raw/master/outputs_with_human_annotation/Human_Annotation_Summarization_%s.xlsm\" % (model_name)\n r = requests.get(url)\n with open(os.path.join(dataset_folder, \"Human_Annotation_Summarization_%s.xlsm\" % (model_name)), \"wb\") as f:\n f.write(r.content)\n\n full_dataset = []\n for fn in os.listdir(dataset_folder):\n fn = os.path.join(dataset_folder, fn)\n\n all_segments = pd.read_excel(fn, sheet_name=\"Scores per segment\")\n ID2row = {}\n for i, segment in all_segments.iterrows():\n c = \"val\" if i % 2 == 0 else \"test\"\n if str(segment[\"ID\"]) != \"nan\":\n ID2row[segment[\"ID\"]] = {\"ID\": segment[\"ID\"], \"document\": segment[\"Source\"], \"claim\": segment[\"Target\"], \"errors\": [], \"cut\": c}\n\n for i, row in pd.read_excel(fn, sheet_name=\"Error Log\").iterrows():\n if str(row[\"Subtypes\"]) != \"nan\":\n ID2row[row[\"ID\"]][\"errors\"].append(row[\"Subtypes\"])\n\n for ID in ID2row:\n d = ID2row[ID]\n d[\"overall_label\"] = 1 if len(d[\"errors\"]) == 0 else 0\n d[\"omission_label\"] = 0 if \"Omission\" in d[\"errors\"] else 1\n d[\"addition_label\"] = 0 if \"Addition\" in d[\"errors\"] else 1\n d[\"duplication_label\"] = 0 if \"Duplication\" in d[\"errors\"] else 1\n d[\"inaccuracy_label\"] = 0 if \"Inaccuracy_internal\" in d[\"errors\"] or \"Inaccuracy_external\" in d[\"errors\"] else 1\n if which_label is not None:\n d[\"label\"] = d[\"%s_label\" % (which_label)]\n d[\"dataset\"] = \"polytope\"\n d[\"annotations\"] = [d[\"label\"]]\n d[\"origin\"] = \"cnndm\"\n\n full_dataset.append(d)\n cut_dataset = [d for d in full_dataset if d[\"cut\"]==self.cut]\n self.datasets.append({\"name\": \"polytope\", \"dataset\": cut_dataset})\n\n def load_factcc(self, max_entries=-1):\n # Evaluating the Factual Consistency of Abstractive Text Summarization [https://arxiv.org/abs/1910.12840]\n # Dataset for each split must be downloaded from the Github repo: https://github.com/salesforce/factCC\n\n dataset_folder = os.path.join(self.benchmark_folder, \"factcc/\")\n if not os.path.exists(dataset_folder):\n print(\"==== FactCC dataset not found, downloading from scratch\")\n os.makedirs(dataset_folder)\n\n urls = [\"https://storage.googleapis.com/sfr-factcc-data-research/unpaired_generated_data.tar.gz\", \"https://storage.googleapis.com/sfr-factcc-data-research/unpaired_annotated_data.tar.gz\"]\n for url in urls:\n zip_name = url.split(\"/\")[-1]\n r = requests.get(url)\n with open(os.path.join(dataset_folder, zip_name), \"wb\") as f:\n f.write(r.content)\n \n with tarfile.open(os.path.join(dataset_folder, zip_name), \"r:gz\") as f:\n f.extractall(dataset_folder)\n os.remove(os.path.join(dataset_folder, zip_name))\n\n if self.cut == \"train\":\n dataset = []\n with open(os.path.join(dataset_folder, \"unpaired_generated_data/data-original/data-train.jsonl\"), \"r\") as f:\n for i, line in enumerate(f):\n if max_entries > 0 and i >= 
max_entries:\n break\n D = json.loads(line)\n aid = D[\"filepath\"].split(\"/\")[-1].replace(\".story\", \"\")\n full_text = self.get_cnndm_document(aid)\n\n label = 1 if D[\"label\"]==\"CORRECT\" else 0\n datum = {\"document\": full_text, \"claim\": D[\"claim\"], \"cnndm_id\": D[\"id\"], \"label\": label, \"dataset\": \"factcc\", \"origin\": \"cnndm\"}\n dataset.append(datum)\n\n if self.cut in [\"val\", \"test\"]:\n factcc_file = os.path.join(dataset_folder, \"unpaired_annotated_data/%s/data-dev.jsonl\" % (self.cut))\n dataset = []\n with open(factcc_file, \"r\") as f:\n for line in f:\n dataset.append(json.loads(line))\n\n for d in dataset:\n aid = d[\"filepath\"].split(\"/\")[-1].replace(\".story\", \"\")\n d[\"document\"] = self.get_cnndm_document(aid)\n d[\"label\"] = 1 if d[\"label\"] == \"CORRECT\" else 0\n d[\"annotations\"] = [d[\"label\"]]\n d[\"dataset\"] = \"factcc\"\n d[\"origin\"] = \"cnndm\"\n\n self.datasets.append({\"name\": \"factcc\", \"dataset\": dataset})\n\n def load_summeval(self, key_focus=\"consistency\"):\n assert key_focus in [\"consistency\", \"coherence\", \"fluency\", \"relevance\"]\n # SummEval: Re-evaluating Summarization Evaluation [https://arxiv.org/abs/2007.12626]\n # Data files must be downloaded from the following Github repository: https://github.com/Yale-LILY/SummEval\n raw_dataset = []\n\n dataset_folder = os.path.join(self.benchmark_folder, \"summeval/\")\n fn = os.path.join(dataset_folder, \"model_annotations.aligned.scored.jsonl\")\n if not os.path.exists(dataset_folder):\n print(\"==== SummEval dataset not found, downloading from scratch\")\n os.makedirs(dataset_folder)\n\n # From the 4/19/2020 update on the README: https://github.com/Yale-LILY/SummEval\n download_file_from_google_drive(\"1d2Iaz3jNraURP1i7CfTqPIj8REZMJ3tS\", fn)\n\n with open(fn, \"r\") as f:\n for line in f:\n raw_dataset.append(json.loads(line))\n\n clean_dataset = []\n\n for i, d in enumerate(raw_dataset):\n c = \"val\" if i % 2 == 0 else \"test\"\n _, _, article_id = d[\"id\"].split(\"-\")\n document = self.get_cnndm_document(article_id)\n annotations = d[\"expert_annotations\"]\n\n consistencies = [a[key_focus] for a in annotations]\n final_label = 1 if len([cons for cons in consistencies if cons==5]) > len(annotations)/2 else 0\n\n # annotations = [1 if cons == 5 else 0 for cons in consistencies]\n annotations = consistencies\n error_type = \"no error\" if final_label == 1 else \"error\"\n\n clean_dataset.append({\"document\": document, \"claim\": d[\"decoded\"], \"label\": final_label, \"model_name\": d[\"model_id\"], \"cnndm_id\": d[\"id\"], \"cut\": c, \"annotations\": annotations, \"dataset\": \"summeval\", \"origin\": \"cnndm\", \"error_type\": error_type})\n final_dataset = [d for d in clean_dataset if d[\"cut\"] == self.cut]\n self.datasets.append({\"name\": \"summeval\", \"dataset\": final_dataset})\n\n def load_frank(self):\n # FRANK: Factuality Evaluation Benchmark [https://aclanthology.org/2021.naacl-main.383.pdf]\n # Files must be downloaded from the Github repository: https://github.com/artidoro/frank\n\n dataset_folder = os.path.join(self.benchmark_folder, \"frank/\")\n if not os.path.exists(dataset_folder):\n print(\"==== Frank dataset not found, downloading from scratch\")\n os.makedirs(dataset_folder)\n\n fns = [\"human_annotations_sentence.json\", \"validation_split.txt\", \"test_split.txt\"]\n for fn in fns:\n data = requests.get(\"https://raw.githubusercontent.com/artidoro/frank/main/data/%s\" % fn)\n with open(os.path.join(dataset_folder, fn), \"w\") 
as f:\n f.write(data.text)\n\n raw_file = os.path.join(dataset_folder, \"human_annotations_sentence.json\")\n val_hash_file = os.path.join(dataset_folder, \"validation_split.txt\")\n test_hash_file = os.path.join(dataset_folder, \"test_split.txt\")\n with open(val_hash_file if self.cut==\"val\" else test_hash_file, \"r\") as f:\n valid_hashes = set([line.strip() for line in f])\n\n with open(raw_file, \"r\") as f:\n raw_dataset = json.load(f)\n dataset = []\n for d in raw_dataset:\n article = d[\"article\"]\n origin = \"cnndm\" if len(d[\"hash\"]) >= 40 else \"xsum\"\n\n if d[\"hash\"] not in valid_hashes:\n continue\n\n summ_labels = []\n annotator_labels = {}\n for annot in d[\"summary_sentences_annotations\"]:\n annot_vals = [an for ans in annot.values() for an in ans]\n noerror_count = len([an for an in annot_vals if an==\"NoE\"])\n label = 1 if noerror_count >= 2 else 0\n summ_labels.append(label)\n for anno_name, anno in annot.items():\n if anno_name not in annotator_labels:\n annotator_labels[anno_name] = []\n annotator_labels[anno_name] += anno\n\n annotations = [1 if all(a==\"NoE\" for a in annos) else 0 for annos in annotator_labels.values()]\n label = 0 if any(sl==0 for sl in summ_labels) else 1\n\n error_type = \"NoE\"\n if label == 0:\n errors = [anno for annos in annotator_labels.values() for anno in annos if anno != \"NoE\"]\n error_type = Counter(errors).most_common(1)[0][0]\n\n summary = d[\"summary\"]\n dataset.append({\"document\": article, \"claim\": summary, \"label\": label, \"cut\": self.cut, \"hash\": d[\"hash\"], \"model_name\": d[\"model_name\"], \"annotations\": annotations, \"dataset\": \"frank\", \"origin\": origin, \"error_type\": error_type})\n self.datasets.append({\"name\": \"frank\", \"dataset\": dataset})\n\n def get_dataset(self, dataset_name):\n for dataset in self.datasets:\n if dataset[\"name\"] == dataset_name:\n return dataset[\"dataset\"]\n raise ValueError(\"Unrecognized dataset name: %s\" % (dataset_name))\n\n def print_stats(self):\n dataset_stats = []\n for dataset in self.datasets:\n N_pos, N_neg = len([d for d in dataset[\"dataset\"] if d[\"label\"]==1]), len([d for d in dataset[\"dataset\"] if d[\"label\"]==0])\n dataset_stats.append({\"name\": dataset[\"name\"], \"N\": len(dataset[\"dataset\"]), \"N_pos\": N_pos, \"N_neg\": N_neg, \"frac_pos\": N_pos/(N_pos+N_neg)})\n print(pd.DataFrame(dataset_stats))\n\n def evaluate(self, scorer):\n benchmark = []\n\n for dataset in self.datasets:\n dataset_labels = [d[\"label\"] for d in dataset[\"dataset\"]]\n dataset_preds = scorer.score([d[\"document\"] for d in dataset[\"dataset\"]], [d[\"claim\"] for d in dataset[\"dataset\"]])[\"scores\"]\n\n dataset_thresh, dataset_f1 = choose_best_threshold(dataset_labels, dataset_preds)\n benchmark.append({\"name\": dataset[\"name\"], \"score\": dataset_f1, \"threshold\": dataset_thresh})\n return {\"overall_score\": np.mean([t[\"score\"] for t in benchmark]), \"benchmark\": benchmark}"
},
{
"identifier": "load_factcc",
"path": "summac/benchmark.py",
"snippet": "def load_factcc(self, max_entries=-1):\n # Evaluating the Factual Consistency of Abstractive Text Summarization [https://arxiv.org/abs/1910.12840]\n # Dataset for each split must be downloaded from the Github repo: https://github.com/salesforce/factCC\n\n dataset_folder = os.path.join(self.benchmark_folder, \"factcc/\")\n if not os.path.exists(dataset_folder):\n print(\"==== FactCC dataset not found, downloading from scratch\")\n os.makedirs(dataset_folder)\n\n urls = [\"https://storage.googleapis.com/sfr-factcc-data-research/unpaired_generated_data.tar.gz\", \"https://storage.googleapis.com/sfr-factcc-data-research/unpaired_annotated_data.tar.gz\"]\n for url in urls:\n zip_name = url.split(\"/\")[-1]\n r = requests.get(url)\n with open(os.path.join(dataset_folder, zip_name), \"wb\") as f:\n f.write(r.content)\n \n with tarfile.open(os.path.join(dataset_folder, zip_name), \"r:gz\") as f:\n f.extractall(dataset_folder)\n os.remove(os.path.join(dataset_folder, zip_name))\n\n if self.cut == \"train\":\n dataset = []\n with open(os.path.join(dataset_folder, \"unpaired_generated_data/data-original/data-train.jsonl\"), \"r\") as f:\n for i, line in enumerate(f):\n if max_entries > 0 and i >= max_entries:\n break\n D = json.loads(line)\n aid = D[\"filepath\"].split(\"/\")[-1].replace(\".story\", \"\")\n full_text = self.get_cnndm_document(aid)\n\n label = 1 if D[\"label\"]==\"CORRECT\" else 0\n datum = {\"document\": full_text, \"claim\": D[\"claim\"], \"cnndm_id\": D[\"id\"], \"label\": label, \"dataset\": \"factcc\", \"origin\": \"cnndm\"}\n dataset.append(datum)\n\n if self.cut in [\"val\", \"test\"]:\n factcc_file = os.path.join(dataset_folder, \"unpaired_annotated_data/%s/data-dev.jsonl\" % (self.cut))\n dataset = []\n with open(factcc_file, \"r\") as f:\n for line in f:\n dataset.append(json.loads(line))\n\n for d in dataset:\n aid = d[\"filepath\"].split(\"/\")[-1].replace(\".story\", \"\")\n d[\"document\"] = self.get_cnndm_document(aid)\n d[\"label\"] = 1 if d[\"label\"] == \"CORRECT\" else 0\n d[\"annotations\"] = [d[\"label\"]]\n d[\"dataset\"] = \"factcc\"\n d[\"origin\"] = \"cnndm\"\n\n self.datasets.append({\"name\": \"factcc\", \"dataset\": dataset})"
},
{
"identifier": "SummaCConv",
"path": "summac/model_summac.py",
"snippet": "def card_to_name(card):\ndef name_to_card(name):\ndef get_neutral_idx(ent_idx, con_idx):\n def __init__(self, model_name=\"mnli\", granularity=\"paragraph\", use_cache=True, max_doc_sents=100, device=\"cuda\", **kwargs):\n def load_nli(self):\n def split_sentences(self, text):\n def split_2sents(self, text):\n def split_paragraphs(self, text):\n def split_text(self, text, granularity=\"sentence\"):\n def build_chunk_dataset(self, original, generated, pair_idx=None):\n def build_image(self, original, generated):\n def build_images(self, originals, generateds, batch_size=128):\n def get_cache_file(self):\n def save_cache(self):\n def load_cache(self):\n def __init__(self, models=[\"mnli\", \"anli\", \"vitc\"], bins='even50', granularity=\"sentence\", nli_labels=\"e\", device=\"cuda\", start_file=None, imager_load_cache=True, agg=\"mean\", **kwargs):\n def build_image(self, original, generated):\n def compute_histogram(self, original=None, generated=None, image=None):\n def forward(self, originals, generateds, images=None):\n def save_imager_cache(self):\n def score(self, originals, generateds, **kwargs):\n def __init__(self, model_name=\"mnli\", granularity=\"paragraph\", op1=\"max\", op2=\"mean\", use_ent=True, use_con=True, imager_load_cache=True, device=\"cuda\", **kwargs):\n def save_imager_cache(self):\n def score_one(self, original, generated):\n def image2score(self, image):\n def score(self, sources, generateds, batch_size=128, **kwargs):\nclass SummaCImager:\nclass SummaCConv(torch.nn.Module):\nclass SummaCZS:\n N = len(histograms)"
}
] | from .utils_misc import select_freer_gpu
from torch.utils.data import DataLoader, RandomSampler
from .utils_optim import build_optimizer
from .benchmark import SummaCBenchmark, load_factcc
from .model_summac import SummaCConv, model_map
import torch, tqdm, nltk, numpy as np, argparse, json
import os, time | 7,231 |
select_freer_gpu()
def train(model="mnli", granularity="sentence", nli_labels="e", pre_file="", num_epochs=5, optimizer="adam", train_batch_size=32, learning_rate=0.1, bins="even50", silent=False, norm_histo=False):
experiment = "%s_%s_%s_%s" % (model, granularity, bins, nli_labels)
if not silent:
print("Experiment name: %s" % (experiment))
if len(pre_file) == 0:
standard_pre_file = "/home/phillab/data/summac_cache/train_%s_%s.jsonl" % (model, granularity)
if os.path.isfile(standard_pre_file):
pre_file = standard_pre_file
precomputed = len(pre_file) > 0
device = "cpu" if precomputed else "cuda"
if model == "multi":
models = ["mnli", "anli", "vitc"]
elif model == "multi2":
models = ["mnli", "vitc", "vitc-only", "vitc-base"]
else:
models = [model]
model = SummaCConv(models=models, granularity=granularity, nli_labels=nli_labels, device=device, bins=bins, norm_histo=norm_histo)
optimizer = build_optimizer(model, learning_rate=learning_rate, optimizer_name=optimizer)
if not silent:
print("Model Loaded")
def sent_tok(text):
sentences = nltk.tokenize.sent_tokenize(text)
return [sent for sent in sentences if len(sent)>10]
def collate_func(inps):
documents, claims, labels = [], [], []
for inp in inps:
if len(sent_tok(inp["claim"])) > 0 and len(sent_tok(inp["document"])) > 0:
documents.append(inp["document"])
claims.append(inp["claim"])
labels.append(inp["label"])
labels = torch.LongTensor(labels).to(device)
return documents, claims, labels
def collate_pre(inps):
documents = [inp["document"] for inp in inps]
claims = [inp["claim"] for inp in inps]
# images = [[np.array(im) for im in inp["image"]] for inp in inps]
images = [np.array(inp["image"]) for inp in inps]
labels = torch.LongTensor([inp["label"] for inp in inps]).to(device)
return documents, claims, images, labels
if precomputed:
d_train = []
with open(pre_file, "r") as f:
for line in f:
d_train.append(json.loads(line))
dl_train = DataLoader(dataset=d_train, batch_size=train_batch_size, sampler=RandomSampler(d_train), collate_fn=collate_pre)
else:
|
select_freer_gpu()
def train(model="mnli", granularity="sentence", nli_labels="e", pre_file="", num_epochs=5, optimizer="adam", train_batch_size=32, learning_rate=0.1, bins="even50", silent=False, norm_histo=False):
experiment = "%s_%s_%s_%s" % (model, granularity, bins, nli_labels)
if not silent:
print("Experiment name: %s" % (experiment))
if len(pre_file) == 0:
standard_pre_file = "/home/phillab/data/summac_cache/train_%s_%s.jsonl" % (model, granularity)
if os.path.isfile(standard_pre_file):
pre_file = standard_pre_file
precomputed = len(pre_file) > 0
device = "cpu" if precomputed else "cuda"
if model == "multi":
models = ["mnli", "anli", "vitc"]
elif model == "multi2":
models = ["mnli", "vitc", "vitc-only", "vitc-base"]
else:
models = [model]
model = SummaCConv(models=models, granularity=granularity, nli_labels=nli_labels, device=device, bins=bins, norm_histo=norm_histo)
optimizer = build_optimizer(model, learning_rate=learning_rate, optimizer_name=optimizer)
if not silent:
print("Model Loaded")
def sent_tok(text):
sentences = nltk.tokenize.sent_tokenize(text)
return [sent for sent in sentences if len(sent)>10]
def collate_func(inps):
documents, claims, labels = [], [], []
for inp in inps:
if len(sent_tok(inp["claim"])) > 0 and len(sent_tok(inp["document"])) > 0:
documents.append(inp["document"])
claims.append(inp["claim"])
labels.append(inp["label"])
labels = torch.LongTensor(labels).to(device)
return documents, claims, labels
def collate_pre(inps):
documents = [inp["document"] for inp in inps]
claims = [inp["claim"] for inp in inps]
# images = [[np.array(im) for im in inp["image"]] for inp in inps]
images = [np.array(inp["image"]) for inp in inps]
labels = torch.LongTensor([inp["label"] for inp in inps]).to(device)
return documents, claims, images, labels
if precomputed:
d_train = []
with open(pre_file, "r") as f:
for line in f:
d_train.append(json.loads(line))
dl_train = DataLoader(dataset=d_train, batch_size=train_batch_size, sampler=RandomSampler(d_train), collate_fn=collate_pre)
else: | d_train = load_factcc(cut="train") | 3 | 2023-10-13 11:29:39+00:00 | 12k |
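A minimal, self-contained sketch of the precomputed-image batching pattern used in the train() code above: JSONL-style records are wrapped in a torch DataLoader with the custom collate_pre function. The toy records, their field values, and keeping the labels on CPU are illustrative assumptions, not data from this row.

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler

# Two toy records shaped like the precomputed entries (document, claim, NLI image, label).
records = [
    {"document": "doc a", "claim": "claim a", "image": [[0.1, 0.2], [0.3, 0.4]], "label": 1},
    {"document": "doc b", "claim": "claim b", "image": [[0.5, 0.6], [0.7, 0.8]], "label": 0},
]

def collate_pre(inps):
    # Group raw strings, convert each image to a numpy array, and stack labels into a LongTensor.
    documents = [inp["document"] for inp in inps]
    claims = [inp["claim"] for inp in inps]
    images = [np.array(inp["image"]) for inp in inps]
    labels = torch.LongTensor([inp["label"] for inp in inps])
    return documents, claims, images, labels

loader = DataLoader(records, batch_size=2, sampler=RandomSampler(records), collate_fn=collate_pre)
for documents, claims, images, labels in loader:
    print(len(documents), images[0].shape, labels.tolist())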
cpuimage/minSDXLTF | stable_diffusion_xl/stable_diffusion_xl.py | [
{
"identifier": "SimpleTokenizer",
"path": "stable_diffusion_xl/clip_tokenizer.py",
"snippet": "class SimpleTokenizer:\n def __init__(self, bpe_path=None):\n bpe_path = bpe_path or tf.keras.utils.get_file(\n \"bpe_simple_vocab_16e6.txt.gz\",\n \"https://github.com/openai/CLIP/blob/main/clip/bpe_simple_vocab_16e6.txt.gz?raw=true\", # noqa: E501\n file_hash=\"924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a\", # noqa: E501\n )\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n merges = gzip.open(bpe_path).read().decode(\"utf-8\").split(\"\\n\")\n merges = merges[1: 49152 - 256 - 2 + 1]\n merges = [tuple(merge.split()) for merge in merges]\n vocab = list(bytes_to_unicode().values())\n vocab = vocab + [v + \"</w>\" for v in vocab]\n for merge in merges:\n vocab.append(\"\".join(merge))\n vocab.extend([\"<|startoftext|>\", \"<|endoftext|>\"])\n self.vocab = vocab\n self.encoder = self._create_encoder(self.vocab)\n self.decoder = self._create_decoder(self.encoder)\n self.bpe_ranks = dict(zip(merges, range(len(merges))))\n\n self.special_tokens = {\n \"<|startoftext|>\": \"<|startoftext|>\",\n \"<|endoftext|>\": \"<|endoftext|>\",\n }\n self.cache = {\n \"<|startoftext|>\": \"<|startoftext|>\",\n \"<|endoftext|>\": \"<|endoftext|>\",\n }\n self.pat = self._create_pat()\n\n def _create_encoder(self, vocab):\n return dict(zip(vocab, range(len(vocab))))\n\n def _create_decoder(self, encoder):\n return {v: k for k, v in encoder.items()}\n\n def _create_pat(self):\n return re.compile(\n \"|\".join([re.escape(key) for key in self.special_tokens.keys()])\n + r\"\"\"|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+\"\"\",\n re.IGNORECASE,\n )\n\n @property\n def end_of_text(self):\n return self.encoder[\"<|endoftext|>\"]\n\n @property\n def start_of_text(self):\n return self.encoder[\"<|startoftext|>\"]\n\n def add_tokens(self, tokens):\n if isinstance(tokens, str):\n tokens = [tokens]\n tokens_added = 0\n for token in tokens:\n if token in self.vocab:\n continue\n tokens_added += 1\n self.vocab.append(token)\n self.special_tokens[token] = token\n self.cache[token] = token\n self.encoder = self._create_encoder(self.vocab)\n self.decoder = self._create_decoder(self.encoder)\n self.pat = self._create_pat()\n return tokens_added\n\n def bpe(self, token):\n if token in self.cache:\n return self.cache[token]\n word = tuple(token[:-1]) + (token[-1] + \"</w>\",)\n pairs = get_pairs(word)\n\n if not pairs:\n return token + \"</w>\"\n\n while True:\n bigram = min(\n pairs, key=lambda pair: self.bpe_ranks.get(pair, float(\"inf\"))\n )\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except:\n new_word.extend(word[i:])\n break\n\n if (word[i] == first\n and i < len(word) - 1\n and word[i + 1] == second):\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = \" \".join(word)\n self.cache[token] = word\n return word\n\n def encode(self, text):\n bpe_tokens = []\n text = whitespace_clean(basic_clean(text)).lower()\n for token in re.findall(self.pat, text):\n token = \"\".join(self.byte_encoder[b] for b in token.encode(\"utf-8\"))\n bpe_tokens.extend(\n self.encoder[bpe_token]\n for bpe_token in self.bpe(token).split(\" \")\n )\n return [self.start_of_text] + bpe_tokens + [self.end_of_text]\n\n def decode(self, tokens):\n text = 
\"\".join([self.decoder[token] for token in tokens])\n text = (\n bytearray([self.byte_decoder[c] for c in text])\n .decode(\"utf-8\", errors=\"replace\")\n .replace(\"</w>\", \" \")\n )\n return text"
},
{
"identifier": "DiffusionXLModel",
"path": "stable_diffusion_xl/diffusion_model.py",
"snippet": "class DiffusionXLModel(tf.keras.Model):\n @staticmethod\n def push_block(hidden_states, res_stack):\n res_stack.append(hidden_states)\n return res_stack\n\n @staticmethod\n def pop_block(hidden_states, res_stack):\n res_hidden_states = res_stack.pop()\n hidden_states = tf.concat([hidden_states, res_hidden_states], axis=-1)\n return hidden_states, res_stack\n\n def __init__(self, img_height=1024, img_width=1024, name=None, ckpt_path=None, lora_dict=None):\n sample = tf.keras.layers.Input((img_height // 8, img_width // 8, 4))\n timestep = tf.keras.layers.Input(())\n text_emb = tf.keras.layers.Input((None, 2048))\n text_embeds = tf.keras.layers.Input((1280,))\n time_ids = tf.keras.layers.Input((6,))\n # 1. time\n t_emb = Timesteps(320, name=\"time_proj\")(timestep)\n t_emb = tf.reshape(t_emb, (-1, 320))\n t_emb = Linear(1280, name=\"time_embedding.linear_1\")(tf.cast(t_emb, sample.dtype))\n t_emb = tf.keras.layers.Activation(\"swish\")(t_emb)\n t_emb = Linear(1280, name=\"time_embedding.linear_2\")(t_emb)\n time_embeds = Timesteps(256, name=\"add_time_proj\")(time_ids)\n time_embeds = tf.reshape(time_embeds, (-1, 1536)) # 6*256 = 1536\n add_embeds = tf.concat([text_embeds, time_embeds], axis=-1)\n add_embeds = tf.cast(add_embeds, sample.dtype)\n add_embeds = Linear(1280, name=\"add_embedding.linear_1\")(add_embeds)\n add_embeds = tf.keras.layers.Activation(\"swish\")(add_embeds)\n add_embeds = Linear(1280, name=\"add_embedding.linear_2\")(add_embeds)\n time_emb = tf.keras.layers.Activation(\"swish\")(t_emb + add_embeds)\n # 2. pre-process\n hidden_states = tf.keras.layers.Conv2D(320, kernel_size=3, strides=1, name=\"conv_in\")(\n tf.keras.layers.ZeroPadding2D(1)(sample))\n res_stack = [hidden_states]\n # 3. blocks\n # DownBlock2D\n hidden_states = ResnetBlock(320, name=\"down_blocks.0.resnets.0\")((hidden_states, time_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"down_blocks.0.resnets.1\")((hidden_states, time_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = DownSampler(320, name=\"down_blocks.0.downsamplers.0\")(hidden_states)\n res_stack = self.push_block(hidden_states, res_stack)\n # CrossAttnDownBlock2D\n hidden_states = ResnetBlock(640, name=\"down_blocks.1.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"down_blocks.1.attentions.0\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"down_blocks.1.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"down_blocks.1.attentions.1\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = DownSampler(640, name=\"down_blocks.1.downsamplers.0\")(hidden_states)\n res_stack = self.push_block(hidden_states, res_stack)\n # CrossAttnDownBlock2D\n hidden_states = ResnetBlock(1280, name=\"down_blocks.2.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"down_blocks.2.attentions.0\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"down_blocks.2.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"down_blocks.2.attentions.1\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n # UNetMidBlock2DCrossAttn\n hidden_states = ResnetBlock(1280, 
name=\"mid_block.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"mid_block.attentions.0\")((hidden_states, text_emb))\n hidden_states = ResnetBlock(1280, name=\"mid_block.resnets.1\")((hidden_states, time_emb))\n # CrossAttnUpBlock2D\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"up_blocks.0.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"up_blocks.0.attentions.0\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"up_blocks.0.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"up_blocks.0.attentions.1\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"up_blocks.0.resnets.2\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"up_blocks.0.attentions.2\")((hidden_states, text_emb))\n hidden_states = UpSampler(1280, name=\"up_blocks.0.upsamplers.0\")(hidden_states)\n # CrossAttnUpBlock2D\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"up_blocks.1.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"up_blocks.1.attentions.0\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"up_blocks.1.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"up_blocks.1.attentions.1\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"up_blocks.1.resnets.2\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"up_blocks.1.attentions.2\")((hidden_states, text_emb))\n hidden_states = UpSampler(640, name=\"up_blocks.1.upsamplers.0\")(hidden_states)\n # UpBlock2D\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"up_blocks.2.resnets.0\")((hidden_states, time_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"up_blocks.2.resnets.1\")((hidden_states, time_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"up_blocks.2.resnets.2\")((hidden_states, time_emb))\n hidden_states = GroupNormalization(32, epsilon=1e-05, center=True, scale=True,\n name=\"conv_norm_out\")(\n hidden_states)\n hidden_states = tf.keras.layers.Activation(\"swish\")(hidden_states)\n output = tf.keras.layers.Conv2D(4, kernel_size=3, strides=1, name=\"conv_out\")(\n tf.keras.layers.ZeroPadding2D(1)(hidden_states))\n super().__init__([sample, timestep, text_emb, time_ids, text_embeds], output, name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/unet/diffusion_pytorch_model.fp16.safetensors\"\n ckpt_mapping = CKPT_MAPPING[\"diffusion_model\"]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, key_mapping=UNET_KEY_MAPPING,\n lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if 
os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, key_mapping=UNET_KEY_MAPPING,\n lora_dict=lora_dict)"
},
{
"identifier": "ImageDecoder",
"path": "stable_diffusion_xl/image_decoder.py",
"snippet": "class ImageDecoder(tf.keras.Sequential):\n def __init__(self, img_height=1024, img_width=1024, name=None, ckpt_path=None):\n super().__init__(\n [\n tf.keras.layers.Input((img_height // 8, img_width // 8, 4)),\n tf.keras.layers.Rescaling(1.0 / 0.13025),\n tf.keras.layers.Conv2D(4, 1, strides=1),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(512, 3, strides=1),\n VaeResnetBlock(512),\n VaeAttentionBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n UpSampler(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n UpSampler(512),\n VaeResnetBlock(256),\n VaeResnetBlock(256),\n VaeResnetBlock(256),\n UpSampler(256),\n VaeResnetBlock(128),\n VaeResnetBlock(128),\n VaeResnetBlock(128),\n GroupNormalization(epsilon=1e-5),\n tf.keras.layers.Activation(\"swish\"),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(3, 3, strides=1),\n ],\n name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/vae_1_0/diffusion_pytorch_model.fp16.safetensors\"\n ckpt_mapping = CKPT_MAPPING[\"decoder\"]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)"
},
{
"identifier": "ImageEncoder",
"path": "stable_diffusion_xl/image_encoder.py",
"snippet": "class ImageEncoder(tf.keras.Sequential):\n \"\"\"ImageEncoder is the VAE Encoder for StableDiffusionXL.\"\"\"\n\n def __init__(self, ckpt_path=None):\n super().__init__(\n [\n tf.keras.layers.Input((None, None, 3)),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(128, 3, strides=1),\n VaeResnetBlock(128),\n VaeResnetBlock(128),\n DownSampler(128, padding=((0, 1), (0, 1))),\n VaeResnetBlock(256),\n VaeResnetBlock(256),\n DownSampler(256, padding=((0, 1), (0, 1))),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n DownSampler(512, padding=((0, 1), (0, 1))),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeAttentionBlock(512),\n VaeResnetBlock(512),\n GroupNormalization(epsilon=1e-5),\n tf.keras.layers.Activation(\"swish\"),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(8, 3, strides=1),\n tf.keras.layers.Conv2D(8, 1, strides=1),\n tf.keras.layers.Lambda(lambda x: tf.split(x, num_or_size_splits=2, axis=-1)[0] * 0.13025),\n ])\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/vae_1_0/diffusion_pytorch_model.fp16.safetensors\"\n ckpt_mapping = CKPT_MAPPING[\"encoder\"]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)"
},
{
"identifier": "get_weighted_text_embeddings",
"path": "stable_diffusion_xl/long_prompt_weighting.py",
"snippet": "def get_weighted_text_embeddings(\n tokenizer,\n text_encoder,\n prompt: Union[str, List[str]],\n max_embeddings_multiples: Optional[int] = 4,\n no_boseos_middle: Optional[bool] = False,\n skip_parsing: Optional[bool] = False,\n skip_weighting: Optional[bool] = False,\n model_max_length=77,\n pad_token_id=49407,\n text_encoder_pool=None,\n):\n r\"\"\"\n Prompts can be assigned with local weights using brackets. For example,\n prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',\n and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.\n\n Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the original mean.\n\n Args:\n tokenizer : provide access to the tokenizer\n text_encoder : provide access to the text encoder.\n prompt (`str` or `List[str]`):\n The prompt or prompts to guide the image generation.\n max_embeddings_multiples (`int`, *optional*, defaults to `1`):\n The max multiple length of prompt embeddings compared to the max output length of text encoder.\n no_boseos_middle (`bool`, *optional*, defaults to `False`):\n If the length of text token is multiples of the capacity of text encoder, whether reserve the starting and\n ending token in each of the chunk in the middle.\n skip_parsing (`bool`, *optional*, defaults to `False`):\n Skip the parsing of brackets.\n skip_weighting (`bool`, *optional*, defaults to `False`):\n Skip the weighting. When the parsing is skipped, it is forced True.\n \"\"\"\n max_length = (model_max_length - 2) * max_embeddings_multiples + 2\n if isinstance(prompt, str):\n prompt = [prompt]\n\n if not skip_parsing:\n prompt_tokens, prompt_weights = get_prompts_with_weights(tokenizer, prompt, max_length - 2)\n else:\n prompt_tokens = [\n token[1:-1]\n for token in tokenizer.encode(prompt)[:max_length]\n ]\n prompt_weights = [[1.0] * len(token) for token in prompt_tokens]\n\n # round up the longest length of tokens to a multiple of (model_max_length - 2)\n max_length = max([len(token) for token in prompt_tokens])\n\n max_embeddings_multiples = min(\n max_embeddings_multiples,\n (max_length - 1) // (model_max_length - 2) + 1,\n )\n max_embeddings_multiples = max(1, max_embeddings_multiples)\n max_length = (model_max_length - 2) * max_embeddings_multiples + 2\n\n # pad the length of tokens and weights\n bos = tokenizer.start_of_text\n eos = tokenizer.end_of_text\n pad = pad_token_id\n prompt_tokens, prompt_weights = pad_tokens_and_weights(\n prompt_tokens,\n prompt_weights,\n max_length,\n bos,\n eos,\n pad,\n no_boseos_middle=no_boseos_middle,\n chunk_length=model_max_length,\n )\n prompt_tokens = np.array(prompt_tokens, dtype=np.int32)\n # get the embeddings\n if pad_token_id != 0:\n text_embeddings_pool = None\n text_embeddings = get_unweighted_text_embeddings_openai(\n text_encoder,\n prompt_tokens,\n model_max_length,\n no_boseos_middle=no_boseos_middle,\n )\n else:\n text_embeddings, text_embeddings_pool = get_unweighted_text_embeddings_laion(\n text_encoder,\n prompt_tokens,\n model_max_length,\n no_boseos_middle=no_boseos_middle,\n text_encoder_pool=text_encoder_pool,\n )\n prompt_weights = np.array(prompt_weights, dtype=text_embeddings.dtype)\n if (not skip_parsing) and (not skip_weighting):\n previous_mean = text_embeddings.mean(axis=(-2, -1))\n text_embeddings *= prompt_weights[:, :, None]\n text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None]\n return text_embeddings, text_embeddings_pool"
},
{
"identifier": "Scheduler",
"path": "stable_diffusion_xl/scheduler.py",
"snippet": "class Scheduler(object):\n \"\"\"\n `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with\n non-Markovian guidance.\n\n\n Args:\n num_train_timesteps (`int`, defaults to 1000):\n The number of diffusion steps to train the model.\n beta_start (`float`, defaults to 0.0001):\n The starting `beta` value of inference.\n beta_end (`float`, defaults to 0.02):\n The final `beta` value.\n active_lcm (`bool`, defaults true):\n apply lcm or not.\n original_inference_steps (`int`, *optional*, defaults to 50):\n The default number of inference steps used to generate a linearly-spaced timestep schedule, from which we\n will ultimately take `num_inference_steps` evenly spaced timesteps to form the final timestep schedule.\n timestep_scaling (`float`, defaults to 10.0):\n The factor the timesteps will be multiplied by when calculating the consistency model boundary conditions\n `c_skip` and `c_out`. Increasing this will decrease the approximation error (although the approximation\n error at the default of `10.0` is already pretty small).\n \"\"\"\n\n def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012,\n original_inference_steps: int = 50, timestep_scaling: float = 10.0, active_lcm=True):\n self.active_lcm = active_lcm\n self.num_train_timesteps = num_train_timesteps\n self.original_inference_steps = original_inference_steps\n self.timestep_scaling = timestep_scaling\n # this schedule is very specific to the latent diffusion model.\n self.alphas_cumprod = np.cumprod(\n 1. - np.square(np.linspace(np.sqrt(beta_start), np.sqrt(beta_end), num_train_timesteps)), axis=0)\n self.signal_rates = np.sqrt(self.alphas_cumprod)\n self.noise_rates = np.sqrt(1. - self.alphas_cumprod)\n self.final_alpha_cumprod = 1.0\n # standard deviation of the initial noise distribution\n self.init_noise_sigma = 1.0\n # setable values\n self.num_inference_steps = None\n self.timesteps = np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int32)\n self._step_index = None\n\n # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index\n def _init_step_index(self, timestep):\n index_candidates = np.nonzero(self.timesteps == timestep)\n # The sigma index that is taken for the **very** first `step`\n # is always the second index (or the last index if there is only 1)\n # This way we can ensure we don't accidentally skip a sigma in\n # case we start in the middle of the denoising schedule (e.g. for image-to-image)\n if len(index_candidates) > 1:\n step_index = index_candidates[1]\n else:\n step_index = index_candidates[0]\n self._step_index = step_index\n\n @property\n def step_index(self):\n return self._step_index\n\n def set_timesteps(self, num_inference_steps: int, original_inference_steps: Optional[int] = None,\n strength: int = 1.0):\n \"\"\"\n Sets the discrete timesteps used for the diffusion chain (to be run before inference).\n\n Args:\n num_inference_steps (`int`):\n The number of diffusion steps used when generating samples with a pre-trained model.\n original_inference_steps (`int`, *optional*):\n The original number of inference steps, which will be used to generate a linearly-spaced timestep\n schedule (which is different from the standard `diffusers` implementation). We will then take\n `num_inference_steps` timesteps from this schedule, evenly spaced in terms of indices, and use that as\n our final timestep schedule. 
If not set, this will default to the `original_inference_steps` attribute.\n \"\"\"\n\n if num_inference_steps > self.num_train_timesteps:\n raise ValueError(\n f\"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config_train_timesteps`:\"\n f\" {self.num_train_timesteps} as the unet model trained with this scheduler can only handle\"\n f\" maximal {self.num_train_timesteps} timesteps.\")\n self.num_inference_steps = num_inference_steps\n if self.active_lcm:\n original_steps = (\n original_inference_steps if original_inference_steps is not None else self.original_inference_steps)\n\n if original_steps > self.num_train_timesteps:\n raise ValueError(\n f\"`original_steps`: {original_steps} cannot be larger than `self.config_train_timesteps`:\"\n f\" {self.num_train_timesteps} as the unet model trained with this scheduler can only handle\"\n f\" maximal {self.num_train_timesteps} timesteps.\")\n if num_inference_steps > original_steps:\n raise ValueError(\n f\"`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`:\"\n f\" {original_steps} because the final timestep schedule will be a subset of the\"\n f\" `original_inference_steps`-sized initial timestep schedule.\")\n # LCM Timesteps Setting\n # Currently, only linear spacing is supported.\n c = self.num_train_timesteps // original_steps\n # LCM Training Steps Schedule\n lcm_origin_timesteps = np.asarray(list(range(1, int(original_steps * strength) + 1))) * c - 1\n skipping_step = len(lcm_origin_timesteps) // num_inference_steps\n # LCM Inference Steps Schedule\n timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]\n else:\n timesteps = np.linspace(0, 1000 - 1, num_inference_steps, dtype=np.int32)[::-1]\n self.timesteps = timesteps.copy().astype(np.int32)\n self._step_index = None\n\n def get_scalings_for_boundary_condition_discrete(self, timestep, sigma_data=0.5):\n scaled_timestep = timestep * self.timestep_scaling\n c_skip = sigma_data ** 2 / (scaled_timestep ** 2 + sigma_data ** 2)\n c_out = scaled_timestep / (scaled_timestep ** 2 + sigma_data ** 2) ** 0.5\n return c_skip, c_out\n\n def step(self, latent: np.ndarray, timestep: int, latent_prev: np.ndarray):\n \"\"\"\n Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion\n process from the learned model outputs (most often the predicted noise).\n\n Args:\n latent (`np.ndarray`):\n The direct output from learned diffusion model.\n timestep (`float`):\n The current discrete timestep in the diffusion chain.\n latent_prev (`np.ndarray`):\n A current instance of a sample created by the diffusion process.\n \"\"\"\n if self.num_inference_steps is None:\n raise ValueError(\n \"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler\")\n\n if self.step_index is None:\n self._init_step_index(timestep)\n # 1. get previous step value\n prev_step_index = self.step_index + 1\n if prev_step_index < len(self.timesteps):\n prev_timestep = self.timesteps[prev_step_index]\n else:\n prev_timestep = timestep\n next_signal_rates = self.signal_rates[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n next_noise_rates = self.noise_rates[prev_timestep]\n signal_rates = self.signal_rates[timestep]\n noise_rates = self.noise_rates[timestep]\n # 2. Compute the predicted original sample x_0 based on the model parameterization\n pred_x0 = (latent_prev - noise_rates * latent) / signal_rates\n # 3. 
Denoise model output using boundary conditions\n if self.active_lcm:\n # 4. Get scalings for boundary conditions\n c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)\n denoised = c_out * pred_x0 + c_skip * latent_prev\n # 5. Sample and inject noise z ~ N(0, I) for MultiStep Inference\n # Noise is not used on the final timestep of the timestep schedule.\n # This also means that noise is not used for one-step sampling.\n if self.step_index != self.num_inference_steps - 1:\n noise = np.random.randn(*latent.shape).astype(np.float32)\n latent = next_signal_rates * denoised + next_noise_rates * noise\n else:\n latent = denoised\n else:\n if self.step_index != self.num_inference_steps - 1:\n latent = next_signal_rates * pred_x0 + next_noise_rates * latent\n else:\n latent = pred_x0\n # upon completion increase step index by one\n self._step_index += 1\n return latent\n\n def __len__(self):\n return self.num_train_timesteps"
},
{
"identifier": "TextEncoderLaion",
"path": "stable_diffusion_xl/text_encoder_laion.py",
"snippet": "class TextEncoderLaion(tf.keras.Model):\n def __init__(self, max_length=77, embed_dim=1280, vocab_size=49408, num_heads=20, num_layers=32, name=None,\n ckpt_path=None, lora_dict=None):\n tokens = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"tokens\")\n positions = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"positions\")\n clip_emb = CLIPEmbedding(vocab_size, embed_dim, max_length, name=\"embeddings\")([tokens, positions])\n x = clip_emb\n out = []\n for idx in range(num_layers):\n x = CLIPEncoderLayer(embed_dim, num_heads, activation=gelu,\n name=\"text_model.encoder.layers.{}\".format(idx))(x)\n out.append(x)\n embedded = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"text_model.final_layer_norm\")(out[-1])\n super().__init__([tokens, positions], [out[-2], embedded], name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder_2/model.fp16.safetensors\"\n ckpt_mapping = [('text_model.embeddings.token_embedding.weight', None),\n ('text_model.embeddings.position_embedding.weight', None)]\n for idx in range(0, num_layers):\n layers_name = 'text_model.encoder.layers.{}'.format(idx)\n ckpt_mapping.append(('{}.layer_norm1.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.q_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.q_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.k_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.k_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.v_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.v_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.out_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.out_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc1.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc2.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc2.bias'.format(layers_name), None))\n ckpt_mapping.append(('text_model.final_layer_norm.weight', None))\n ckpt_mapping.append(('text_model.final_layer_norm.bias', None))\n # ckpt_mapping.append(('text_projection.weight', (1, 0)))\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)"
},
{
"identifier": "TextEncoderLaionProj",
"path": "stable_diffusion_xl/text_encoder_laion.py",
"snippet": "class TextEncoderLaionProj(tf.keras.Model):\n def __init__(self, embed_dim=1280, name=None, ckpt_path=None, lora_dict=None):\n embedded = tf.keras.layers.Input(shape=(embed_dim,), dtype=\"float32\", name=\"embedded\")\n proje_out = tf.keras.layers.Dense(1280, name=\"text_projection\", use_bias=False)(embedded)\n super().__init__(embedded, proje_out, name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder_2/model.fp16.safetensors\"\n ckpt_mapping = [('text_projection.weight', (1, 0))]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)"
},
{
"identifier": "TextEncoderOpenAi",
"path": "stable_diffusion_xl/text_encoder_openai.py",
"snippet": "class TextEncoderOpenAi(tf.keras.Model):\n def __init__(self, max_length=77, embed_dim=768, vocab_size=49408, num_heads=12, num_layers=12, clip_skip=-2,\n final_layer_norm=False,\n name=None,\n ckpt_path=None, lora_dict=None):\n tokens = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"tokens\")\n positions = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"positions\")\n clip_emb = CLIPEmbedding(vocab_size, embed_dim, max_length, name=\"embeddings\")([tokens, positions])\n x = clip_emb\n out = []\n for idx in range(num_layers):\n x = CLIPEncoderLayer(embed_dim, num_heads, activation=quick_gelu,\n name=\"text_model.encoder.layers.{}\".format(idx))(x)\n out.append(x)\n embedded = out[clip_skip]\n if final_layer_norm:\n embedded = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"text_model.final_layer_norm\")(embedded)\n super().__init__([tokens, positions], embedded, name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder/model.fp16.safetensors\"\n ckpt_mapping = [('text_model.embeddings.token_embedding.weight', None),\n ('text_model.embeddings.position_embedding.weight', None)]\n for idx in range(0, num_layers + clip_skip + 1):\n layers_name = 'text_model.encoder.layers.{}'.format(idx)\n ckpt_mapping.append(('{}.layer_norm1.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.q_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.q_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.k_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.k_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.v_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.v_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.out_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.out_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc1.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc2.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc2.bias'.format(layers_name), None))\n if final_layer_norm:\n ckpt_mapping.append(('text_model.final_layer_norm.weight', None))\n ckpt_mapping.append(('text_model.final_layer_norm.bias', None))\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)"
}
] | import numpy as np
import tensorflow as tf
from PIL import Image
from scipy.ndimage import correlate1d
from .clip_tokenizer import SimpleTokenizer
from .diffusion_model import DiffusionXLModel
from .image_decoder import ImageDecoder
from .image_encoder import ImageEncoder
from .long_prompt_weighting import get_weighted_text_embeddings
from .scheduler import Scheduler
from .text_encoder_laion import TextEncoderLaion, TextEncoderLaionProj
from .text_encoder_openai import TextEncoderOpenAi | 10,336 | # Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras implementation of StableDiffusionXL."""
MAX_PROMPT_LENGTH = 77
class StableDiffusionXLBase:
"""Base class for stable diffusion xl model."""
def __init__(self, img_height=1024, img_width=1024, jit_compile=False,
active_lcm=False):
self.img_height = img_height
self.img_width = img_width
# lazy initialize the component models and the tokenizer
self._image_encoder = None
self._text_encoder_laion = None
self._text_encoder_laion_proj = None
self._text_encoder_openai = None
self._diffusion_model = None
self._image_decoder = None
self._tokenizer = None
self.jit_compile = jit_compile
self.active_lcm = active_lcm
| # Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras implementation of StableDiffusionXL."""
MAX_PROMPT_LENGTH = 77
class StableDiffusionXLBase:
"""Base class for stable diffusion xl model."""
def __init__(self, img_height=1024, img_width=1024, jit_compile=False,
active_lcm=False):
self.img_height = img_height
self.img_width = img_width
# lazy initialize the component models and the tokenizer
self._image_encoder = None
self._text_encoder_laion = None
self._text_encoder_laion_proj = None
self._text_encoder_openai = None
self._diffusion_model = None
self._image_decoder = None
self._tokenizer = None
self.jit_compile = jit_compile
self.active_lcm = active_lcm | self.scheduler = Scheduler(active_lcm=active_lcm) | 5 | 2023-10-14 18:40:16+00:00 | 12k |
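A small numpy sketch of the mean-preserving prompt reweighting described in the get_weighted_text_embeddings docstring above: embedding rows for bracketed tokens are scaled up, then the whole tensor is rescaled so its overall mean is unchanged. The tensor shape, the uniform dummy values, and which token positions receive the 1.1 boost are assumptions for illustration only.

import numpy as np

text_embeddings = np.random.rand(1, 77, 2048).astype(np.float32)  # (batch, tokens, dim), dummy values
prompt_weights = np.ones((1, 77), dtype=np.float32)
prompt_weights[0, 2:5] = 1.1  # tokens inside "(...)" receive the 1.1 weight mentioned in the docstring

previous_mean = text_embeddings.mean(axis=(-2, -1))
text_embeddings *= prompt_weights[:, :, None]
# Rescale so the weighted embedding keeps the original mean while boosted tokens gain relative emphasis.
text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None]

print(np.allclose(text_embeddings.mean(axis=(-2, -1)), previous_mean, atol=1e-4))  # expected: True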
cumulo-autumn/StreamDiffusion | examples/screen/main.py | [
{
"identifier": "receive_images",
"path": "utils/viewer.py",
"snippet": "def receive_images(queue: Queue, fps_queue: Queue) -> None:\n \"\"\"\n Setup the Tkinter window and start the thread to receive images.\n\n Parameters\n ----------\n queue : Queue\n The queue to receive images from.\n fps_queue : Queue\n The queue to put the calculated fps.\n \"\"\"\n root = tk.Tk()\n root.title(\"Image Viewer\")\n label = tk.Label(root)\n fps_label = tk.Label(root, text=\"FPS: 0\")\n label.grid(column=0)\n fps_label.grid(column=1)\n\n def on_closing():\n print(\"window closed\")\n root.quit() # stop event loop\n return\n\n thread = threading.Thread(\n target=_receive_images, args=(queue, fps_queue, label, fps_label), daemon=True\n )\n thread.start()\n\n try:\n root.protocol(\"WM_DELETE_WINDOW\", on_closing)\n root.mainloop()\n except KeyboardInterrupt:\n return"
},
{
"identifier": "StreamDiffusionWrapper",
"path": "utils/wrapper.py",
"snippet": "class StreamDiffusionWrapper:\n def __init__(\n self,\n model_id_or_path: str,\n t_index_list: List[int],\n lora_dict: Optional[Dict[str, float]] = None,\n mode: Literal[\"img2img\", \"txt2img\"] = \"img2img\",\n output_type: Literal[\"pil\", \"pt\", \"np\", \"latent\"] = \"pil\",\n lcm_lora_id: Optional[str] = None,\n vae_id: Optional[str] = None,\n device: Literal[\"cpu\", \"cuda\"] = \"cuda\",\n dtype: torch.dtype = torch.float16,\n frame_buffer_size: int = 1,\n width: int = 512,\n height: int = 512,\n warmup: int = 10,\n acceleration: Literal[\"none\", \"xformers\", \"tensorrt\"] = \"tensorrt\",\n do_add_noise: bool = True,\n device_ids: Optional[List[int]] = None,\n use_lcm_lora: bool = True,\n use_tiny_vae: bool = True,\n enable_similar_image_filter: bool = False,\n similar_image_filter_threshold: float = 0.98,\n similar_image_filter_max_skip_frame: int = 10,\n use_denoising_batch: bool = True,\n cfg_type: Literal[\"none\", \"full\", \"self\", \"initialize\"] = \"self\",\n seed: int = 2,\n use_safety_checker: bool = False,\n engine_dir: Optional[Union[str, Path]] = \"engines\",\n ):\n \"\"\"\n Initializes the StreamDiffusionWrapper.\n\n Parameters\n ----------\n model_id_or_path : str\n The model id or path to load.\n t_index_list : List[int]\n The t_index_list to use for inference.\n lora_dict : Optional[Dict[str, float]], optional\n The lora_dict to load, by default None.\n Keys are the LoRA names and values are the LoRA scales.\n Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}\n mode : Literal[\"img2img\", \"txt2img\"], optional\n txt2img or img2img, by default \"img2img\".\n output_type : Literal[\"pil\", \"pt\", \"np\", \"latent\"], optional\n The output type of image, by default \"pil\".\n lcm_lora_id : Optional[str], optional\n The lcm_lora_id to load, by default None.\n If None, the default LCM-LoRA\n (\"latent-consistency/lcm-lora-sdv1-5\") will be used.\n vae_id : Optional[str], optional\n The vae_id to load, by default None.\n If None, the default TinyVAE\n (\"madebyollin/taesd\") will be used.\n device : Literal[\"cpu\", \"cuda\"], optional\n The device to use for inference, by default \"cuda\".\n dtype : torch.dtype, optional\n The dtype for inference, by default torch.float16.\n frame_buffer_size : int, optional\n The frame buffer size for denoising batch, by default 1.\n width : int, optional\n The width of the image, by default 512.\n height : int, optional\n The height of the image, by default 512.\n warmup : int, optional\n The number of warmup steps to perform, by default 10.\n acceleration : Literal[\"none\", \"xformers\", \"tensorrt\"], optional\n The acceleration method, by default \"tensorrt\".\n do_add_noise : bool, optional\n Whether to add noise for following denoising steps or not,\n by default True.\n device_ids : Optional[List[int]], optional\n The device ids to use for DataParallel, by default None.\n use_lcm_lora : bool, optional\n Whether to use LCM-LoRA or not, by default True.\n use_tiny_vae : bool, optional\n Whether to use TinyVAE or not, by default True.\n enable_similar_image_filter : bool, optional\n Whether to enable similar image filter or not,\n by default False.\n similar_image_filter_threshold : float, optional\n The threshold for similar image filter, by default 0.98.\n similar_image_filter_max_skip_frame : int, optional\n The max skip frame for similar image filter, by default 10.\n use_denoising_batch : bool, optional\n Whether to use denoising batch or not, by default True.\n cfg_type : Literal[\"none\", \"full\", \"self\", 
\"initialize\"],\n optional\n The cfg_type for img2img mode, by default \"self\".\n You cannot use anything other than \"none\" for txt2img mode.\n seed : int, optional\n The seed, by default 2.\n use_safety_checker : bool, optional\n Whether to use safety checker or not, by default False.\n \"\"\"\n self.sd_turbo = \"turbo\" in model_id_or_path\n\n if mode == \"txt2img\":\n if cfg_type != \"none\":\n raise ValueError(\n f\"txt2img mode accepts only cfg_type = 'none', but got {cfg_type}\"\n )\n if use_denoising_batch and frame_buffer_size > 1:\n if not self.sd_turbo:\n raise ValueError(\n \"txt2img mode cannot use denoising batch with frame_buffer_size > 1.\"\n )\n\n if mode == \"img2img\":\n if not use_denoising_batch:\n raise NotImplementedError(\n \"img2img mode must use denoising batch for now.\"\n )\n\n self.device = device\n self.dtype = dtype\n self.width = width\n self.height = height\n self.mode = mode\n self.output_type = output_type\n self.frame_buffer_size = frame_buffer_size\n self.batch_size = (\n len(t_index_list) * frame_buffer_size\n if use_denoising_batch\n else frame_buffer_size\n )\n\n self.use_denoising_batch = use_denoising_batch\n self.use_safety_checker = use_safety_checker\n\n self.stream: StreamDiffusion = self._load_model(\n model_id_or_path=model_id_or_path,\n lora_dict=lora_dict,\n lcm_lora_id=lcm_lora_id,\n vae_id=vae_id,\n t_index_list=t_index_list,\n acceleration=acceleration,\n warmup=warmup,\n do_add_noise=do_add_noise,\n use_lcm_lora=use_lcm_lora,\n use_tiny_vae=use_tiny_vae,\n cfg_type=cfg_type,\n seed=seed,\n engine_dir=engine_dir,\n )\n\n if device_ids is not None:\n self.stream.unet = torch.nn.DataParallel(\n self.stream.unet, device_ids=device_ids\n )\n\n if enable_similar_image_filter:\n self.stream.enable_similar_image_filter(similar_image_filter_threshold, similar_image_filter_max_skip_frame)\n\n def prepare(\n self,\n prompt: str,\n negative_prompt: str = \"\",\n num_inference_steps: int = 50,\n guidance_scale: float = 1.2,\n delta: float = 1.0,\n ) -> None:\n \"\"\"\n Prepares the model for inference.\n\n Parameters\n ----------\n prompt : str\n The prompt to generate images from.\n num_inference_steps : int, optional\n The number of inference steps to perform, by default 50.\n guidance_scale : float, optional\n The guidance scale to use, by default 1.2.\n delta : float, optional\n The delta multiplier of virtual residual noise,\n by default 1.0.\n \"\"\"\n self.stream.prepare(\n prompt,\n negative_prompt,\n num_inference_steps=num_inference_steps,\n guidance_scale=guidance_scale,\n delta=delta,\n )\n\n def __call__(\n self,\n image: Optional[Union[str, Image.Image, torch.Tensor]] = None,\n prompt: Optional[str] = None,\n ) -> Union[Image.Image, List[Image.Image]]:\n \"\"\"\n Performs img2img or txt2img based on the mode.\n\n Parameters\n ----------\n image : Optional[Union[str, Image.Image, torch.Tensor]]\n The image to generate from.\n prompt : Optional[str]\n The prompt to generate images from.\n\n Returns\n -------\n Union[Image.Image, List[Image.Image]]\n The generated image.\n \"\"\"\n if self.mode == \"img2img\":\n return self.img2img(image, prompt)\n else:\n return self.txt2img(prompt)\n\n def txt2img(\n self, prompt: Optional[str] = None\n ) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:\n \"\"\"\n Performs txt2img.\n\n Parameters\n ----------\n prompt : Optional[str]\n The prompt to generate images from.\n\n Returns\n -------\n Union[Image.Image, List[Image.Image]]\n The generated image.\n \"\"\"\n if prompt is not 
None:\n self.stream.update_prompt(prompt)\n\n if self.sd_turbo:\n image_tensor = self.stream.txt2img_sd_turbo(self.batch_size)\n else:\n image_tensor = self.stream.txt2img(self.frame_buffer_size)\n image = self.postprocess_image(image_tensor, output_type=self.output_type)\n\n if self.use_safety_checker:\n safety_checker_input = self.feature_extractor(\n image, return_tensors=\"pt\"\n ).to(self.device)\n _, has_nsfw_concept = self.safety_checker(\n images=image_tensor.to(self.dtype),\n clip_input=safety_checker_input.pixel_values.to(self.dtype),\n )\n image = self.nsfw_fallback_img if has_nsfw_concept[0] else image\n\n return image\n\n def img2img(\n self, image: Union[str, Image.Image, torch.Tensor], prompt: Optional[str] = None\n ) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:\n \"\"\"\n Performs img2img.\n\n Parameters\n ----------\n image : Union[str, Image.Image, torch.Tensor]\n The image to generate from.\n\n Returns\n -------\n Image.Image\n The generated image.\n \"\"\"\n if prompt is not None:\n self.stream.update_prompt(prompt)\n\n if isinstance(image, str) or isinstance(image, Image.Image):\n image = self.preprocess_image(image)\n\n image_tensor = self.stream(image)\n image = self.postprocess_image(image_tensor, output_type=self.output_type)\n\n if self.use_safety_checker:\n safety_checker_input = self.feature_extractor(\n image, return_tensors=\"pt\"\n ).to(self.device)\n _, has_nsfw_concept = self.safety_checker(\n images=image_tensor.to(self.dtype),\n clip_input=safety_checker_input.pixel_values.to(self.dtype),\n )\n image = self.nsfw_fallback_img if has_nsfw_concept[0] else image\n\n return image\n\n def preprocess_image(self, image: Union[str, Image.Image]) -> torch.Tensor:\n \"\"\"\n Preprocesses the image.\n\n Parameters\n ----------\n image : Union[str, Image.Image, torch.Tensor]\n The image to preprocess.\n\n Returns\n -------\n torch.Tensor\n The preprocessed image.\n \"\"\"\n if isinstance(image, str):\n image = Image.open(image).convert(\"RGB\").resize((self.width, self.height))\n if isinstance(image, Image.Image):\n image = image.convert(\"RGB\").resize((self.width, self.height))\n\n return self.stream.image_processor.preprocess(\n image, self.height, self.width\n ).to(device=self.device, dtype=self.dtype)\n\n def postprocess_image(\n self, image_tensor: torch.Tensor, output_type: str = \"pil\"\n ) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:\n \"\"\"\n Postprocesses the image.\n\n Parameters\n ----------\n image_tensor : torch.Tensor\n The image tensor to postprocess.\n\n Returns\n -------\n Union[Image.Image, List[Image.Image]]\n The postprocessed image.\n \"\"\"\n if self.frame_buffer_size > 1:\n return postprocess_image(image_tensor.cpu(), output_type=output_type)\n else:\n return postprocess_image(image_tensor.cpu(), output_type=output_type)[0]\n\n def _load_model(\n self,\n model_id_or_path: str,\n t_index_list: List[int],\n lora_dict: Optional[Dict[str, float]] = None,\n lcm_lora_id: Optional[str] = None,\n vae_id: Optional[str] = None,\n acceleration: Literal[\"none\", \"xformers\", \"tensorrt\"] = \"tensorrt\",\n warmup: int = 10,\n do_add_noise: bool = True,\n use_lcm_lora: bool = True,\n use_tiny_vae: bool = True,\n cfg_type: Literal[\"none\", \"full\", \"self\", \"initialize\"] = \"self\",\n seed: int = 2,\n engine_dir: Optional[Union[str, Path]] = \"engines\",\n ) -> StreamDiffusion:\n \"\"\"\n Loads the model.\n\n This method does the following:\n\n 1. Loads the model from the model_id_or_path.\n 2. 
Loads and fuses the LCM-LoRA model from the lcm_lora_id if needed.\n 3. Loads the VAE model from the vae_id if needed.\n 4. Enables acceleration if needed.\n 5. Prepares the model for inference.\n 6. Load the safety checker if needed.\n\n Parameters\n ----------\n model_id_or_path : str\n The model id or path to load.\n t_index_list : List[int]\n The t_index_list to use for inference.\n lora_dict : Optional[Dict[str, float]], optional\n The lora_dict to load, by default None.\n Keys are the LoRA names and values are the LoRA scales.\n Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}\n lcm_lora_id : Optional[str], optional\n The lcm_lora_id to load, by default None.\n vae_id : Optional[str], optional\n The vae_id to load, by default None.\n acceleration : Literal[\"none\", \"xfomers\", \"sfast\", \"tensorrt\"], optional\n The acceleration method, by default \"tensorrt\".\n warmup : int, optional\n The number of warmup steps to perform, by default 10.\n do_add_noise : bool, optional\n Whether to add noise for following denoising steps or not,\n by default True.\n use_lcm_lora : bool, optional\n Whether to use LCM-LoRA or not, by default True.\n use_tiny_vae : bool, optional\n Whether to use TinyVAE or not, by default True.\n cfg_type : Literal[\"none\", \"full\", \"self\", \"initialize\"],\n optional\n The cfg_type for img2img mode, by default \"self\".\n You cannot use anything other than \"none\" for txt2img mode.\n seed : int, optional\n The seed, by default 2.\n\n Returns\n -------\n StreamDiffusion\n The loaded model.\n \"\"\"\n\n try: # Load from local directory\n pipe: StableDiffusionPipeline = StableDiffusionPipeline.from_pretrained(\n model_id_or_path,\n ).to(device=self.device, dtype=self.dtype)\n\n except ValueError: # Load from huggingface\n pipe: StableDiffusionPipeline = StableDiffusionPipeline.from_single_file(\n model_id_or_path,\n ).to(device=self.device, dtype=self.dtype)\n except Exception: # No model found\n traceback.print_exc()\n print(\"Model load has failed. 
Doesn't exist.\")\n exit()\n\n stream = StreamDiffusion(\n pipe=pipe,\n t_index_list=t_index_list,\n torch_dtype=self.dtype,\n width=self.width,\n height=self.height,\n do_add_noise=do_add_noise,\n frame_buffer_size=self.frame_buffer_size,\n use_denoising_batch=self.use_denoising_batch,\n cfg_type=cfg_type,\n )\n if not self.sd_turbo:\n if use_lcm_lora:\n if lcm_lora_id is not None:\n stream.load_lcm_lora(\n pretrained_model_name_or_path_or_dict=lcm_lora_id\n )\n else:\n stream.load_lcm_lora()\n stream.fuse_lora()\n\n if lora_dict is not None:\n for lora_name, lora_scale in lora_dict.items():\n stream.load_lora(lora_name)\n stream.fuse_lora(lora_scale=lora_scale)\n print(f\"Use LoRA: {lora_name} in weights {lora_scale}\")\n\n if use_tiny_vae:\n if vae_id is not None:\n stream.vae = AutoencoderTiny.from_pretrained(vae_id).to(\n device=pipe.device, dtype=pipe.dtype\n )\n else:\n stream.vae = AutoencoderTiny.from_pretrained(\"madebyollin/taesd\").to(\n device=pipe.device, dtype=pipe.dtype\n )\n\n try:\n if acceleration == \"xformers\":\n stream.pipe.enable_xformers_memory_efficient_attention()\n if acceleration == \"tensorrt\":\n from polygraphy import cuda\n from streamdiffusion.acceleration.tensorrt import (\n TorchVAEEncoder,\n compile_unet,\n compile_vae_decoder,\n compile_vae_encoder,\n )\n from streamdiffusion.acceleration.tensorrt.engine import (\n AutoencoderKLEngine,\n UNet2DConditionModelEngine,\n )\n from streamdiffusion.acceleration.tensorrt.models import (\n VAE,\n UNet,\n VAEEncoder,\n )\n\n def create_prefix(\n model_id_or_path: str,\n max_batch_size: int,\n min_batch_size: int,\n ):\n maybe_path = Path(model_id_or_path)\n if maybe_path.exists():\n return f\"{maybe_path.stem}--lcm_lora-{use_lcm_lora}--tiny_vae-{use_tiny_vae}--max_batch-{max_batch_size}--min_batch-{min_batch_size}--mode-{self.mode}\"\n else:\n return f\"{model_id_or_path}--lcm_lora-{use_lcm_lora}--tiny_vae-{use_tiny_vae}--max_batch-{max_batch_size}--min_batch-{min_batch_size}--mode-{self.mode}\"\n\n engine_dir = Path(engine_dir)\n unet_path = os.path.join(\n engine_dir,\n create_prefix(\n model_id_or_path=model_id_or_path,\n max_batch_size=stream.trt_unet_batch_size,\n min_batch_size=stream.trt_unet_batch_size,\n ),\n \"unet.engine\",\n )\n vae_encoder_path = os.path.join(\n engine_dir,\n create_prefix(\n model_id_or_path=model_id_or_path,\n max_batch_size=self.batch_size\n if self.mode == \"txt2img\"\n else stream.frame_bff_size,\n min_batch_size=self.batch_size\n if self.mode == \"txt2img\"\n else stream.frame_bff_size,\n ),\n \"vae_encoder.engine\",\n )\n vae_decoder_path = os.path.join(\n engine_dir,\n create_prefix(\n model_id_or_path=model_id_or_path,\n max_batch_size=self.batch_size\n if self.mode == \"txt2img\"\n else stream.frame_bff_size,\n min_batch_size=self.batch_size\n if self.mode == \"txt2img\"\n else stream.frame_bff_size,\n ),\n \"vae_decoder.engine\",\n )\n\n if not os.path.exists(unet_path):\n os.makedirs(os.path.dirname(unet_path), exist_ok=True)\n unet_model = UNet(\n fp16=True,\n device=stream.device,\n max_batch_size=stream.trt_unet_batch_size,\n min_batch_size=stream.trt_unet_batch_size,\n embedding_dim=stream.text_encoder.config.hidden_size,\n unet_dim=stream.unet.config.in_channels,\n )\n compile_unet(\n stream.unet,\n unet_model,\n unet_path + \".onnx\",\n unet_path + \".opt.onnx\",\n unet_path,\n opt_batch_size=stream.trt_unet_batch_size,\n )\n\n if not os.path.exists(vae_decoder_path):\n os.makedirs(os.path.dirname(vae_decoder_path), exist_ok=True)\n stream.vae.forward = 
stream.vae.decode\n vae_decoder_model = VAE(\n device=stream.device,\n max_batch_size=self.batch_size\n if self.mode == \"txt2img\"\n else stream.frame_bff_size,\n min_batch_size=self.batch_size\n if self.mode == \"txt2img\"\n else stream.frame_bff_size,\n )\n compile_vae_decoder(\n stream.vae,\n vae_decoder_model,\n vae_decoder_path + \".onnx\",\n vae_decoder_path + \".opt.onnx\",\n vae_decoder_path,\n opt_batch_size=self.batch_size\n if self.mode == \"txt2img\"\n else stream.frame_bff_size,\n )\n delattr(stream.vae, \"forward\")\n\n if not os.path.exists(vae_encoder_path):\n os.makedirs(os.path.dirname(vae_encoder_path), exist_ok=True)\n vae_encoder = TorchVAEEncoder(stream.vae).to(torch.device(\"cuda\"))\n vae_encoder_model = VAEEncoder(\n device=stream.device,\n max_batch_size=self.batch_size\n if self.mode == \"txt2img\"\n else stream.frame_bff_size,\n min_batch_size=self.batch_size\n if self.mode == \"txt2img\"\n else stream.frame_bff_size,\n )\n compile_vae_encoder(\n vae_encoder,\n vae_encoder_model,\n vae_encoder_path + \".onnx\",\n vae_encoder_path + \".opt.onnx\",\n vae_encoder_path,\n opt_batch_size=self.batch_size\n if self.mode == \"txt2img\"\n else stream.frame_bff_size,\n )\n\n cuda_steram = cuda.Stream()\n\n vae_config = stream.vae.config\n vae_dtype = stream.vae.dtype\n\n stream.unet = UNet2DConditionModelEngine(\n unet_path, cuda_steram, use_cuda_graph=False\n )\n stream.vae = AutoencoderKLEngine(\n vae_encoder_path,\n vae_decoder_path,\n cuda_steram,\n stream.pipe.vae_scale_factor,\n use_cuda_graph=False,\n )\n setattr(stream.vae, \"config\", vae_config)\n setattr(stream.vae, \"dtype\", vae_dtype)\n\n gc.collect()\n torch.cuda.empty_cache()\n\n print(\"TensorRT acceleration enabled.\")\n if acceleration == \"sfast\":\n from streamdiffusion.acceleration.sfast import (\n accelerate_with_stable_fast,\n )\n\n stream = accelerate_with_stable_fast(stream)\n print(\"StableFast acceleration enabled.\")\n except Exception:\n traceback.print_exc()\n print(\"Acceleration has failed. Falling back to normal mode.\")\n\n if seed < 0: # Random seed\n seed = np.random.randint(0, 1000000)\n\n stream.prepare(\n \"\",\n \"\",\n num_inference_steps=50,\n guidance_scale=1.1\n if stream.cfg_type in [\"full\", \"self\", \"initialize\"]\n else 1.0,\n generator=torch.manual_seed(seed),\n seed=seed,\n )\n\n if self.use_safety_checker:\n from transformers import CLIPFeatureExtractor\n from diffusers.pipelines.stable_diffusion.safety_checker import (\n StableDiffusionSafetyChecker,\n )\n\n self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(\n \"CompVis/stable-diffusion-safety-checker\"\n ).to(pipe.device)\n self.feature_extractor = CLIPFeatureExtractor.from_pretrained(\n \"openai/clip-vit-base-patch32\"\n )\n self.nsfw_fallback_img = Image.new(\"RGB\", (512, 512), (0, 0, 0))\n\n return stream"
}
] | import os
import sys
import time
import threading
import torch
import PIL.Image
import mss
import fire
import tkinter as tk
from multiprocessing import Process, Queue, get_context
from multiprocessing.connection import Connection
from typing import List, Literal, Dict, Optional
from streamdiffusion.image_utils import pil2tensor
from utils.viewer import receive_images
from utils.wrapper import StreamDiffusionWrapper | 8,251 | use_denoising_batch=use_denoising_batch,
cfg_type=cfg_type,
seed=seed,
)
stream.prepare(
prompt=prompt,
negative_prompt=negative_prompt,
num_inference_steps=50,
guidance_scale=guidance_scale,
delta=delta,
)
monitor = monitor_receiver.recv()
event = threading.Event()
input_screen = threading.Thread(target=screen, args=(event, height, width, monitor))
input_screen.start()
time.sleep(5)
while True:
try:
if not close_queue.empty(): # closing check
break
if len(inputs) < frame_buffer_size:
time.sleep(0.005)
continue
start_time = time.time()
sampled_inputs = []
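            # Sample frame_buffer_size frames at even strides across the buffered captures, newest first.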
for i in range(frame_buffer_size):
index = (len(inputs) // frame_buffer_size) * i
sampled_inputs.append(inputs[len(inputs) - index - 1])
input_batch = torch.cat(sampled_inputs)
inputs.clear()
output_images = stream.stream(
input_batch.to(device=stream.device, dtype=stream.dtype)
).cpu()
if frame_buffer_size == 1:
output_images = [output_images]
for output_image in output_images:
queue.put(output_image, block=False)
fps = 1 / (time.time() - start_time)
fps_queue.put(fps)
except KeyboardInterrupt:
break
print("closing image_generation_process...")
event.set() # stop capture thread
input_screen.join()
print(f"fps: {fps}")
def main(
model_id_or_path: str = "KBlueLeaf/kohaku-v2.1",
lora_dict: Optional[Dict[str, float]] = None,
prompt: str = "1girl with brown dog hair, thick glasses, smiling",
negative_prompt: str = "low quality, bad quality, blurry, low resolution",
frame_buffer_size: int = 1,
width: int = 512,
height: int = 512,
acceleration: Literal["none", "xformers", "tensorrt"] = "xformers",
use_denoising_batch: bool = True,
seed: int = 2,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
guidance_scale: float = 1.4,
delta: float = 0.5,
do_add_noise: bool = False,
enable_similar_image_filter: bool = True,
similar_image_filter_threshold: float = 0.99,
similar_image_filter_max_skip_frame: float = 10,
) -> None:
"""
Main function to start the image generation and viewer processes.
"""
ctx = get_context('spawn')
queue = ctx.Queue()
fps_queue = ctx.Queue()
close_queue = Queue()
monitor_sender, monitor_receiver = ctx.Pipe()
process1 = ctx.Process(
target=image_generation_process,
args=(
queue,
fps_queue,
close_queue,
model_id_or_path,
lora_dict,
prompt,
negative_prompt,
frame_buffer_size,
width,
height,
acceleration,
use_denoising_batch,
seed,
cfg_type,
guidance_scale,
delta,
do_add_noise,
enable_similar_image_filter,
similar_image_filter_threshold,
similar_image_filter_max_skip_frame,
monitor_receiver,
),
)
process1.start()
monitor_process = ctx.Process(
target=monitor_setting_process,
args=(
width,
height,
monitor_sender,
),
)
monitor_process.start()
monitor_process.join()
|
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
inputs = []
top = 0
left = 0
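# Shared state: frames captured by the screen thread and the capture window position chosen in dummy_screen().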
def screen(
event: threading.Event,
height: int = 512,
width: int = 512,
monitor: Dict[str, int] = {"top": 300, "left": 200, "width": 512, "height": 512},
):
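    # Continuously grab the selected monitor region with mss, convert each frame to a tensor,
    # and append it to the shared inputs list until the event is set.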
global inputs
with mss.mss() as sct:
while True:
if event.is_set():
print("terminate read thread")
break
img = sct.grab(monitor)
img = PIL.Image.frombytes("RGB", img.size, img.bgra, "raw", "BGRX")
            img = img.resize((width, height))  # PIL's resize expects (width, height) and returns a new image
inputs.append(pil2tensor(img))
print('exit : screen')
def dummy_screen(
width: int,
height: int,
):
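    # Show a semi-transparent tkinter window so the user can position the capture region;
    # its geometry is returned once Enter is pressed.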
root = tk.Tk()
root.title("Press Enter to start")
root.geometry(f"{width}x{height}")
root.resizable(False, False)
root.attributes("-alpha", 0.8)
root.configure(bg="black")
def destroy(event):
root.destroy()
root.bind("<Return>", destroy)
def update_geometry(event):
global top, left
top = root.winfo_y()
left = root.winfo_x()
root.bind("<Configure>", update_geometry)
root.mainloop()
return {"top": top, "left": left, "width": width, "height": height}
def monitor_setting_process(
width: int,
height: int,
monitor_sender: Connection,
) -> None:
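    # Child process: open the positioning window, then send the chosen capture region back through the pipe.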
monitor = dummy_screen(width, height)
monitor_sender.send(monitor)
def image_generation_process(
queue: Queue,
fps_queue: Queue,
close_queue: Queue,
model_id_or_path: str,
lora_dict: Optional[Dict[str, float]],
prompt: str,
negative_prompt: str,
frame_buffer_size: int,
width: int,
height: int,
acceleration: Literal["none", "xformers", "tensorrt"],
use_denoising_batch: bool,
seed: int,
cfg_type: Literal["none", "full", "self", "initialize"],
guidance_scale: float,
delta: float,
do_add_noise: bool,
enable_similar_image_filter: bool,
similar_image_filter_threshold: float,
similar_image_filter_max_skip_frame: float,
monitor_receiver : Connection,
) -> None:
"""
Process for generating images based on a prompt using a specified model.
Parameters
----------
queue : Queue
The queue to put the generated images in.
fps_queue : Queue
The queue to put the calculated fps.
model_id_or_path : str
The name of the model to use for image generation.
lora_dict : Optional[Dict[str, float]], optional
The lora_dict to load, by default None.
Keys are the LoRA names and values are the LoRA scales.
Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
prompt : str
The prompt to generate images from.
negative_prompt : str, optional
The negative prompt to use.
frame_buffer_size : int, optional
The frame buffer size for denoising batch, by default 1.
width : int, optional
The width of the image, by default 512.
height : int, optional
The height of the image, by default 512.
acceleration : Literal["none", "xformers", "tensorrt"], optional
The acceleration method, by default "tensorrt".
use_denoising_batch : bool, optional
Whether to use denoising batch or not, by default True.
seed : int, optional
        The seed, by default 2. If -1, a random seed is used.
cfg_type : Literal["none", "full", "self", "initialize"],
optional
The cfg_type for img2img mode, by default "self".
You cannot use anything other than "none" for txt2img mode.
guidance_scale : float, optional
The CFG scale, by default 1.2.
delta : float, optional
The delta multiplier of virtual residual noise,
by default 1.0.
do_add_noise : bool, optional
Whether to add noise for following denoising steps or not,
by default True.
enable_similar_image_filter : bool, optional
Whether to enable similar image filter or not,
by default False.
similar_image_filter_threshold : float, optional
The threshold for similar image filter, by default 0.98.
similar_image_filter_max_skip_frame : int, optional
The max skip frame for similar image filter, by default 10.
"""
global inputs
stream = StreamDiffusionWrapper(
model_id_or_path=model_id_or_path,
lora_dict=lora_dict,
t_index_list=[32, 45],
frame_buffer_size=frame_buffer_size,
width=width,
height=height,
warmup=10,
acceleration=acceleration,
do_add_noise=do_add_noise,
enable_similar_image_filter=enable_similar_image_filter,
similar_image_filter_threshold=similar_image_filter_threshold,
similar_image_filter_max_skip_frame=similar_image_filter_max_skip_frame,
mode="img2img",
use_denoising_batch=use_denoising_batch,
cfg_type=cfg_type,
seed=seed,
)
stream.prepare(
prompt=prompt,
negative_prompt=negative_prompt,
num_inference_steps=50,
guidance_scale=guidance_scale,
delta=delta,
)
monitor = monitor_receiver.recv()
event = threading.Event()
input_screen = threading.Thread(target=screen, args=(event, height, width, monitor))
input_screen.start()
time.sleep(5)
while True:
try:
if not close_queue.empty(): # closing check
break
if len(inputs) < frame_buffer_size:
time.sleep(0.005)
continue
start_time = time.time()
sampled_inputs = []
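            # Sample frame_buffer_size frames at even strides across the buffered captures, newest first.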
for i in range(frame_buffer_size):
index = (len(inputs) // frame_buffer_size) * i
sampled_inputs.append(inputs[len(inputs) - index - 1])
input_batch = torch.cat(sampled_inputs)
inputs.clear()
output_images = stream.stream(
input_batch.to(device=stream.device, dtype=stream.dtype)
).cpu()
if frame_buffer_size == 1:
output_images = [output_images]
for output_image in output_images:
queue.put(output_image, block=False)
fps = 1 / (time.time() - start_time)
fps_queue.put(fps)
except KeyboardInterrupt:
break
print("closing image_generation_process...")
event.set() # stop capture thread
input_screen.join()
print(f"fps: {fps}")
def main(
model_id_or_path: str = "KBlueLeaf/kohaku-v2.1",
lora_dict: Optional[Dict[str, float]] = None,
prompt: str = "1girl with brown dog hair, thick glasses, smiling",
negative_prompt: str = "low quality, bad quality, blurry, low resolution",
frame_buffer_size: int = 1,
width: int = 512,
height: int = 512,
acceleration: Literal["none", "xformers", "tensorrt"] = "xformers",
use_denoising_batch: bool = True,
seed: int = 2,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
guidance_scale: float = 1.4,
delta: float = 0.5,
do_add_noise: bool = False,
enable_similar_image_filter: bool = True,
similar_image_filter_threshold: float = 0.99,
similar_image_filter_max_skip_frame: float = 10,
) -> None:
"""
Main function to start the image generation and viewer processes.
"""
ctx = get_context('spawn')
queue = ctx.Queue()
fps_queue = ctx.Queue()
close_queue = Queue()
monitor_sender, monitor_receiver = ctx.Pipe()
process1 = ctx.Process(
target=image_generation_process,
args=(
queue,
fps_queue,
close_queue,
model_id_or_path,
lora_dict,
prompt,
negative_prompt,
frame_buffer_size,
width,
height,
acceleration,
use_denoising_batch,
seed,
cfg_type,
guidance_scale,
delta,
do_add_noise,
enable_similar_image_filter,
similar_image_filter_threshold,
similar_image_filter_max_skip_frame,
monitor_receiver,
),
)
process1.start()
monitor_process = ctx.Process(
target=monitor_setting_process,
args=(
width,
height,
monitor_sender,
),
)
monitor_process.start()
monitor_process.join()
| process2 = ctx.Process(target=receive_images, args=(queue, fps_queue)) | 0 | 2023-11-28 13:40:30+00:00 | 12k |
zhyever/PatchFusion | ui_prediction.py | [
{
"identifier": "parse_unknown",
"path": "zoedepth/utils/arg_utils.py",
"snippet": "def parse_unknown(unknown_args):\n clean = []\n for a in unknown_args:\n if \"=\" in a:\n k, v = a.split(\"=\")\n clean.extend([k, v])\n else:\n clean.append(a)\n\n keys = clean[::2]\n values = clean[1::2]\n return {k.replace(\"--\", \"\"): infer_type(v) for k, v in zip(keys, values)}"
},
{
"identifier": "build_model",
"path": "zoedepth/models/builder.py",
"snippet": "def build_model(config) -> DepthModel:\n \"\"\"Builds a model from a config. The model is specified by the model name and version in the config. The model is then constructed using the build_from_config function of the model interface.\n This function should be used to construct models for training and evaluation.\n\n Args:\n config (dict): Config dict. Config is constructed in utils/config.py. Each model has its own config file(s) saved in its root model folder.\n\n Returns:\n torch.nn.Module: Model corresponding to name and version as specified in config\n \"\"\"\n module_name = f\"zoedepth.models.{config.model}\"\n try:\n module = import_module(module_name)\n except ModuleNotFoundError as e:\n # print the original error message\n print(e)\n raise ValueError(\n f\"Model {config.model} not found. Refer above error for details.\") from e\n try:\n get_version = getattr(module, \"get_version\")\n except AttributeError as e:\n raise ValueError(\n f\"Model {config.model} has no get_version function.\") from e\n return get_version(config.version_name).build_from_config(config)"
},
{
"identifier": "get_config_user",
"path": "zoedepth/utils/config.py",
"snippet": "def get_config_user(model_name, mode='infer', model_cfg_path=None, **overwrite_kwargs):\n \"\"\"Main entry point to get the config for the model.\n\n Args:\n model_name (str): name of the desired model.\n mode (str, optional): \"train\" or \"infer\". Defaults to 'train'.\n dataset (str, optional): If specified, the corresponding dataset configuration is loaded as well. Defaults to None.\n \n Keyword Args: key-value pairs of arguments to overwrite the default config.\n\n The order of precedence for overwriting the config is (Higher precedence first):\n # 1. overwrite_kwargs\n # 2. \"config_version\": Config file version if specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{config_version}.json\n # 3. \"version_name\": Default Model version specific config specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{version_name}.json\n # 4. common_config: Default config for all models specified in COMMON_CONFIG\n\n Returns:\n easydict: The config dictionary for the model.\n \"\"\"\n\n check_choices(\"Model\", model_name, [\"zoedepth\", \"zoedepth_nk\", \"zoedepth_custom\"])\n check_choices(\"Mode\", mode, [\"train\", \"infer\", \"eval\"])\n \n config = flatten({**COMMON_CONFIG, **COMMON_TRAINING_CONFIG})\n config = update_model_config(config, mode, model_name, model_cfg_path=model_cfg_path)\n\n # update with model version specific config\n version_name = overwrite_kwargs.get(\"version_name\", config[\"version_name\"])\n config = update_model_config(config, mode, model_name, version_name, model_cfg_path=model_cfg_path)\n\n # update with config version if specified\n config_version = overwrite_kwargs.get(\"config_version\", None)\n if config_version is not None:\n print(\"Overwriting config with config_version\", config_version)\n config = update_model_config(config, mode, model_name, config_version, model_cfg_path=model_cfg_path)\n\n # update with overwrite_kwargs\n # Combined args are useful for hyperparameter search\n overwrite_kwargs = split_combined_args(overwrite_kwargs)\n config = {**config, **overwrite_kwargs}\n\n # Casting to bool # TODO: Not necessary. Remove and test\n for key in KEYS_TYPE_BOOL:\n if key in config:\n config[key] = bool(config[key])\n\n # Model specific post processing of config\n parse_list(config, \"n_attractors\")\n\n # adjust n_bins for each bin configuration if bin_conf is given and n_bins is passed in overwrite_kwargs\n if 'bin_conf' in config and 'n_bins' in overwrite_kwargs:\n bin_conf = config['bin_conf'] # list of dicts\n n_bins = overwrite_kwargs['n_bins']\n new_bin_conf = []\n for conf in bin_conf:\n conf['n_bins'] = n_bins\n new_bin_conf.append(conf)\n config['bin_conf'] = new_bin_conf\n\n config['model'] = model_name\n typed_config = {k: infer_type(v) for k, v in config.items()}\n # add hostname to config\n config['hostname'] = platform.node()\n return edict(typed_config)"
},
{
"identifier": "regular_tile_param",
"path": "infer_user.py",
"snippet": "def regular_tile_param(model, image, offset_x=0, offset_y=0, img_lr=None, iter_pred=None, boundary=0, update=False, avg_depth_map=None, blr_mask=False, crop_size=None,\n img_resolution=None, transform=None):\n # crop size\n # height = 540\n # width = 960\n height = crop_size[0]\n width = crop_size[1]\n\n assert offset_x >= 0 and offset_y >= 0\n \n tile_num_x = (img_resolution[1] - offset_x) // width\n tile_num_y = (img_resolution[0] - offset_y) // height\n x_start = [width * x + offset_x for x in range(tile_num_x)]\n y_start = [height * y + offset_y for y in range(tile_num_y)]\n imgs_crop = []\n crop_areas = []\n bboxs_roi = []\n bboxs_raw = []\n\n if iter_pred is not None:\n iter_pred = iter_pred.unsqueeze(dim=0).unsqueeze(dim=0)\n\n iter_priors = []\n for x in x_start: # w\n for y in y_start: # h\n bbox = (int(y), int(y+height), int(x), int(x+width))\n img_crop, crop_area = crop(image, bbox)\n imgs_crop.append(img_crop)\n crop_areas.append(crop_area)\n crop_y1, crop_y2, crop_x1, crop_x2 = bbox\n bbox_roi = torch.tensor([crop_x1 / img_resolution[1] * 512, crop_y1 / img_resolution[0] * 384, crop_x2 / img_resolution[1] * 512, crop_y2 / img_resolution[0] * 384])\n bboxs_roi.append(bbox_roi)\n bbox_raw = torch.tensor([crop_x1, crop_y1, crop_x2, crop_y2]) \n bboxs_raw.append(bbox_raw)\n\n if iter_pred is not None:\n iter_prior, _ = crop(iter_pred, bbox)\n iter_priors.append(iter_prior)\n\n crop_areas = torch.cat(crop_areas, dim=0)\n imgs_crop = torch.cat(imgs_crop, dim=0)\n bboxs_roi = torch.stack(bboxs_roi, dim=0)\n bboxs_raw = torch.stack(bboxs_raw, dim=0)\n\n if iter_pred is not None:\n iter_priors = torch.cat(iter_priors, dim=0)\n iter_priors = transform(iter_priors)\n iter_priors = iter_priors.cuda().float()\n\n crop_areas = transform(crop_areas)\n imgs_crop = transform(imgs_crop)\n\n imgs_crop = imgs_crop.cuda().float()\n bboxs_roi = bboxs_roi.cuda().float()\n crop_areas = crop_areas.cuda().float()\n img_lr = img_lr.cuda().float()\n \n pred_depth_crops = []\n with torch.no_grad():\n for i, (img, bbox, crop_area) in enumerate(zip(imgs_crop, bboxs_roi, crop_areas)):\n\n if iter_pred is not None:\n iter_prior = iter_priors[i].unsqueeze(dim=0)\n else:\n iter_prior = None\n\n if i == 0:\n out_dict = model(img.unsqueeze(dim=0), mode='eval', image_raw=img_lr, bbox=bbox.unsqueeze(dim=0), crop_area=crop_area.unsqueeze(dim=0), iter_prior=iter_prior if update is True else None)\n whole_depth_pred = out_dict['coarse_depth_pred']\n # return whole_depth_pred.squeeze()\n # pred_depth_crop = out_dict['fine_depth_pred']\n pred_depth_crop = out_dict['metric_depth']\n else:\n pred_depth_crop = model(img.unsqueeze(dim=0), mode='eval', image_raw=img_lr, bbox=bbox.unsqueeze(dim=0), crop_area=crop_area.unsqueeze(dim=0), iter_prior=iter_prior if update is True else None)['metric_depth']\n # pred_depth_crop = model(img.unsqueeze(dim=0), mode='eval', image_raw=img_lr, bbox=bbox.unsqueeze(dim=0), crop_area=crop_area.unsqueeze(dim=0), iter_prior=iter_prior if update is True else None)['fine_depth_pred']\n\n\n pred_depth_crop = nn.functional.interpolate(\n pred_depth_crop, (height, width), mode='bilinear', align_corners=True)\n # pred_depth_crop = nn.functional.interpolate(\n # pred_depth_crop, (height, width), mode='nearest')\n pred_depth_crops.append(pred_depth_crop.squeeze())\n\n whole_depth_pred = whole_depth_pred.squeeze()\n whole_depth_pred = nn.functional.interpolate(whole_depth_pred.unsqueeze(dim=0).unsqueeze(dim=0), img_resolution, mode='bilinear', align_corners=True).squeeze()\n\n ####### stich 
part\n inner_idx = 0\n init_flag = False\n if offset_x == 0 and offset_y == 0:\n init_flag = True\n # pred_depth = whole_depth_pred\n pred_depth = torch.zeros(img_resolution, device=pred_depth_crops[inner_idx].device)\n else:\n iter_pred = iter_pred.squeeze()\n pred_depth = iter_pred\n\n blur_mask = generatemask((height, width)) + 1e-3\n count_map = torch.zeros(img_resolution, device=pred_depth_crops[inner_idx].device)\n\n for ii, x in enumerate(x_start):\n for jj, y in enumerate(y_start):\n if init_flag:\n # pred_depth[y: y+height, x: x+width] = blur_mask * pred_depth_crops[inner_idx] + (1 - blur_mask) * crop_temp\n # pred_depth[y: y+height, x: x+width] = blur_mask * pred_depth_crops[inner_idx] + (1 - blur_mask) * crop_temp\n blur_mask = torch.tensor(blur_mask, device=whole_depth_pred.device)\n count_map[y: y+height, x: x+width] = blur_mask\n pred_depth[y: y+height, x: x+width] = pred_depth_crops[inner_idx] * blur_mask\n\n else:\n # ensemble with running mean\n if blr_mask:\n blur_mask = torch.tensor(blur_mask, device=whole_depth_pred.device)\n count_map = torch.zeros(img_resolution, device=pred_depth_crops[inner_idx].device)\n count_map[y: y+height, x: x+width] = blur_mask\n pred_map = torch.zeros(img_resolution, device=pred_depth_crops[inner_idx].device)\n pred_map[y: y+height, x: x+width] = pred_depth_crops[inner_idx] * blur_mask\n avg_depth_map.update(pred_map, count_map)\n else:\n if boundary != 0:\n count_map = torch.zeros(img_resolution, device=pred_depth_crops[inner_idx].device)\n count_map[y+boundary: y+height-boundary, x+boundary: x+width-boundary] = 1\n pred_map = torch.zeros(img_resolution, device=pred_depth_crops[inner_idx].device)\n pred_map[y+boundary: y+height-boundary, x+boundary: x+width-boundary] = pred_depth_crops[inner_idx][boundary:-boundary, boundary:-boundary] \n avg_depth_map.update(pred_map, count_map)\n else:\n count_map = torch.zeros(img_resolution, device=pred_depth_crops[inner_idx].device)\n count_map[y: y+height, x: x+width] = 1\n pred_map = torch.zeros(img_resolution, device=pred_depth_crops[inner_idx].device)\n pred_map[y: y+height, x: x+width] = pred_depth_crops[inner_idx]\n avg_depth_map.update(pred_map, count_map)\n\n\n inner_idx += 1\n\n if init_flag:\n avg_depth_map = RunningAverageMap(pred_depth, count_map)\n # blur_mask = generatemask_coarse(img_resolution)\n # blur_mask = torch.tensor(blur_mask, device=whole_depth_pred.device)\n # count_map = (1 - blur_mask)\n # pred_map = whole_depth_pred * (1 - blur_mask)\n # avg_depth_map.update(pred_map, count_map)\n return avg_depth_map"
},
{
"identifier": "random_tile_param",
"path": "infer_user.py",
"snippet": "def random_tile_param(model, image, img_lr=None, iter_pred=None, boundary=0, update=False, avg_depth_map=None, blr_mask=False, crop_size=None,\n img_resolution=None, transform=None):\n height = crop_size[0]\n width = crop_size[1]\n \n \n x_start = [random.randint(0, img_resolution[1] - width - 1)]\n y_start = [random.randint(0, img_resolution[0] - height - 1)]\n \n imgs_crop = []\n crop_areas = []\n bboxs_roi = []\n bboxs_raw = []\n\n if iter_pred is not None:\n iter_pred = iter_pred.unsqueeze(dim=0).unsqueeze(dim=0)\n\n iter_priors = []\n for x in x_start: # w\n for y in y_start: # h\n bbox = (int(y), int(y+height), int(x), int(x+width))\n img_crop, crop_area = crop(image, bbox)\n imgs_crop.append(img_crop)\n crop_areas.append(crop_area)\n crop_y1, crop_y2, crop_x1, crop_x2 = bbox\n bbox_roi = torch.tensor([crop_x1 / img_resolution[1] * 512, crop_y1 / img_resolution[0] * 384, crop_x2 / img_resolution[1] * 512, crop_y2 / img_resolution[0] * 384])\n bboxs_roi.append(bbox_roi)\n bbox_raw = torch.tensor([crop_x1, crop_y1, crop_x2, crop_y2]) \n bboxs_raw.append(bbox_raw)\n\n if iter_pred is not None:\n iter_prior, _ = crop(iter_pred, bbox)\n iter_priors.append(iter_prior)\n\n crop_areas = torch.cat(crop_areas, dim=0)\n imgs_crop = torch.cat(imgs_crop, dim=0)\n bboxs_roi = torch.stack(bboxs_roi, dim=0)\n bboxs_raw = torch.stack(bboxs_raw, dim=0)\n\n if iter_pred is not None:\n iter_priors = torch.cat(iter_priors, dim=0)\n iter_priors = transform(iter_priors)\n iter_priors = iter_priors.cuda().float()\n\n crop_areas = transform(crop_areas)\n imgs_crop = transform(imgs_crop)\n \n imgs_crop = imgs_crop.cuda().float()\n bboxs_roi = bboxs_roi.cuda().float()\n crop_areas = crop_areas.cuda().float()\n img_lr = img_lr.cuda().float()\n \n pred_depth_crops = []\n with torch.no_grad():\n for i, (img, bbox, crop_area) in enumerate(zip(imgs_crop, bboxs_roi, crop_areas)):\n\n if iter_pred is not None:\n iter_prior = iter_priors[i].unsqueeze(dim=0)\n else:\n iter_prior = None\n\n if i == 0:\n out_dict = model(img.unsqueeze(dim=0), mode='eval', image_raw=img_lr, bbox=bbox.unsqueeze(dim=0), crop_area=crop_area.unsqueeze(dim=0), iter_prior=iter_prior if update is True else None)\n whole_depth_pred = out_dict['coarse_depth_pred']\n pred_depth_crop = out_dict['metric_depth']\n # return whole_depth_pred.squeeze()\n else:\n pred_depth_crop = model(img.unsqueeze(dim=0), mode='eval', image_raw=img_lr, bbox=bbox.unsqueeze(dim=0), crop_area=crop_area.unsqueeze(dim=0), iter_prior=iter_prior if update is True else None)['metric_depth']\n\n\n pred_depth_crop = nn.functional.interpolate(\n pred_depth_crop, (height, width), mode='bilinear', align_corners=True)\n # pred_depth_crop = nn.functional.interpolate(\n # pred_depth_crop, (height, width), mode='nearest')\n pred_depth_crops.append(pred_depth_crop.squeeze())\n\n whole_depth_pred = whole_depth_pred.squeeze()\n\n ####### stich part\n inner_idx = 0\n init_flag = False\n iter_pred = iter_pred.squeeze()\n pred_depth = iter_pred\n\n blur_mask = generatemask((height, width)) + 1e-3\n for ii, x in enumerate(x_start):\n for jj, y in enumerate(y_start):\n if init_flag:\n # wont be here\n crop_temp = copy.deepcopy(whole_depth_pred[y: y+height, x: x+width])\n blur_mask = torch.ones((height, width))\n blur_mask = torch.tensor(blur_mask, device=whole_depth_pred.device)\n pred_depth[y: y+height, x: x+width] = blur_mask * pred_depth_crops[inner_idx]+ (1 - blur_mask) * crop_temp\n else:\n\n if blr_mask:\n blur_mask = torch.tensor(blur_mask, device=whole_depth_pred.device)\n 
count_map = torch.zeros(img_resolution, device=pred_depth_crops[inner_idx].device)\n count_map[y: y+height, x: x+width] = blur_mask\n pred_map = torch.zeros(img_resolution, device=pred_depth_crops[inner_idx].device)\n pred_map[y: y+height, x: x+width] = pred_depth_crops[inner_idx] * blur_mask\n avg_depth_map.update(pred_map, count_map)\n else:\n # ensemble with running mean\n if boundary != 0:\n count_map = torch.zeros(img_resolution, device=pred_depth_crops[inner_idx].device)\n count_map[y+boundary: y+height-boundary, x+boundary: x+width-boundary] = 1\n pred_map = torch.zeros(img_resolution, device=pred_depth_crops[inner_idx].device)\n pred_map[y+boundary: y+height-boundary, x+boundary: x+width-boundary] = pred_depth_crops[inner_idx][boundary:-boundary, boundary:-boundary] \n avg_depth_map.update(pred_map, count_map)\n else:\n count_map = torch.zeros(img_resolution, device=pred_depth_crops[inner_idx].device)\n count_map[y: y+height, x: x+width] = 1\n pred_map = torch.zeros(img_resolution, device=pred_depth_crops[inner_idx].device)\n pred_map[y: y+height, x: x+width] = pred_depth_crops[inner_idx]\n avg_depth_map.update(pred_map, count_map)\n\n inner_idx += 1\n\n if avg_depth_map is None:\n return pred_depth"
},
{
"identifier": "Resize",
"path": "zoedepth/models/base_models/midas.py",
"snippet": "class Resize(object):\n \"\"\"Resize sample to given size (width, height).\n \"\"\"\n\n def __init__(\n self,\n width,\n height,\n resize_target=True,\n keep_aspect_ratio=False,\n ensure_multiple_of=1,\n resize_method=\"lower_bound\",\n ):\n \"\"\"Init.\n Args:\n width (int): desired output width\n height (int): desired output height\n resize_target (bool, optional):\n True: Resize the full sample (image, mask, target).\n False: Resize image only.\n Defaults to True.\n keep_aspect_ratio (bool, optional):\n True: Keep the aspect ratio of the input sample.\n Output sample might not have the given width and height, and\n resize behaviour depends on the parameter 'resize_method'.\n Defaults to False.\n ensure_multiple_of (int, optional):\n Output width and height is constrained to be multiple of this parameter.\n Defaults to 1.\n resize_method (str, optional):\n \"lower_bound\": Output will be at least as large as the given size.\n \"upper_bound\": Output will be at max as large as the given size. (Output size might be smaller than given size.)\n \"minimal\": Scale as least as possible. (Output size might be smaller than given size.)\n Defaults to \"lower_bound\".\n \"\"\"\n print(\"Params passed to Resize transform:\")\n print(\"\\twidth: \", width)\n print(\"\\theight: \", height)\n print(\"\\tresize_target: \", resize_target)\n print(\"\\tkeep_aspect_ratio: \", keep_aspect_ratio)\n print(\"\\tensure_multiple_of: \", ensure_multiple_of)\n print(\"\\tresize_method: \", resize_method)\n\n self.__width = width\n self.__height = height\n\n self.__keep_aspect_ratio = keep_aspect_ratio\n self.__multiple_of = ensure_multiple_of\n self.__resize_method = resize_method\n\n def constrain_to_multiple_of(self, x, min_val=0, max_val=None):\n y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)\n\n if max_val is not None and y > max_val:\n y = (np.floor(x / self.__multiple_of)\n * self.__multiple_of).astype(int)\n\n if y < min_val:\n y = (np.ceil(x / self.__multiple_of)\n * self.__multiple_of).astype(int)\n\n return y\n\n def get_size(self, width, height):\n # determine new height and width\n scale_height = self.__height / height\n scale_width = self.__width / width\n\n if self.__keep_aspect_ratio:\n if self.__resize_method == \"lower_bound\":\n # scale such that output size is lower bound\n if scale_width > scale_height:\n # fit width\n scale_height = scale_width\n else:\n # fit height\n scale_width = scale_height\n elif self.__resize_method == \"upper_bound\":\n # scale such that output size is upper bound\n if scale_width < scale_height:\n # fit width\n scale_height = scale_width\n else:\n # fit height\n scale_width = scale_height\n elif self.__resize_method == \"minimal\":\n # scale as least as possbile\n if abs(1 - scale_width) < abs(1 - scale_height):\n # fit width\n scale_height = scale_width\n else:\n # fit height\n scale_width = scale_height\n else:\n raise ValueError(\n f\"resize_method {self.__resize_method} not implemented\"\n )\n\n if self.__resize_method == \"lower_bound\":\n new_height = self.constrain_to_multiple_of(\n scale_height * height, min_val=self.__height\n )\n new_width = self.constrain_to_multiple_of(\n scale_width * width, min_val=self.__width\n )\n elif self.__resize_method == \"upper_bound\":\n new_height = self.constrain_to_multiple_of(\n scale_height * height, max_val=self.__height\n )\n new_width = self.constrain_to_multiple_of(\n scale_width * width, max_val=self.__width\n )\n elif self.__resize_method == \"minimal\":\n new_height = 
self.constrain_to_multiple_of(scale_height * height)\n new_width = self.constrain_to_multiple_of(scale_width * width)\n else:\n raise ValueError(\n f\"resize_method {self.__resize_method} not implemented\")\n\n return (new_width, new_height)\n\n def __call__(self, x):\n width, height = self.get_size(*x.shape[-2:][::-1])\n return nn.functional.interpolate(x, (height, width), mode='bilinear', align_corners=True)"
},
{
"identifier": "Resize",
"path": "zoedepth/models/base_models/midas.py",
"snippet": "class Resize(object):\n \"\"\"Resize sample to given size (width, height).\n \"\"\"\n\n def __init__(\n self,\n width,\n height,\n resize_target=True,\n keep_aspect_ratio=False,\n ensure_multiple_of=1,\n resize_method=\"lower_bound\",\n ):\n \"\"\"Init.\n Args:\n width (int): desired output width\n height (int): desired output height\n resize_target (bool, optional):\n True: Resize the full sample (image, mask, target).\n False: Resize image only.\n Defaults to True.\n keep_aspect_ratio (bool, optional):\n True: Keep the aspect ratio of the input sample.\n Output sample might not have the given width and height, and\n resize behaviour depends on the parameter 'resize_method'.\n Defaults to False.\n ensure_multiple_of (int, optional):\n Output width and height is constrained to be multiple of this parameter.\n Defaults to 1.\n resize_method (str, optional):\n \"lower_bound\": Output will be at least as large as the given size.\n \"upper_bound\": Output will be at max as large as the given size. (Output size might be smaller than given size.)\n \"minimal\": Scale as least as possible. (Output size might be smaller than given size.)\n Defaults to \"lower_bound\".\n \"\"\"\n print(\"Params passed to Resize transform:\")\n print(\"\\twidth: \", width)\n print(\"\\theight: \", height)\n print(\"\\tresize_target: \", resize_target)\n print(\"\\tkeep_aspect_ratio: \", keep_aspect_ratio)\n print(\"\\tensure_multiple_of: \", ensure_multiple_of)\n print(\"\\tresize_method: \", resize_method)\n\n self.__width = width\n self.__height = height\n\n self.__keep_aspect_ratio = keep_aspect_ratio\n self.__multiple_of = ensure_multiple_of\n self.__resize_method = resize_method\n\n def constrain_to_multiple_of(self, x, min_val=0, max_val=None):\n y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)\n\n if max_val is not None and y > max_val:\n y = (np.floor(x / self.__multiple_of)\n * self.__multiple_of).astype(int)\n\n if y < min_val:\n y = (np.ceil(x / self.__multiple_of)\n * self.__multiple_of).astype(int)\n\n return y\n\n def get_size(self, width, height):\n # determine new height and width\n scale_height = self.__height / height\n scale_width = self.__width / width\n\n if self.__keep_aspect_ratio:\n if self.__resize_method == \"lower_bound\":\n # scale such that output size is lower bound\n if scale_width > scale_height:\n # fit width\n scale_height = scale_width\n else:\n # fit height\n scale_width = scale_height\n elif self.__resize_method == \"upper_bound\":\n # scale such that output size is upper bound\n if scale_width < scale_height:\n # fit width\n scale_height = scale_width\n else:\n # fit height\n scale_width = scale_height\n elif self.__resize_method == \"minimal\":\n # scale as least as possbile\n if abs(1 - scale_width) < abs(1 - scale_height):\n # fit width\n scale_height = scale_width\n else:\n # fit height\n scale_width = scale_height\n else:\n raise ValueError(\n f\"resize_method {self.__resize_method} not implemented\"\n )\n\n if self.__resize_method == \"lower_bound\":\n new_height = self.constrain_to_multiple_of(\n scale_height * height, min_val=self.__height\n )\n new_width = self.constrain_to_multiple_of(\n scale_width * width, min_val=self.__width\n )\n elif self.__resize_method == \"upper_bound\":\n new_height = self.constrain_to_multiple_of(\n scale_height * height, max_val=self.__height\n )\n new_width = self.constrain_to_multiple_of(\n scale_width * width, max_val=self.__width\n )\n elif self.__resize_method == \"minimal\":\n new_height = 
self.constrain_to_multiple_of(scale_height * height)\n new_width = self.constrain_to_multiple_of(scale_width * width)\n else:\n raise ValueError(\n f\"resize_method {self.__resize_method} not implemented\")\n\n return (new_width, new_height)\n\n def __call__(self, x):\n width, height = self.get_size(*x.shape[-2:][::-1])\n return nn.functional.interpolate(x, (height, width), mode='bilinear', align_corners=True)"
},
{
"identifier": "depth_to_points",
"path": "zoedepth/utils/geometry.py",
"snippet": "def depth_to_points(depth, R=None, t=None, fov=55):\n\n K = get_intrinsics(depth.shape[1], depth.shape[2], fov=fov)\n Kinv = np.linalg.inv(K)\n if R is None:\n R = np.eye(3)\n if t is None:\n t = np.zeros(3)\n\n # M converts from your coordinate to PyTorch3D's coordinate system\n M = np.eye(3)\n M[0, 0] = -1.0\n M[1, 1] = -1.0\n\n height, width = depth.shape[1:3]\n\n x = np.arange(width)\n y = np.arange(height)\n coord = np.stack(np.meshgrid(x, y), -1)\n coord = np.concatenate((coord, np.ones_like(coord)[:, :, [0]]), -1) # z=1\n coord = coord.astype(np.float32)\n # coord = torch.as_tensor(coord, dtype=torch.float32, device=device)\n coord = coord[None] # bs, h, w, 3\n\n D = depth[:, :, :, None, None]\n # print(D.shape, Kinv[None, None, None, ...].shape, coord[:, :, :, :, None].shape )\n pts3D_1 = D * Kinv[None, None, None, ...] @ coord[:, :, :, :, None]\n # pts3D_1 live in your coordinate system. Convert them to Py3D's\n pts3D_1 = M[None, None, None, ...] @ pts3D_1\n # from reference to targe tviewpoint\n pts3D_2 = R[None, None, None, ...] @ pts3D_1 + t[None, None, None, :, None]\n # pts3D_2 = pts3D_1\n # depth_2 = pts3D_2[:, :, :, 2, :] # b,1,h,w\n return pts3D_2[:, :, :, :3, 0][0]"
},
{
"identifier": "create_triangles",
"path": "zoedepth/utils/geometry.py",
"snippet": "def create_triangles(h, w, mask=None):\n \"\"\"\n Reference: https://github.com/google-research/google-research/blob/e96197de06613f1b027d20328e06d69829fa5a89/infinite_nature/render_utils.py#L68\n Creates mesh triangle indices from a given pixel grid size.\n This function is not and need not be differentiable as triangle indices are\n fixed.\n Args:\n h: (int) denoting the height of the image.\n w: (int) denoting the width of the image.\n Returns:\n triangles: 2D numpy array of indices (int) with shape (2(W-1)(H-1) x 3)\n \"\"\"\n x, y = np.meshgrid(range(w - 1), range(h - 1))\n tl = y * w + x\n tr = y * w + x + 1\n bl = (y + 1) * w + x\n br = (y + 1) * w + x + 1\n triangles = np.array([tl, bl, tr, br, tr, bl])\n triangles = np.transpose(triangles, (1, 2, 0)).reshape(\n ((w - 1) * (h - 1) * 2, 3))\n if mask is not None:\n mask = mask.reshape(-1)\n triangles = triangles[mask[triangles].all(1)]\n return triangles"
}
] | import gradio as gr
import tempfile
import torch
import numpy as np
import argparse
import matplotlib
import cv2
import torch.nn.functional as F
import gradio as gr
import numpy as np
import trimesh
import tempfile
from PIL import Image
from zoedepth.utils.arg_utils import parse_unknown
from zoedepth.models.builder import build_model
from zoedepth.utils.config import get_config_user
from infer_user import regular_tile_param, random_tile_param
from zoedepth.models.base_models.midas import Resize
from torchvision.transforms import Compose
from PIL import Image
from torchvision import transforms
from zoedepth.models.base_models.midas import Resize
from torchvision.transforms import Compose
from zoedepth.utils.geometry import depth_to_points, create_triangles
from functools import partial | 9,100 | # copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# File author: Zhenyu Li
def depth_edges_mask(depth, occ_filter_thr):
"""Returns a mask of edges in the depth map.
Args:
depth: 2D numpy array of shape (H, W) with dtype float32.
Returns:
mask: 2D numpy array of shape (H, W) with dtype bool.
"""
# Compute the x and y gradients of the depth map.
depth_dx, depth_dy = np.gradient(depth)
# Compute the gradient magnitude.
depth_grad = np.sqrt(depth_dx ** 2 + depth_dy ** 2)
# Compute the edge mask.
# mask = depth_grad > 0.05 # default in zoedepth
mask = depth_grad > occ_filter_thr # preserve more edges (?)
return mask
def load_state_dict(model, state_dict):
"""Load state_dict into model, handling DataParallel and DistributedDataParallel. Also checks for "model" key in state_dict.
DataParallel prefixes state_dict keys with 'module.' when saving.
If the model is not a DataParallel model but the state_dict is, then prefixes are removed.
If the model is a DataParallel model but the state_dict is not, then prefixes are added.
"""
state_dict = state_dict.get('model', state_dict)
# if model is a DataParallel model, then state_dict keys are prefixed with 'module.'
do_prefix = isinstance(
model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel))
state = {}
for k, v in state_dict.items():
if k.startswith('module.') and not do_prefix:
k = k[7:]
if not k.startswith('module.') and do_prefix:
k = 'module.' + k
state[k] = v
model.load_state_dict(state, strict=True)
print("Loaded successfully")
return model
def load_wts(model, checkpoint_path):
ckpt = torch.load(checkpoint_path, map_location='cpu')
return load_state_dict(model, ckpt)
def load_ckpt(model, checkpoint):
model = load_wts(model, checkpoint)
print("Loaded weights from {0}".format(checkpoint))
return model
parser = argparse.ArgumentParser()
parser.add_argument("--ckp_path", type=str, required=True)
parser.add_argument("-m", "--model", type=str, default="zoedepth")
parser.add_argument("--model_cfg_path", type=str, default="")
args, unknown_args = parser.parse_known_args()
overwrite_kwargs = parse_unknown(unknown_args)
overwrite_kwargs['model_cfg_path'] = args.model_cfg_path
overwrite_kwargs["model"] = args.model
config = get_config_user(args.model, **overwrite_kwargs)
config["pretrained_resource"] = ''
model = build_model(config)
model = load_ckpt(model, args.ckp_path)
model.eval()
model.cuda()
def colorize(value, cmap='magma_r', vmin=None, vmax=None):
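    # Normalize the depth values (vmax defaults to the 95th percentile) and map them through
    # a matplotlib colormap to an RGB uint8 image.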
# normalize
vmin = value.min() if vmin is None else vmin
# vmax = value.max() if vmax is None else vmax
vmax = np.percentile(value, 95) if vmax is None else vmax
if vmin != vmax:
value = (value - vmin) / (vmax - vmin) # vmin..vmax
else:
value = value * 0.
cmapper = matplotlib.cm.get_cmap(cmap)
value = cmapper(value, bytes=True) # ((1)xhxwx4)
    value = value[:, :, :3] # rgba -> rgb (drop alpha channel)
# rgb_value = value[..., ::-1]
rgb_value = value
return rgb_value
def predict_depth(model, image, mode, pn, reso, ps):
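    # Convert the PIL image to a tensor and upsample it to the target resolution (2160x3840 unless reso is given as "HxW").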
pil_image = image
image = transforms.ToTensor()(pil_image).unsqueeze(0).cuda()
image_height, image_width = image.shape[-2], image.shape[-1]
if reso != '':
image_resolution = (int(reso.split('x')[0]), int(reso.split('x')[1]))
else:
image_resolution = (2160, 3840)
image_hr = F.interpolate(image, image_resolution, mode='bicubic', align_corners=True)
| # MIT License
# Copyright (c) 2022 Intelligent Systems Lab Org
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# File author: Zhenyu Li
def depth_edges_mask(depth, occ_filter_thr):
"""Returns a mask of edges in the depth map.
Args:
depth: 2D numpy array of shape (H, W) with dtype float32.
Returns:
mask: 2D numpy array of shape (H, W) with dtype bool.
"""
# Compute the x and y gradients of the depth map.
depth_dx, depth_dy = np.gradient(depth)
# Compute the gradient magnitude.
depth_grad = np.sqrt(depth_dx ** 2 + depth_dy ** 2)
# Compute the edge mask.
# mask = depth_grad > 0.05 # default in zoedepth
mask = depth_grad > occ_filter_thr # preserve more edges (?)
return mask
def load_state_dict(model, state_dict):
"""Load state_dict into model, handling DataParallel and DistributedDataParallel. Also checks for "model" key in state_dict.
DataParallel prefixes state_dict keys with 'module.' when saving.
If the model is not a DataParallel model but the state_dict is, then prefixes are removed.
If the model is a DataParallel model but the state_dict is not, then prefixes are added.
"""
state_dict = state_dict.get('model', state_dict)
# if model is a DataParallel model, then state_dict keys are prefixed with 'module.'
do_prefix = isinstance(
model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel))
state = {}
for k, v in state_dict.items():
if k.startswith('module.') and not do_prefix:
k = k[7:]
if not k.startswith('module.') and do_prefix:
k = 'module.' + k
state[k] = v
model.load_state_dict(state, strict=True)
print("Loaded successfully")
return model
def load_wts(model, checkpoint_path):
ckpt = torch.load(checkpoint_path, map_location='cpu')
return load_state_dict(model, ckpt)
def load_ckpt(model, checkpoint):
model = load_wts(model, checkpoint)
print("Loaded weights from {0}".format(checkpoint))
return model
parser = argparse.ArgumentParser()
parser.add_argument("--ckp_path", type=str, required=True)
parser.add_argument("-m", "--model", type=str, default="zoedepth")
parser.add_argument("--model_cfg_path", type=str, default="")
args, unknown_args = parser.parse_known_args()
overwrite_kwargs = parse_unknown(unknown_args)
overwrite_kwargs['model_cfg_path'] = args.model_cfg_path
overwrite_kwargs["model"] = args.model
config = get_config_user(args.model, **overwrite_kwargs)
config["pretrained_resource"] = ''
model = build_model(config)
model = load_ckpt(model, args.ckp_path)
model.eval()
model.cuda()
def colorize(value, cmap='magma_r', vmin=None, vmax=None):
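    # Normalize the depth values (vmax defaults to the 95th percentile) and map them through
    # a matplotlib colormap to an RGB uint8 image.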
# normalize
vmin = value.min() if vmin is None else vmin
# vmax = value.max() if vmax is None else vmax
vmax = np.percentile(value, 95) if vmax is None else vmax
if vmin != vmax:
value = (value - vmin) / (vmax - vmin) # vmin..vmax
else:
value = value * 0.
cmapper = matplotlib.cm.get_cmap(cmap)
value = cmapper(value, bytes=True) # ((1)xhxwx4)
    value = value[:, :, :3] # rgba -> rgb (drop alpha channel)
# rgb_value = value[..., ::-1]
rgb_value = value
return rgb_value
def predict_depth(model, image, mode, pn, reso, ps):
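    # Convert the PIL image to a tensor and upsample it to the target resolution (2160x3840 unless reso is given as "HxW").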
pil_image = image
image = transforms.ToTensor()(pil_image).unsqueeze(0).cuda()
image_height, image_width = image.shape[-2], image.shape[-1]
if reso != '':
image_resolution = (int(reso.split('x')[0]), int(reso.split('x')[1]))
else:
image_resolution = (2160, 3840)
image_hr = F.interpolate(image, image_resolution, mode='bicubic', align_corners=True) | preprocess = Compose([Resize(512, 384, keep_aspect_ratio=False, ensure_multiple_of=32, resize_method="minimal")]) | 6 | 2023-12-04 08:43:15+00:00 | 12k |
LTH14/rcg | main_adm.py | [
{
"identifier": "NativeScalerWithGradNormCount",
"path": "util/misc.py",
"snippet": "class NativeScalerWithGradNormCount:\n state_dict_key = \"amp_scaler\"\n\n def __init__(self):\n self._scaler = torch.cuda.amp.GradScaler()\n\n def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):\n self._scaler.scale(loss).backward(create_graph=create_graph)\n if update_grad:\n if clip_grad is not None:\n assert parameters is not None\n self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place\n norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)\n else:\n self._scaler.unscale_(optimizer)\n norm = get_grad_norm_(parameters)\n self._scaler.step(optimizer)\n self._scaler.update()\n else:\n norm = None\n return norm\n\n def state_dict(self):\n return self._scaler.state_dict()\n\n def load_state_dict(self, state_dict):\n self._scaler.load_state_dict(state_dict)"
},
{
"identifier": "train_one_epoch",
"path": "engine_adm.py",
"snippet": "def train_one_epoch(model: torch.nn.Module,\n diffusion,\n schedule_sampler,\n pretrained_encoder,\n model_params, ema_params,\n data_loader: Iterable, optimizer: torch.optim.Optimizer,\n device: torch.device, epoch: int, loss_scaler,\n log_writer=None,\n args=None):\n model.train(True)\n metric_logger = misc.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}]'.format(epoch)\n print_freq = 20\n\n accum_iter = args.accum_iter\n\n optimizer.zero_grad()\n\n if log_writer is not None:\n print('log_dir: {}'.format(log_writer.log_dir))\n\n for data_iter_step, (images, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\n\n images = images.to(device, non_blocking=True)\n images = images * 2 - 1 # image to [-1, 1] to be compatible with ADM\n\n model_kwargs = {}\n if args.class_cond:\n model_kwargs[\"y\"] = targets.to(device, non_blocking=True)\n\n # get loss\n t, weights = schedule_sampler.sample(images.shape[0], dist_util.dev())\n\n compute_losses = functools.partial(\n diffusion.training_losses,\n model,\n images,\n t,\n pretrained_encoder,\n model_kwargs=model_kwargs,\n )\n\n loss = compute_losses()\n\n if isinstance(schedule_sampler, LossAwareSampler):\n schedule_sampler.update_with_local_losses(\n t, loss[\"loss\"].detach()\n )\n\n loss = (loss[\"loss\"] * weights).mean()\n\n loss_value = loss.item()\n\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training\".format(loss_value))\n sys.exit(1)\n\n loss /= accum_iter\n loss_scaler(loss, optimizer, parameters=model.parameters(), update_grad=(data_iter_step + 1) % accum_iter == 0)\n if (data_iter_step + 1) % accum_iter == 0:\n optimizer.zero_grad()\n\n torch.cuda.synchronize()\n\n # update ema\n update_ema(ema_params, model_params, rate=args.ema_rate)\n\n metric_logger.update(loss=loss_value)\n\n lr = optimizer.param_groups[0][\"lr\"]\n metric_logger.update(lr=lr)\n\n loss_value_reduce = misc.all_reduce_mean(loss_value)\n if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:\n \"\"\" We use epoch_1000x as the x-axis in tensorboard.\n This calibrates different curves when batch size changes.\n \"\"\"\n epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)\n log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)\n log_writer.add_scalar('lr', lr, epoch_1000x)\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}"
},
{
"identifier": "gen_img",
"path": "engine_adm.py",
"snippet": "def gen_img(model, model_without_ddp, diffusion, ema_params, rdm_sampler, args, epoch, batch_size=16, log_writer=None, use_ema=False):\n model.eval()\n num_steps = args.num_images // (batch_size * misc.get_world_size()) + 1\n save_folder = os.path.join(args.output_dir, \"steps{}\".format(args.gen_timestep_respacing))\n if misc.get_rank() == 0:\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n # switch to ema params\n if use_ema:\n model_state_dict = copy.deepcopy(model_without_ddp.state_dict())\n ema_state_dict = copy.deepcopy(model_without_ddp.state_dict())\n for i, (name, _value) in enumerate(model_without_ddp.named_parameters()):\n assert name in ema_state_dict\n ema_state_dict[name] = ema_params[i]\n print(\"Switch to ema\")\n model_without_ddp.load_state_dict(ema_state_dict)\n\n for i in range(num_steps):\n print(\"Generation step {}/{}\".format(i, num_steps))\n\n # sample representation\n if args.rep_cond:\n with rdm_sampler.model.ema_scope(\"Plotting\"):\n shape = [rdm_sampler.model.model.diffusion_model.in_channels,\n rdm_sampler.model.model.diffusion_model.image_size,\n rdm_sampler.model.model.diffusion_model.image_size]\n cond = {\"class_label\": torch.zeros(batch_size).cuda().long()}\n cond = rdm_sampler.model.get_learned_conditioning(cond)\n\n sampled_rep, _ = rdm_sampler.sample(args.rdm_steps, conditioning=cond, batch_size=batch_size,\n shape=shape, eta=args.rdm_eta, verbose=False)\n sampled_rep = sampled_rep.squeeze(-1).squeeze(-1)\n model_kwargs = {'rep': sampled_rep}\n elif args.class_cond:\n model_kwargs = {'y': torch.randint(0, 1000, (batch_size,)).cuda()}\n else:\n model_kwargs = None\n\n with torch.no_grad():\n sample_fn = (\n diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop\n )\n gen_images_batch = sample_fn(\n model,\n (batch_size, 3, args.image_size, args.image_size),\n clip_denoised=True,\n model_kwargs=model_kwargs,\n )\n gen_images_batch = (gen_images_batch + 1) / 2\n\n gen_images_batch = misc.concat_all_gather(gen_images_batch)\n gen_images_batch = gen_images_batch.detach().cpu()\n\n # save img\n if misc.get_rank() == 0:\n for b_id in range(gen_images_batch.size(0)):\n if i*gen_images_batch.size(0)+b_id >= args.num_images:\n break\n gen_img = np.clip(gen_images_batch[b_id].numpy().transpose([1, 2, 0]) * 255, 0, 255)\n gen_img = gen_img.astype(np.uint8)[:, :, ::-1]\n cv2.imwrite(\n os.path.join(save_folder, '{}.png'.format(str(i * gen_images_batch.size(0) + b_id).zfill(5))),\n gen_img)\n\n # back to no ema\n if use_ema:\n print(\"Switch back from ema\")\n model_without_ddp.load_state_dict(model_state_dict)\n\n # compute FID and IS\n if log_writer is not None:\n metrics_dict = torch_fidelity.calculate_metrics(\n input1=save_folder,\n input2='imagenet-val',\n cuda=True,\n isc=True,\n fid=True,\n kid=False,\n prc=False,\n verbose=False,\n )\n fid = metrics_dict['frechet_inception_distance']\n inception_score = metrics_dict['inception_score_mean']\n if use_ema:\n log_writer.add_scalar('fid_ema', fid, epoch)\n log_writer.add_scalar('is_ema', inception_score, epoch)\n print(\"EMA FID: {}, EMA Inception Score: {}\".format(fid, inception_score))\n else:\n log_writer.add_scalar('fid', fid, epoch)\n log_writer.add_scalar('is', inception_score, epoch)\n print(\"FID: {}, Inception Score: {}\".format(fid, inception_score))\n # remove temporal saving folder\n shutil.rmtree(save_folder)"
},
{
"identifier": "create_named_schedule_sampler",
"path": "pixel_generator/guided_diffusion/resample.py",
"snippet": "def create_named_schedule_sampler(name, diffusion):\n \"\"\"\n Create a ScheduleSampler from a library of pre-defined samplers.\n\n :param name: the name of the sampler.\n :param diffusion: the diffusion object to sample for.\n \"\"\"\n if name == \"uniform\":\n return UniformSampler(diffusion)\n elif name == \"loss-second-moment\":\n return LossSecondMomentResampler(diffusion)\n else:\n raise NotImplementedError(f\"unknown schedule sampler: {name}\")"
},
{
"identifier": "model_and_diffusion_defaults",
"path": "pixel_generator/guided_diffusion/script_util.py",
"snippet": "def model_and_diffusion_defaults():\n \"\"\"\n Defaults for image training.\n \"\"\"\n res = dict(\n image_size=64,\n num_channels=128,\n num_res_blocks=2,\n num_heads=4,\n num_heads_upsample=-1,\n num_head_channels=-1,\n attention_resolutions=\"16,8\",\n channel_mult=\"\",\n dropout=0.0,\n class_cond=False,\n rep_cond=False,\n rep_dim=256,\n use_checkpoint=False,\n use_scale_shift_norm=True,\n resblock_updown=False,\n use_fp16=False,\n use_new_attention_order=False,\n )\n res.update(diffusion_defaults())\n return res"
},
{
"identifier": "create_model_and_diffusion",
"path": "pixel_generator/guided_diffusion/script_util.py",
"snippet": "def create_model_and_diffusion(\n image_size,\n class_cond,\n rep_cond,\n rep_dim,\n learn_sigma,\n num_channels,\n num_res_blocks,\n channel_mult,\n num_heads,\n num_head_channels,\n num_heads_upsample,\n attention_resolutions,\n dropout,\n diffusion_steps,\n noise_schedule,\n timestep_respacing,\n use_kl,\n predict_xstart,\n rescale_timesteps,\n rescale_learned_sigmas,\n use_checkpoint,\n use_scale_shift_norm,\n resblock_updown,\n use_fp16,\n use_new_attention_order,\n):\n model = create_model(\n image_size,\n num_channels,\n num_res_blocks,\n channel_mult=channel_mult,\n learn_sigma=learn_sigma,\n class_cond=class_cond,\n rep_cond=rep_cond,\n rep_dim=rep_dim,\n use_checkpoint=use_checkpoint,\n attention_resolutions=attention_resolutions,\n num_heads=num_heads,\n num_head_channels=num_head_channels,\n num_heads_upsample=num_heads_upsample,\n use_scale_shift_norm=use_scale_shift_norm,\n dropout=dropout,\n resblock_updown=resblock_updown,\n use_fp16=use_fp16,\n use_new_attention_order=use_new_attention_order,\n )\n diffusion = create_gaussian_diffusion(\n steps=diffusion_steps,\n learn_sigma=learn_sigma,\n noise_schedule=noise_schedule,\n use_kl=use_kl,\n predict_xstart=predict_xstart,\n rescale_timesteps=rescale_timesteps,\n rescale_learned_sigmas=rescale_learned_sigmas,\n timestep_respacing=timestep_respacing,\n )\n return model, diffusion"
},
{
"identifier": "create_gaussian_diffusion",
"path": "pixel_generator/guided_diffusion/script_util.py",
"snippet": "def create_gaussian_diffusion(\n *,\n steps=1000,\n learn_sigma=False,\n sigma_small=False,\n noise_schedule=\"linear\",\n use_kl=False,\n predict_xstart=False,\n rescale_timesteps=False,\n rescale_learned_sigmas=False,\n timestep_respacing=\"\",\n):\n betas = gd.get_named_beta_schedule(noise_schedule, steps)\n if use_kl:\n loss_type = gd.LossType.RESCALED_KL\n elif rescale_learned_sigmas:\n loss_type = gd.LossType.RESCALED_MSE\n else:\n loss_type = gd.LossType.MSE\n if not timestep_respacing:\n timestep_respacing = [steps]\n return SpacedDiffusion(\n use_timesteps=space_timesteps(steps, timestep_respacing),\n betas=betas,\n model_mean_type=(\n gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X\n ),\n model_var_type=(\n (\n gd.ModelVarType.FIXED_LARGE\n if not sigma_small\n else gd.ModelVarType.FIXED_SMALL\n )\n if not learn_sigma\n else gd.ModelVarType.LEARNED_RANGE\n ),\n loss_type=loss_type,\n rescale_timesteps=rescale_timesteps,\n )"
},
{
"identifier": "args_to_dict",
"path": "pixel_generator/guided_diffusion/script_util.py",
"snippet": "def args_to_dict(args, keys):\n return {k: getattr(args, k) for k in keys}"
},
{
"identifier": "load_model",
"path": "rdm/util.py",
"snippet": "def load_model(config, ckpt):\n if ckpt:\n print(f\"Loading model from {ckpt}\")\n pl_sd = torch.load(ckpt, map_location=\"cpu\")\n if \"state_dict\" not in pl_sd:\n pl_sd[\"state_dict\"] = pl_sd[\"model\"]\n else:\n pl_sd = {\"state_dict\": None}\n model = load_model_from_config(config.model,\n pl_sd[\"state_dict\"])\n\n return model"
},
{
"identifier": "DDIMSampler",
"path": "rdm/models/diffusion/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).cuda()\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n\n for i, step in enumerate(time_range):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img / self.model.input_scale, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n if self.model.parameterization == \"x0\":\n e_t = self.model._predict_eps_from_xstart(x, t, e_t)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0"
}
] | import argparse
import datetime
import json
import numpy as np
import os
import time
import copy
import torch
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import timm
import util.misc as misc
import pretrained_enc.models_pretrained_enc as models_pretrained_enc
from pathlib import Path
from torch.utils.tensorboard import SummaryWriter
from util.misc import NativeScalerWithGradNormCount as NativeScaler
from engine_adm import train_one_epoch, gen_img
from pixel_generator.guided_diffusion.resample import create_named_schedule_sampler
from pixel_generator.guided_diffusion.script_util import (
model_and_diffusion_defaults,
create_model_and_diffusion,
create_gaussian_diffusion,
args_to_dict,
)
from rdm.util import load_model
from rdm.models.diffusion.ddim import DDIMSampler
from omegaconf import OmegaConf | 8,696 | sampler_train = torch.utils.data.RandomSampler(dataset_train)
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
# load model
model, diffusion = create_model_and_diffusion(
**args_to_dict(args, model_and_diffusion_defaults().keys())
)
model.to(device)
schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)
# pre-trained encoder
if args.rep_cond:
assert args.pretrained_enc_path is not None
pretrained_encoder = models_pretrained_enc.__dict__[args.pretrained_enc_arch](proj_dim=args.rep_dim)
# load pre-trained encoder parameters
if 'moco' in args.pretrained_enc_arch:
pretrained_encoder = models_pretrained_enc.load_pretrained_moco(pretrained_encoder, args.pretrained_enc_path)
else:
raise NotImplementedError
for param in pretrained_encoder.parameters():
param.requires_grad = False
pretrained_encoder.to(device)
pretrained_encoder.eval()
else:
pretrained_encoder = None
# pre-trained RDM
if args.rep_cond:
rdm_config = OmegaConf.load(args.pretrained_rdm_cfg)
ldm_model = load_model(rdm_config, args.pretrained_rdm_ckpt)
rdm_sampler = DDIMSampler(ldm_model)
else:
rdm_sampler = None
# sampling diffusion
gen_diffusion = create_gaussian_diffusion(
steps=args.diffusion_steps,
learn_sigma=args.learn_sigma,
noise_schedule=args.noise_schedule,
use_kl=args.use_kl,
predict_xstart=args.predict_xstart,
rescale_timesteps=args.rescale_timesteps,
rescale_learned_sigmas=args.rescale_learned_sigmas,
timestep_respacing=args.gen_timestep_respacing,
)
model_without_ddp = model
print("Model = %s" % str(model_without_ddp))
eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
if args.lr is None: # only base_lr is specified
args.lr = args.blr * eff_batch_size
print("base lr: %.2e" % (args.lr / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.accum_iter)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
# Log parameters
params = list(model_without_ddp.parameters())
n_params = sum(p.numel() for p in model_without_ddp.parameters() if p.requires_grad)
print("Number of trainable parameters: {}M".format(n_params / 1e6))
if global_rank == 0:
log_writer.add_scalar('num_params', n_params / 1e6, 0)
optimizer = torch.optim.AdamW(params, lr=args.lr, weight_decay=args.weight_decay)
print(optimizer)
loss_scaler = NativeScaler()
# Resume training or from pre-trained unconditional ADM
if os.path.exists(os.path.join(args.resume, "checkpoint-last.pth")):
resume_path = os.path.join(args.resume, "checkpoint-last.pth")
else:
resume_path = args.resume
if resume_path:
        checkpoint = torch.load(resume_path, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
model_params = list(model_without_ddp.parameters())
ema_state_dict = checkpoint['model_ema']
ema_params = [ema_state_dict[name].cuda() for name, _ in model_without_ddp.named_parameters()]
        print("Resume checkpoint %s" % resume_path)
if 'optimizer' in checkpoint and 'epoch' in checkpoint and not (hasattr(args, 'eval') and args.eval):
optimizer.load_state_dict(checkpoint['optimizer'])
args.start_epoch = checkpoint['epoch'] + 1
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
print("With optim & sched!")
else:
model_params = list(model_without_ddp.parameters())
ema_params = copy.deepcopy(model_params)
print("Training from scratch")
if args.evaluate:
print("Start evaluating")
gen_img(model, model_without_ddp, gen_diffusion, ema_params, rdm_sampler, args, 0, batch_size=16, log_writer=log_writer, use_ema=True)
return
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
|
assert timm.__version__ == "0.3.2" # version check
def get_args_parser():
parser = argparse.ArgumentParser('ADM training', add_help=False)
parser.add_argument('--batch_size', default=4, type=int,
                        help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')
parser.add_argument('--epochs', default=400, type=int)
parser.add_argument('--accum_iter', default=1, type=int,
help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')
# config
parser.add_argument('--image_size', default=256, type=int,
help='images input size')
parser.add_argument('--config', type=str, help='config file')
# Optimizer parameters
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--lr', type=float, default=None, metavar='LR',
help='learning rate (absolute lr)')
parser.add_argument('--blr', type=float, default=1e-6, metavar='LR',
help='base learning rate: absolute_lr = base_lr * total_batch_size')
parser.add_argument('--min_lr', type=float, default=0., metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0')
parser.add_argument('--cosine_lr', action='store_true',
help='Use cosine lr scheduling.')
parser.add_argument('--warmup_epochs', default=0, type=int)
# ADM parameters
parser.add_argument('--schedule_sampler', default='uniform', type=str)
parser.add_argument('--lr_anneal_steps', default=0, type=int)
parser.add_argument('--microbatch', default=-1, type=int)
parser.add_argument('--ema_rate', default=0.9999, type=float)
parser.add_argument('--use_fp16', action='store_true')
parser.add_argument('--fp16_scale_growth', default=1e-3, type=float)
# ADM model parameters
parser.add_argument('--num_channels', default=128, type=int)
parser.add_argument('--num_res_blocks', default=2, type=int)
parser.add_argument('--num_heads', default=4, type=int)
parser.add_argument('--num_heads_upsample', default=-1, type=int)
parser.add_argument('--num_head_channels', default=-1, type=int)
parser.add_argument('--attention_resolutions', default="16,8", type=str)
parser.add_argument('--channel_mult', default="", type=str)
parser.add_argument('--dropout', default=0.0, type=float)
parser.add_argument('--class_cond', action='store_true')
parser.add_argument('--use_checkpoint', action='store_true')
parser.add_argument('--use_scale_shift_norm', action='store_true')
parser.add_argument('--resblock_updown', action='store_true')
parser.add_argument('--use_new_attention_order', action='store_true')
# ADM diffusion parameters
parser.add_argument('--learn_sigma', action='store_true')
parser.add_argument('--use_kl', action='store_true')
parser.add_argument('--predict_xstart', action='store_true')
parser.add_argument('--rescale_timesteps', action='store_true')
parser.add_argument('--rescale_learned_sigmas', action='store_true')
parser.add_argument('--diffusion_steps', default=1000, type=int)
parser.add_argument('--noise_schedule', default="linear", type=str)
parser.add_argument('--timestep_respacing', default="", type=str)
# RDM parameters
parser.add_argument('--rep_cond', action='store_true')
parser.add_argument('--rep_dim', default=256, type=int)
parser.add_argument('--pretrained_enc_arch', default=None, type=str)
parser.add_argument('--pretrained_enc_path', default=None, type=str)
parser.add_argument('--rdm_steps', default=250, type=int)
parser.add_argument('--rdm_eta', default=1.0, type=float)
parser.add_argument('--pretrained_rdm_cfg', default=None, type=str)
parser.add_argument('--pretrained_rdm_ckpt', default=None, type=str)
# ADM generation parameters
parser.add_argument('--evaluate', action='store_true', help="perform only evaluation")
parser.add_argument('--eval_freq', type=int, default=8, help='evaluation frequency')
parser.add_argument('--num_images', default=50000, type=int)
parser.add_argument('--use_ddim', action='store_true')
parser.add_argument('--gen_timestep_respacing', default="", type=str)
# Dataset parameters
parser.add_argument('--data_path', default='./data/imagenet', type=str,
help='dataset path')
parser.add_argument('--output_dir', default='./output_dir',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default='./output_dir',
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
return parser
def main(args):
misc.init_distributed_mode(args)
print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
print("{}".format(args).replace(', ', ',\n'))
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + misc.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
num_tasks = misc.get_world_size()
global_rank = misc.get_rank()
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = SummaryWriter(log_dir=args.log_dir)
else:
log_writer = None
transform_train = transforms.Compose([
transforms.Resize(256, interpolation=3),
transforms.RandomCrop(256),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()])
dataset_train = datasets.ImageFolder(os.path.join(args.data_path, 'train'), transform=transform_train)
print(dataset_train)
if True: # args.distributed:
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
# load model
model, diffusion = create_model_and_diffusion(
**args_to_dict(args, model_and_diffusion_defaults().keys())
)
model.to(device)
schedule_sampler = create_named_schedule_sampler(args.schedule_sampler, diffusion)
# pre-trained encoder
if args.rep_cond:
assert args.pretrained_enc_path is not None
pretrained_encoder = models_pretrained_enc.__dict__[args.pretrained_enc_arch](proj_dim=args.rep_dim)
# load pre-trained encoder parameters
if 'moco' in args.pretrained_enc_arch:
pretrained_encoder = models_pretrained_enc.load_pretrained_moco(pretrained_encoder, args.pretrained_enc_path)
else:
raise NotImplementedError
for param in pretrained_encoder.parameters():
param.requires_grad = False
pretrained_encoder.to(device)
pretrained_encoder.eval()
else:
pretrained_encoder = None
# pre-trained RDM
if args.rep_cond:
rdm_config = OmegaConf.load(args.pretrained_rdm_cfg)
ldm_model = load_model(rdm_config, args.pretrained_rdm_ckpt)
rdm_sampler = DDIMSampler(ldm_model)
else:
rdm_sampler = None
# sampling diffusion
gen_diffusion = create_gaussian_diffusion(
steps=args.diffusion_steps,
learn_sigma=args.learn_sigma,
noise_schedule=args.noise_schedule,
use_kl=args.use_kl,
predict_xstart=args.predict_xstart,
rescale_timesteps=args.rescale_timesteps,
rescale_learned_sigmas=args.rescale_learned_sigmas,
timestep_respacing=args.gen_timestep_respacing,
)
model_without_ddp = model
print("Model = %s" % str(model_without_ddp))
eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
if args.lr is None: # only base_lr is specified
args.lr = args.blr * eff_batch_size
print("base lr: %.2e" % (args.lr / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.accum_iter)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
# Log parameters
params = list(model_without_ddp.parameters())
n_params = sum(p.numel() for p in model_without_ddp.parameters() if p.requires_grad)
print("Number of trainable parameters: {}M".format(n_params / 1e6))
if global_rank == 0:
log_writer.add_scalar('num_params', n_params / 1e6, 0)
optimizer = torch.optim.AdamW(params, lr=args.lr, weight_decay=args.weight_decay)
print(optimizer)
loss_scaler = NativeScaler()
# Resume training or from pre-trained unconditional ADM
if os.path.exists(os.path.join(args.resume, "checkpoint-last.pth")):
resume_path = os.path.join(args.resume, "checkpoint-last.pth")
else:
resume_path = args.resume
if resume_path:
        checkpoint = torch.load(resume_path, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
model_params = list(model_without_ddp.parameters())
ema_state_dict = checkpoint['model_ema']
ema_params = [ema_state_dict[name].cuda() for name, _ in model_without_ddp.named_parameters()]
        print("Resume checkpoint %s" % resume_path)
if 'optimizer' in checkpoint and 'epoch' in checkpoint and not (hasattr(args, 'eval') and args.eval):
optimizer.load_state_dict(checkpoint['optimizer'])
args.start_epoch = checkpoint['epoch'] + 1
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
print("With optim & sched!")
else:
model_params = list(model_without_ddp.parameters())
ema_params = copy.deepcopy(model_params)
print("Training from scratch")
if args.evaluate:
print("Start evaluating")
gen_img(model, model_without_ddp, gen_diffusion, ema_params, rdm_sampler, args, 0, batch_size=16, log_writer=log_writer, use_ema=True)
return
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
| train_stats = train_one_epoch( | 1 | 2023-12-01 02:08:50+00:00 | 12k |
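For reference, the EMA swap performed inside gen_img above (back up the live state_dict, overwrite each named parameter with its EMA counterpart, sample, then restore) boils down to the following minimal sketch; sample_with_ema and sample_fn are hypothetical names used only for illustration, not part of this dataset row:

import copy
import torch
import torch.nn as nn

def sample_with_ema(model: nn.Module, ema_params, sample_fn):
    # Back up the live weights so they can be restored after sampling.
    backup_state = copy.deepcopy(model.state_dict())
    ema_state = copy.deepcopy(model.state_dict())
    # ema_params is assumed to be ordered like model.named_parameters(),
    # which is how gen_img above indexes it.
    for (name, _), ema_p in zip(model.named_parameters(), ema_params):
        ema_state[name] = ema_p
    model.load_state_dict(ema_state)
    try:
        with torch.no_grad():
            out = sample_fn(model)
    finally:
        # Switch back to the non-EMA weights.
        model.load_state_dict(backup_state)
    return out

Such a helper would wrap, for example, a call to diffusion.p_sample_loop as done in gen_img; the record's own code keeps the two steps inline around the generation loop instead.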
baaivision/GeoDream | threestudio/models/geometry/geodream_geometry_volume.py | [
{
"identifier": "BaseGeometry",
"path": "threestudio/models/geometry/base.py",
"snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}"
},
{
"identifier": "BaseImplicitGeometry",
"path": "threestudio/models/geometry/base.py",
"snippet": "class BaseImplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n isosurface: bool = True\n isosurface_method: str = \"mt\"\n isosurface_resolution: int = 128\n isosurface_threshold: Union[float, str] = 0.0\n isosurface_chunk: int = 0\n isosurface_coarse_to_fine: bool = True\n isosurface_deformable_grid: bool = False\n isosurface_remove_outliers: bool = True\n isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )\n self.isosurface_helper: Optional[IsosurfaceHelper] = None\n self.unbounded: bool = False\n\n def _initilize_isosurface_helper(self):\n if self.cfg.isosurface and self.isosurface_helper is None:\n if self.cfg.isosurface_method == \"mc-cpu\":\n self.isosurface_helper = MarchingCubeCPUHelper(\n self.cfg.isosurface_resolution\n ).to(self.device)\n elif self.cfg.isosurface_method == \"mt\":\n self.isosurface_helper = MarchingTetrahedraHelper(\n self.cfg.isosurface_resolution,\n f\"load/tets/{self.cfg.isosurface_resolution}_tets.npz\",\n ).to(self.device)\n else:\n raise AttributeError(\n \"Unknown isosurface method {self.cfg.isosurface_method}\"\n )\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n raise NotImplementedError\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n # return the value of the implicit field, could be density / signed distance\n # also return a deformation field if the grid vertices can be optimized\n raise NotImplementedError\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n # return the value of the implicit field, where the zero level set represents the surface\n raise NotImplementedError\n\n def _isosurface(self, bbox: Float[Tensor, \"2 3\"], fine_stage: bool = False) -> Mesh:\n def batch_func(x):\n # scale to bbox as the input vertices are in [0, 1]\n field, deformation = self.forward_field(\n scale_tensor(\n x.to(bbox.device), self.isosurface_helper.points_range, bbox\n ),\n )\n field = field.to(\n x.device\n ) # move to the same device as the input (could be CPU)\n if deformation is not None:\n deformation = deformation.to(x.device)\n return field, deformation\n\n assert self.isosurface_helper is not None\n\n field, deformation = chunk_batch(\n batch_func,\n self.cfg.isosurface_chunk,\n self.isosurface_helper.grid_vertices,\n )\n\n threshold: float\n\n if isinstance(self.cfg.isosurface_threshold, float):\n threshold = self.cfg.isosurface_threshold\n elif self.cfg.isosurface_threshold == \"auto\":\n eps = 1.0e-5\n threshold = field[field > eps].mean().item()\n threestudio.info(\n f\"Automatically determined isosurface threshold: {threshold}\"\n )\n else:\n raise TypeError(\n f\"Unknown isosurface_threshold {self.cfg.isosurface_threshold}\"\n )\n\n level = self.forward_level(field, threshold)\n mesh: Mesh = self.isosurface_helper(level, deformation=deformation)\n mesh.v_pos = scale_tensor(\n mesh.v_pos, self.isosurface_helper.points_range, bbox\n ) # scale to bbox as the grid vertices are in [0, 1]\n mesh.add_extra(\"bbox\", bbox)\n\n if 
self.cfg.isosurface_remove_outliers:\n # remove outliers components with small number of faces\n # only enabled when the mesh is not differentiable\n mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)\n\n return mesh\n\n def isosurface(self) -> Mesh:\n if not self.cfg.isosurface:\n raise NotImplementedError(\n \"Isosurface is not enabled in the current configuration\"\n )\n self._initilize_isosurface_helper()\n if self.cfg.isosurface_coarse_to_fine:\n threestudio.debug(\"First run isosurface to get a tight bounding box ...\")\n with torch.no_grad():\n mesh_coarse = self._isosurface(self.bbox)\n vmin, vmax = mesh_coarse.v_pos.amin(dim=0), mesh_coarse.v_pos.amax(dim=0)\n vmin_ = (vmin - (vmax - vmin) * 0.1).max(self.bbox[0])\n vmax_ = (vmax + (vmax - vmin) * 0.1).min(self.bbox[1])\n threestudio.debug(\"Run isosurface again with the tight bounding box ...\")\n mesh = self._isosurface(torch.stack([vmin_, vmax_], dim=0), fine_stage=True)\n else:\n mesh = self._isosurface(self.bbox)\n return mesh"
},
{
"identifier": "contract_to_unisphere",
"path": "threestudio/models/geometry/base.py",
"snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x"
},
{
"identifier": "get_encoding",
"path": "threestudio/models/networks.py",
"snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding"
},
{
"identifier": "get_mlp",
"path": "threestudio/models/networks.py",
"snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network"
},
{
"identifier": "get_activation",
"path": "threestudio/utils/ops.py",
"snippet": "def get_activation(name) -> Callable:\n if name is None:\n return lambda x: x\n name = name.lower()\n if name == \"none\":\n return lambda x: x\n elif name == \"lin2srgb\":\n return lambda x: torch.where(\n x > 0.0031308,\n torch.pow(torch.clamp(x, min=0.0031308), 1.0 / 2.4) * 1.055 - 0.055,\n 12.92 * x,\n ).clamp(0.0, 1.0)\n elif name == \"exp\":\n return lambda x: torch.exp(x)\n elif name == \"shifted_exp\":\n return lambda x: torch.exp(x - 1.0)\n elif name == \"trunc_exp\":\n return trunc_exp\n elif name == \"shifted_trunc_exp\":\n return lambda x: trunc_exp(x - 1.0)\n elif name == \"sigmoid\":\n return lambda x: torch.sigmoid(x)\n elif name == \"tanh\":\n return lambda x: torch.tanh(x)\n elif name == \"shifted_softplus\":\n return lambda x: F.softplus(x - 1.0)\n elif name == \"scale_-11_01\":\n return lambda x: x * 0.5 + 0.5\n else:\n try:\n return getattr(F, name)\n except AttributeError:\n raise ValueError(f\"Unknown activation function: {name}\")"
},
{
"identifier": "grid_sample_3d",
"path": "threestudio/models/geometry/grid_sampler.py",
"snippet": "def grid_sample_3d(volume, optical):\n \"\"\"\n bilinear sampling cannot guarantee continuous first-order gradient\n mimic pytorch grid_sample function\n The 8 corner points of a volume noted as: 4 points (front view); 4 points (back view)\n fnw (front north west) point\n bse (back south east) point\n :param volume: [B, C, X, Y, Z]\n :param optical: [B, x, y, z, 3]\n :return:\n \"\"\"\n N, C, ID, IH, IW = volume.shape\n _, D, H, W, _ = optical.shape\n\n ix = optical[..., 0]\n iy = optical[..., 1]\n iz = optical[..., 2]\n\n ix = ((ix + 1) / 2) * (IW - 1)\n iy = ((iy + 1) / 2) * (IH - 1)\n iz = ((iz + 1) / 2) * (ID - 1)\n\n mask_x = (ix > 0) & (ix < IW)\n mask_y = (iy > 0) & (iy < IH)\n mask_z = (iz > 0) & (iz < ID)\n\n mask = mask_x & mask_y & mask_z # [B, x, y, z]\n mask = mask[:, None, :, :, :].repeat(1, C, 1, 1, 1) # [B, C, x, y, z]\n\n with torch.no_grad():\n # back north west\n ix_bnw = torch.floor(ix)\n iy_bnw = torch.floor(iy)\n iz_bnw = torch.floor(iz)\n\n ix_bne = ix_bnw + 1\n iy_bne = iy_bnw\n iz_bne = iz_bnw\n\n ix_bsw = ix_bnw\n iy_bsw = iy_bnw + 1\n iz_bsw = iz_bnw\n\n ix_bse = ix_bnw + 1\n iy_bse = iy_bnw + 1\n iz_bse = iz_bnw\n\n # front view\n ix_fnw = ix_bnw\n iy_fnw = iy_bnw\n iz_fnw = iz_bnw + 1\n\n ix_fne = ix_bnw + 1\n iy_fne = iy_bnw\n iz_fne = iz_bnw + 1\n\n ix_fsw = ix_bnw\n iy_fsw = iy_bnw + 1\n iz_fsw = iz_bnw + 1\n\n ix_fse = ix_bnw + 1\n iy_fse = iy_bnw + 1\n iz_fse = iz_bnw + 1\n\n # back view\n bnw = (ix_fse - ix) * (iy_fse - iy) * (iz_fse - iz) # smaller volume, larger weight\n bne = (ix - ix_fsw) * (iy_fsw - iy) * (iz_fsw - iz)\n bsw = (ix_fne - ix) * (iy - iy_fne) * (iz_fne - iz)\n bse = (ix - ix_fnw) * (iy - iy_fnw) * (iz_fnw - iz)\n\n # front view\n fnw = (ix_bse - ix) * (iy_bse - iy) * (iz - iz_bse) # smaller volume, larger weight\n fne = (ix - ix_bsw) * (iy_bsw - iy) * (iz - iz_bsw)\n fsw = (ix_bne - ix) * (iy - iy_bne) * (iz - iz_bne)\n fse = (ix - ix_bnw) * (iy - iy_bnw) * (iz - iz_bnw)\n\n with torch.no_grad():\n # back view\n torch.clamp(ix_bnw, 0, IW - 1, out=ix_bnw)\n torch.clamp(iy_bnw, 0, IH - 1, out=iy_bnw)\n torch.clamp(iz_bnw, 0, ID - 1, out=iz_bnw)\n\n torch.clamp(ix_bne, 0, IW - 1, out=ix_bne)\n torch.clamp(iy_bne, 0, IH - 1, out=iy_bne)\n torch.clamp(iz_bne, 0, ID - 1, out=iz_bne)\n\n torch.clamp(ix_bsw, 0, IW - 1, out=ix_bsw)\n torch.clamp(iy_bsw, 0, IH - 1, out=iy_bsw)\n torch.clamp(iz_bsw, 0, ID - 1, out=iz_bsw)\n\n torch.clamp(ix_bse, 0, IW - 1, out=ix_bse)\n torch.clamp(iy_bse, 0, IH - 1, out=iy_bse)\n torch.clamp(iz_bse, 0, ID - 1, out=iz_bse)\n\n # front view\n torch.clamp(ix_fnw, 0, IW - 1, out=ix_fnw)\n torch.clamp(iy_fnw, 0, IH - 1, out=iy_fnw)\n torch.clamp(iz_fnw, 0, ID - 1, out=iz_fnw)\n\n torch.clamp(ix_fne, 0, IW - 1, out=ix_fne)\n torch.clamp(iy_fne, 0, IH - 1, out=iy_fne)\n torch.clamp(iz_fne, 0, ID - 1, out=iz_fne)\n\n torch.clamp(ix_fsw, 0, IW - 1, out=ix_fsw)\n torch.clamp(iy_fsw, 0, IH - 1, out=iy_fsw)\n torch.clamp(iz_fsw, 0, ID - 1, out=iz_fsw)\n\n torch.clamp(ix_fse, 0, IW - 1, out=ix_fse)\n torch.clamp(iy_fse, 0, IH - 1, out=iy_fse)\n torch.clamp(iz_fse, 0, ID - 1, out=iz_fse)\n\n # xxx = volume[:, :, iz_bnw.long(), iy_bnw.long(), ix_bnw.long()]\n volume = volume.view(N, C, ID * IH * IW)\n # yyy = volume[:, :, (iz_bnw * ID + iy_bnw * IW + ix_bnw).long()]\n\n # back view\n bnw_val = torch.gather(volume, 2,\n (iz_bnw * ID ** 2 + iy_bnw * IW + ix_bnw).long().view(N, 1, D * H * W).repeat(1, C, 1))\n bne_val = torch.gather(volume, 2,\n (iz_bne * ID ** 2 + iy_bne * IW + ix_bne).long().view(N, 1, D * H * 
W).repeat(1, C, 1))\n bsw_val = torch.gather(volume, 2,\n (iz_bsw * ID ** 2 + iy_bsw * IW + ix_bsw).long().view(N, 1, D * H * W).repeat(1, C, 1))\n bse_val = torch.gather(volume, 2,\n (iz_bse * ID ** 2 + iy_bse * IW + ix_bse).long().view(N, 1, D * H * W).repeat(1, C, 1))\n\n # front view\n fnw_val = torch.gather(volume, 2,\n (iz_fnw * ID ** 2 + iy_fnw * IW + ix_fnw).long().view(N, 1, D * H * W).repeat(1, C, 1))\n fne_val = torch.gather(volume, 2,\n (iz_fne * ID ** 2 + iy_fne * IW + ix_fne).long().view(N, 1, D * H * W).repeat(1, C, 1))\n fsw_val = torch.gather(volume, 2,\n (iz_fsw * ID ** 2 + iy_fsw * IW + ix_fsw).long().view(N, 1, D * H * W).repeat(1, C, 1))\n fse_val = torch.gather(volume, 2,\n (iz_fse * ID ** 2 + iy_fse * IW + ix_fse).long().view(N, 1, D * H * W).repeat(1, C, 1))\n\n out_val = (\n # back\n bnw_val.view(N, C, D, H, W) * bnw.view(N, 1, D, H, W) +\n bne_val.view(N, C, D, H, W) * bne.view(N, 1, D, H, W) +\n bsw_val.view(N, C, D, H, W) * bsw.view(N, 1, D, H, W) +\n bse_val.view(N, C, D, H, W) * bse.view(N, 1, D, H, W) +\n # front\n fnw_val.view(N, C, D, H, W) * fnw.view(N, 1, D, H, W) +\n fne_val.view(N, C, D, H, W) * fne.view(N, 1, D, H, W) +\n fsw_val.view(N, C, D, H, W) * fsw.view(N, 1, D, H, W) +\n fse_val.view(N, C, D, H, W) * fse.view(N, 1, D, H, W)\n\n )\n\n # * zero padding\n out_val = torch.where(mask, out_val, torch.zeros_like(out_val).float().to(out_val.device))\n\n return out_val"
},
{
"identifier": "tricubic_sample_3d",
"path": "threestudio/models/geometry/grid_sampler.py",
"snippet": "def tricubic_sample_3d(volume, optical):\n \"\"\"\n tricubic sampling; can guarantee continuous gradient (interpolation border)\n :param volume: [B, C, ID, IH, IW]\n :param optical: [B, D, H, W, 3]\n :param sample_num:\n :return:\n \"\"\"\n\n @torch.no_grad()\n def get_shifts(x):\n x1 = -1 * (1 + x - torch.floor(x))\n x2 = -1 * (x - torch.floor(x))\n x3 = torch.floor(x) + 1 - x\n x4 = torch.floor(x) + 2 - x\n\n return torch.stack([x1, x2, x3, x4], dim=-1) # (B,d,h,w,4)\n\n N, C, ID, IH, IW = volume.shape\n _, D, H, W, _ = optical.shape\n\n device = volume.device\n\n ix = optical[..., 0]\n iy = optical[..., 1]\n iz = optical[..., 2]\n\n ix = ((ix + 1) / 2) * (IW - 1) # (B,d,h,w)\n iy = ((iy + 1) / 2) * (IH - 1)\n iz = ((iz + 1) / 2) * (ID - 1)\n\n ix = ix.view(-1)\n iy = iy.view(-1)\n iz = iz.view(-1)\n\n with torch.no_grad():\n shifts_x = get_shifts(ix).view(-1, 4) # (B*d*h*w,4)\n shifts_y = get_shifts(iy).view(-1, 4)\n shifts_z = get_shifts(iz).view(-1, 4)\n\n perm_weights = torch.ones([N * D * H * W, 4 * 4 * 4]).long().to(device)\n perm = torch.cumsum(perm_weights, dim=-1) - 1 # (B*d*h*w,64)\n\n perm_z = perm // 16 # [N*D*H*W, num]\n perm_y = (perm - perm_z * 16) // 4\n perm_x = (perm - perm_z * 16 - perm_y * 4)\n\n shifts_x = torch.gather(shifts_x, 1, perm_x) # [N*D*H*W, num]\n shifts_y = torch.gather(shifts_y, 1, perm_y)\n shifts_z = torch.gather(shifts_z, 1, perm_z)\n\n ix_target = (ix[:, None] + shifts_x).long() # [N*D*H*W, num]\n iy_target = (iy[:, None] + shifts_y).long()\n iz_target = (iz[:, None] + shifts_z).long()\n\n torch.clamp(ix_target, 0, IW - 1, out=ix_target)\n torch.clamp(iy_target, 0, IH - 1, out=iy_target)\n torch.clamp(iz_target, 0, ID - 1, out=iz_target)\n\n local_dist_x = ix - ix_target[:, 1] # ! attention here is [:, 1]\n local_dist_y = iy - iy_target[:, 1 + 4]\n local_dist_z = iz - iz_target[:, 1 + 16]\n\n local_dist_x = local_dist_x.view(N, 1, D * H * W).repeat(1, C, 1).view(-1)\n local_dist_y = local_dist_y.view(N, 1, D * H * W).repeat(1, C, 1).view(-1)\n local_dist_z = local_dist_z.view(N, 1, D * H * W).repeat(1, C, 1).view(-1)\n\n # ! attention: IW is correct\n idx_target = iz_target * ID ** 2 + iy_target * IW + ix_target # [N*D*H*W, num]\n\n volume = volume.view(N, C, ID * IH * IW)\n\n out = torch.gather(volume, 2,\n idx_target.view(N, 1, D * H * W * 64).repeat(1, C, 1))\n out = out.view(N * C * D * H * W, 4, 4, 4)\n\n # - tricubic_interpolate() is a bit faster than tricubic_interpolate_batch()\n final = tricubic_interpolate(out, local_dist_x, local_dist_y, local_dist_z).view(N, C, D, H, W) # [N,C,D,H,W]\n\n return final"
}
] | from dataclasses import dataclass, field
from threestudio.models.geometry.base import (
BaseGeometry,
BaseImplicitGeometry,
contract_to_unisphere,
)
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import get_activation
from threestudio.utils.typing import *
from threestudio.models.geometry.grid_sampler import grid_sample_3d, tricubic_sample_3d
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio | 8,194 | if self.cfg.sdf_network_grad:
print("sdf_layers network is training")
else:
for p in self.sdf_layers.parameters():
p.requires_grad_(False)
            print("sdf_layers network is frozen")
# volume weight
volume_weight = torch.load(self.cfg.init_volume_path)
self.volume = nn.Parameter(volume_weight, requires_grad=True)
        print("volume network loaded weights from " + self.cfg.init_volume_path)
def get_activated_density(
self, points: Float[Tensor, "*N Di"], density: Float[Tensor, "*N 1"]
) -> Tuple[Float[Tensor, "*N 1"], Float[Tensor, "*N 1"]]:
density_bias: Union[float, Float[Tensor, "*N 1"]]
if self.cfg.density_bias == "blob_dreamfusion":
# pre-activation density bias
density_bias = (
self.cfg.density_blob_scale
* torch.exp(
-0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2
)[..., None]
)
elif self.cfg.density_bias == "blob_magic3d":
# pre-activation density bias
density_bias = (
self.cfg.density_blob_scale
* (
1
- torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std
)[..., None]
)
elif isinstance(self.cfg.density_bias, float):
density_bias = self.cfg.density_bias
else:
raise ValueError(f"Unknown density bias {self.cfg.density_bias}")
raw_density: Float[Tensor, "*N 1"] = density + density_bias
density = get_activation(self.cfg.density_activation)(raw_density)
return raw_density, density
def forward(
self, points: Float[Tensor, "*N Di"], viewdirs, dists, output_normal: bool = False
) -> Dict[str, Float[Tensor, "..."]]:
grad_enabled = torch.is_grad_enabled()
if output_normal and self.cfg.normal_type == "analytic":
torch.set_grad_enabled(True)
points.requires_grad_(True)
points_unscaled = points # points in the original scale
sdf, feature_vector = self.sdf(points.view(-1, self.cfg.n_input_dims))
output = {
"density": sdf,
}
g = self.gradient(points.view(-1, self.cfg.n_input_dims))
alphas = self.get_alpha(points.view(-1, self.cfg.n_input_dims), viewdirs, dists, feature_vector, sdf, g)
output.update({"ALPHA": alphas})
points_norm = contract_to_unisphere(
points, self.bbox, self.unbounded
) # points normalized to (0, 1)
enc = self.encoding(points_norm.view(-1, self.cfg.n_input_dims))
if self.cfg.n_feature_dims > 0:
features = self.feature_network(enc).view(
*points.shape[:-1], self.cfg.n_feature_dims
)
output.update({"features": features})
torch.set_grad_enabled(grad_enabled)
return output
def forward_density(self, points: Float[Tensor, "*N Di"]) -> Float[Tensor, "*N 1"]:
points_unscaled = points
density, _ = self.sdf(points.view(-1, self.cfg.n_input_dims))
density = density.reshape(*points.shape[:-1], 1)
return density
def forward_field(
self, points: Float[Tensor, "*N Di"]
) -> Tuple[Float[Tensor, "*N 1"], Optional[Float[Tensor, "*N 3"]]]:
sdf, _ = self.sdf(points.view(-1, self.cfg.n_input_dims))
sdf = sdf.reshape(*points.shape[:-1], 1)
deformation: Optional[Float[Tensor, "*N 3"]] = None
return sdf, deformation
def forward_level(
self, field: Float[Tensor, "*N 1"], threshold: float
) -> Float[Tensor, "*N 1"]:
return field - threshold
def export(self, points: Float[Tensor, "*N Di"], **kwargs) -> Dict[str, Any]:
out: Dict[str, Any] = {}
if self.cfg.n_feature_dims == 0:
return out
points_unscaled = points
points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)
enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))
features = self.feature_network(enc).view(
*points.shape[:-1], self.cfg.n_feature_dims
)
out.update(
{
"features": features,
}
)
return out
@staticmethod
@torch.no_grad()
def create_from(
|
class Embedding(nn.Module):
def __init__(self, in_channels, N_freqs, logscale=True, normalize=False):
"""
Defines a function that embeds x to (x, sin(2^k x), cos(2^k x), ...)
in_channels: number of input channels (3 for both xyz and direction)
"""
super(Embedding, self).__init__()
self.N_freqs = N_freqs
self.in_channels = in_channels
self.funcs = [torch.sin, torch.cos]
self.out_channels = in_channels * (len(self.funcs) * N_freqs + 1)
self.normalize = normalize
if logscale:
self.freq_bands = 2 ** torch.linspace(0, N_freqs - 1, N_freqs)
else:
self.freq_bands = torch.linspace(1, 2 ** (N_freqs - 1), N_freqs)
def forward(self, x):
"""
Embeds x to (x, sin(2^k x), cos(2^k x), ...)
Different from the paper, "x" is also in the output
See https://github.com/bmild/nerf/issues/12
Inputs:
x: (B, self.in_channels)
Outputs:
out: (B, self.out_channels)
"""
out = [x]
for freq in self.freq_bands:
for func in self.funcs:
if self.normalize:
out += [func(freq * x) / freq]
else:
out += [func(freq * x)]
return torch.cat(out, -1)
@threestudio.register("geodream-geometry")
class GeodreamGeometryVolume(BaseImplicitGeometry):
@dataclass
class Config(BaseImplicitGeometry.Config):
n_input_dims: int = 3
n_feature_dims: int = 3
density_activation: Optional[str] = "softplus"
density_bias: Union[float, str] = "blob_magic3d"
density_blob_scale: float = 10.0
density_blob_std: float = 0.5
pos_encoding_config: dict = field(
default_factory=lambda: {
"otype": "HashGrid",
"n_levels": 16,
"n_features_per_level": 2,
"log2_hashmap_size": 19,
"base_resolution": 16,
"per_level_scale": 1.447269237440378,
}
)
mlp_network_config: dict = field(
default_factory=lambda: {
"otype": "VanillaMLP",
"activation": "ReLU",
"output_activation": "none",
"n_neurons": 64,
"n_hidden_layers": 1,
}
)
normal_type: Optional[
str
] = "finite_difference" # in ['pred', 'finite_difference', 'finite_difference_laplacian']
finite_difference_normal_eps: float = 0.01
# automatically determine the threshold
isosurface_threshold: Union[float, str] = 25.0
init_volume_path: str = "con_volume_lod0.pth"
one2345_weight: str = "pretrain.pth"
sdf_network_grad: bool = False
cfg: Config
def configure(self) -> None:
super().configure()
self.encoding = get_encoding(
self.cfg.n_input_dims, self.cfg.pos_encoding_config
)
if self.cfg.n_feature_dims > 0:
self.feature_network = get_mlp(
self.encoding.n_output_dims,
self.cfg.n_feature_dims,
self.cfg.mlp_network_config,
)
self.sdf_layers = SdfLayer()
self.deviation_network = SingleVarianceNetwork(self.cfg.one2345_weight)
# sdf_layers weight
sdf_layers_weight = torch.load(self.cfg.one2345_weight)['sdf_network_lod0']
selected_state_dict = {}
prefix = 'sdf_layer'
for key, value in sdf_layers_weight.items():
if key.startswith(prefix):
                selected_state_dict[key[10:]] = value  # drop the "sdf_layer" prefix (first 10 characters) from the key
self.sdf_layers.load_state_dict(selected_state_dict)
        print("sdf_layers loaded weights from " + self.cfg.one2345_weight)
# sdf_layers freeze
if self.cfg.sdf_network_grad:
print("sdf_layers network is training")
else:
for p in self.sdf_layers.parameters():
p.requires_grad_(False)
            print("sdf_layers network is frozen")
# volume weight
volume_weight = torch.load(self.cfg.init_volume_path)
self.volume = nn.Parameter(volume_weight, requires_grad=True)
        print("volume network loaded weights from " + self.cfg.init_volume_path)
def get_activated_density(
self, points: Float[Tensor, "*N Di"], density: Float[Tensor, "*N 1"]
) -> Tuple[Float[Tensor, "*N 1"], Float[Tensor, "*N 1"]]:
density_bias: Union[float, Float[Tensor, "*N 1"]]
if self.cfg.density_bias == "blob_dreamfusion":
# pre-activation density bias
density_bias = (
self.cfg.density_blob_scale
* torch.exp(
-0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2
)[..., None]
)
elif self.cfg.density_bias == "blob_magic3d":
# pre-activation density bias
density_bias = (
self.cfg.density_blob_scale
* (
1
- torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std
)[..., None]
)
elif isinstance(self.cfg.density_bias, float):
density_bias = self.cfg.density_bias
else:
raise ValueError(f"Unknown density bias {self.cfg.density_bias}")
raw_density: Float[Tensor, "*N 1"] = density + density_bias
density = get_activation(self.cfg.density_activation)(raw_density)
return raw_density, density
def forward(
self, points: Float[Tensor, "*N Di"], viewdirs, dists, output_normal: bool = False
) -> Dict[str, Float[Tensor, "..."]]:
grad_enabled = torch.is_grad_enabled()
if output_normal and self.cfg.normal_type == "analytic":
torch.set_grad_enabled(True)
points.requires_grad_(True)
points_unscaled = points # points in the original scale
sdf, feature_vector = self.sdf(points.view(-1, self.cfg.n_input_dims))
output = {
"density": sdf,
}
g = self.gradient(points.view(-1, self.cfg.n_input_dims))
alphas = self.get_alpha(points.view(-1, self.cfg.n_input_dims), viewdirs, dists, feature_vector, sdf, g)
output.update({"ALPHA": alphas})
points_norm = contract_to_unisphere(
points, self.bbox, self.unbounded
) # points normalized to (0, 1)
enc = self.encoding(points_norm.view(-1, self.cfg.n_input_dims))
if self.cfg.n_feature_dims > 0:
features = self.feature_network(enc).view(
*points.shape[:-1], self.cfg.n_feature_dims
)
output.update({"features": features})
torch.set_grad_enabled(grad_enabled)
return output
def forward_density(self, points: Float[Tensor, "*N Di"]) -> Float[Tensor, "*N 1"]:
points_unscaled = points
density, _ = self.sdf(points.view(-1, self.cfg.n_input_dims))
density = density.reshape(*points.shape[:-1], 1)
return density
def forward_field(
self, points: Float[Tensor, "*N Di"]
) -> Tuple[Float[Tensor, "*N 1"], Optional[Float[Tensor, "*N 3"]]]:
sdf, _ = self.sdf(points.view(-1, self.cfg.n_input_dims))
sdf = sdf.reshape(*points.shape[:-1], 1)
deformation: Optional[Float[Tensor, "*N 3"]] = None
return sdf, deformation
def forward_level(
self, field: Float[Tensor, "*N 1"], threshold: float
) -> Float[Tensor, "*N 1"]:
return field - threshold
def export(self, points: Float[Tensor, "*N Di"], **kwargs) -> Dict[str, Any]:
out: Dict[str, Any] = {}
if self.cfg.n_feature_dims == 0:
return out
points_unscaled = points
points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)
enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))
features = self.feature_network(enc).view(
*points.shape[:-1], self.cfg.n_feature_dims
)
out.update(
{
"features": features,
}
)
return out
@staticmethod
@torch.no_grad()
def create_from( | other: BaseGeometry, | 0 | 2023-12-01 01:59:42+00:00 | 12k |
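For reference, the bounded branch of contract_to_unisphere used by GeodreamGeometryVolume above is just a linear rescale of points from the [-radius, radius]^3 bounding box into the unit cube before hash-grid encoding. A minimal standalone sketch (contract_bounded is a made-up name and the scale_tensor helper is inlined rather than imported):

import torch

def contract_bounded(x: torch.Tensor, radius: float = 1.0) -> torch.Tensor:
    # Linearly map each coordinate from [-radius, radius] into [0, 1],
    # mirroring the bounded case of contract_to_unisphere shown above.
    return (x + radius) / (2.0 * radius)

pts = torch.tensor([[-1.0, 0.0, 1.0]])
print(contract_bounded(pts))  # tensor([[0.0000, 0.5000, 1.0000]])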
dvlab-research/LLaMA-VID | llamavid/model/llamavid_arch.py | [
{
"identifier": "BertConfig",
"path": "llamavid/model/qformer.py",
"snippet": "class BertEmbeddings(nn.Module):\nclass BertSelfAttention(nn.Module):\nclass BertSelfOutput(nn.Module):\nclass BertAttention(nn.Module):\nclass BertIntermediate(nn.Module):\nclass BertOutput(nn.Module):\nclass BertLayer(nn.Module):\nclass BertEncoder(nn.Module):\nclass BertPooler(nn.Module):\nclass BertPredictionHeadTransform(nn.Module):\nclass BertLMPredictionHead(nn.Module):\nclass BertOnlyMLMHead(nn.Module):\nclass BertPreTrainedModel(PreTrainedModel):\nclass BertModel(BertPreTrainedModel):\nclass BertLMHeadModel(BertPreTrainedModel):\nclass BertForMaskedLM(BertPreTrainedModel):\n def __init__(self, config):\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n query_embeds=None,\n past_key_values_length=0,\n ):\n def __init__(self, config, is_cross_attention):\n def save_attn_gradients(self, attn_gradients):\n def get_attn_gradients(self):\n def save_attention_map(self, attention_map):\n def get_attention_map(self):\n def transpose_for_scores(self, x):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n def __init__(self, config):\n def forward(self, hidden_states, input_tensor):\n def __init__(self, config, is_cross_attention=False):\n def prune_heads(self, heads):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states, input_tensor):\n def __init__(self, config, layer_num):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n query_length=0,\n ):\n def feed_forward_chunk(self, attention_output):\n def feed_forward_chunk_query(self, attention_output):\n def __init__(self, config):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n query_length=0,\n ):\n def create_custom_forward(module):\n def custom_forward(*inputs):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, sequence_output):\n def _init_weights(self, module):\n def __init__(self, config, add_pooling_layer=False):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def _prune_heads(self, heads_to_prune):\n def get_extended_attention_mask(\n self,\n attention_mask: Tensor,\n input_shape: Tuple[int],\n device: device,\n is_decoder: bool,\n has_query: bool = False,\n ) -> Tensor:\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n query_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n is_decoder=False,\n ):\n def __init__(self, config):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def forward(\n 
self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n query_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=True,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n return_logits=False,\n is_decoder=True,\n reduction=\"mean\",\n ):\n def prepare_inputs_for_generation(\n self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs\n ):\n def _reorder_cache(self, past, beam_idx):\n def __init__(self, config):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n query_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n return_logits=False,\n is_decoder=False,\n ):"
},
{
"identifier": "BertLMHeadModel",
"path": "llamavid/model/qformer.py",
"snippet": "class BertLMHeadModel(BertPreTrainedModel):\n\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.bert = BertModel(config, add_pooling_layer=False)\n self.cls = BertOnlyMLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n query_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=True,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n return_logits=False,\n is_decoder=True,\n reduction=\"mean\",\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in\n ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are\n ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]``\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n Returns:\n Example::\n >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig\n >>> import torch\n >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')\n >>> config = BertConfig.from_pretrained(\"bert-base-cased\")\n >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n >>> prediction_logits = outputs.logits\n \"\"\"\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n if labels is not None:\n use_cache = False\n if past_key_values is not None:\n query_embeds = None\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask,\n query_embeds=query_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n is_decoder=is_decoder,\n )\n\n sequence_output = outputs[0]\n if query_embeds is not None:\n sequence_output = outputs[0][:, query_embeds.shape[1] :, :]\n\n prediction_scores = self.cls(sequence_output)\n\n if return_logits:\n return prediction_scores[:, :-1, :].contiguous()\n\n lm_loss = None\n if labels is not None:\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)\n lm_loss = loss_fct(\n shifted_prediction_scores.view(-1, self.config.vocab_size),\n labels.view(-1),\n )\n if reduction == \"none\":\n lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=lm_loss,\n logits=prediction_scores,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n cross_attentions=outputs.cross_attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs\n ):\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_ids.shape)\n query_mask = input_ids.new_ones(query_embeds.shape[:-1])\n attention_mask = torch.cat([query_mask, attention_mask], dim=-1)\n\n # cut decoder_input_ids if past is used\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n return {\n \"input_ids\": input_ids,\n \"query_embeds\": query_embeds,\n \"attention_mask\": attention_mask,\n \"past_key_values\": past,\n \"encoder_hidden_states\": model_kwargs.get(\"encoder_hidden_states\", 
None),\n \"encoder_attention_mask\": model_kwargs.get(\"encoder_attention_mask\", None),\n \"is_decoder\": True,\n }\n\n def _reorder_cache(self, past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (\n tuple(\n past_state.index_select(0, beam_idx) for past_state in layer_past\n ),\n )\n return reordered_past"
},
{
"identifier": "build_vision_tower",
"path": "llamavid/model/multimodal_encoder/builder.py",
"snippet": "def build_vision_tower(vision_tower_cfg, **kwargs):\n vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))\n image_processor = getattr(vision_tower_cfg, 'image_processor', getattr(vision_tower_cfg, 'image_processor', \"./model_zoo/OpenAI/clip-vit-large-patch14\"))\n is_absolute_path_exists = os.path.exists(vision_tower)\n \n if not is_absolute_path_exists:\n raise ValueError(f'Not find vision tower: {vision_tower}')\n \n if \"openai\" in vision_tower.lower() or \"laion\" in vision_tower.lower():\n return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)\n elif \"lavis\" in vision_tower.lower() or \"eva\" in vision_tower.lower():\n return EVAVisionTowerLavis(vision_tower, image_processor, args=vision_tower_cfg, **kwargs)\n else:\n raise ValueError(f'Unknown vision tower: {vision_tower}')"
},
{
"identifier": "build_vision_projector",
"path": "llamavid/model/multimodal_projector/builder.py",
"snippet": "def build_vision_projector(config, delay_load=False, **kwargs):\n projector_type = getattr(config, 'mm_projector_type', 'linear')\n\n if projector_type == 'linear':\n return nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n mlp_gelu_match = re.match(r'^mlp(\\d+)x_gelu$', projector_type)\n if mlp_gelu_match:\n mlp_depth = int(mlp_gelu_match.group(1))\n modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]\n for _ in range(1, mlp_depth):\n modules.append(nn.GELU())\n modules.append(nn.Linear(config.hidden_size, config.hidden_size))\n return nn.Sequential(*modules)\n\n if projector_type == 'identity':\n return IdentityMap()\n\n raise ValueError(f'Unknown projector type: {projector_type}')"
},
{
"identifier": "IGNORE_INDEX",
"path": "llamavid/constants.py",
"snippet": "IGNORE_INDEX = -100"
},
{
"identifier": "IMAGE_TOKEN_INDEX",
"path": "llamavid/constants.py",
"snippet": "IMAGE_TOKEN_INDEX = -200"
},
{
"identifier": "DEFAULT_IMAGE_PATCH_TOKEN",
"path": "llamavid/constants.py",
"snippet": "DEFAULT_IMAGE_PATCH_TOKEN = \"<im_patch>\""
},
{
"identifier": "DEFAULT_IM_START_TOKEN",
"path": "llamavid/constants.py",
"snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\""
},
{
"identifier": "DEFAULT_IM_END_TOKEN",
"path": "llamavid/constants.py",
"snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\""
}
] | from abc import ABC, abstractmethod
from transformers import BertTokenizer
from transformers.models.bert.modeling_bert import BertLMHeadModel as BertLMHeadModelRaw
from .qformer import BertConfig
from .qformer import BertLMHeadModel as BertLMHeadModelQF
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from llamavid.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
import os
import json
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F | 7,361 | continue
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
cur_new_input_embeds = []
if labels is not None:
cur_labels = labels[batch_idx]
cur_new_labels = []
assert cur_labels.shape == cur_input_ids.shape
if not long_video:
token_idx = 0
while image_token_indices.numel() > 0:
if isinstance(image_features, list):
cur_image_features = image_features[cur_image_idx][token_idx]
else:
cur_image_features = image_features[cur_image_idx]
image_token_start = image_token_indices[0]
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
cur_new_input_embeds.append(cur_image_features)
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])
cur_labels = cur_labels[image_token_start+2:]
else:
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))
cur_new_input_embeds.append(cur_image_features)
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_labels = cur_labels[image_token_start+1:]
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
cur_input_ids = cur_input_ids[image_token_start+2:]
else:
cur_input_ids = cur_input_ids[image_token_start+1:]
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
token_idx += 1
# change image idx after processing one sample
cur_image_idx += 1
if cur_input_ids.numel() > 0:
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())
else:
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))
if labels is not None:
cur_new_labels.append(cur_labels)
cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
new_input_embeds.append(cur_new_input_embeds)
if labels is not None:
cur_new_labels = torch.cat(cur_new_labels, dim=0)
new_labels.append(cur_new_labels)
else:
cur_new_input_embeds = torch.Tensor(len(cur_input_ids), self.config.hidden_size).to(dtype=self.dtype, device=self.device)
text_token_indices = torch.where(cur_input_ids != IMAGE_TOKEN_INDEX)[0]
if not self.training and self.get_model().embed_tokens.weight.device != cur_input_ids.device:
model_device = self.get_model().embed_tokens.weight.device
data_device = cur_input_ids.device
cur_input_ids_text = cur_input_ids[text_token_indices].to(device=model_device)
cur_new_input_embeds[text_token_indices] = self.get_model().embed_tokens(cur_input_ids_text).to(device=data_device)
else:
cur_new_input_embeds[text_token_indices] = self.get_model().embed_tokens(cur_input_ids[text_token_indices])
cur_image_features = image_features[cur_image_idx]
cur_new_input_embeds[image_token_indices] = cur_image_features
new_input_embeds.append(cur_new_input_embeds)
if labels is not None:
new_labels.append(cur_labels)
cur_image_idx += 1
if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
max_len = max(x.shape[0] for x in new_input_embeds)
new_input_embeds_align = []
for cur_new_embed in new_input_embeds:
cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
new_input_embeds_align.append(cur_new_embed)
new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
if labels is not None:
new_labels_align = []
_new_labels = new_labels
for cur_new_label in new_labels:
cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
new_labels_align.append(cur_new_label)
new_labels = torch.stack(new_labels_align, dim=0)
# only used for right padding in the tokenizer
if attention_mask is not None:
new_attention_mask = []
for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
new_attention_mask.append(cur_new_attention_mask)
attention_mask = torch.stack(new_attention_mask, dim=0)
assert attention_mask.shape == new_labels.shape
else:
new_input_embeds = torch.stack(new_input_embeds, dim=0)
if labels is not None:
new_labels = torch.stack(new_labels, dim=0)
# only used for right padding in the tokenizer
if attention_mask is not None:
new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)
attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
assert attention_mask.shape == new_input_embeds.shape[:2]
return None, attention_mask, past_key_values, new_input_embeds, new_labels
def initialize_vision_tokenizer(self, model_args, tokenizer):
if model_args.mm_use_im_patch_token:
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
self.resize_token_embeddings(len(tokenizer))
if model_args.mm_use_im_start_end:
| # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# Modified from LLaVA (https://github.com/haotian-liu/LLaVA)
# Copyright 2023 Yanwei Li
# ------------------------------------------------------------------------
class LLaMAVIDMetaModel:
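# Mixin for the language-model backbone: holds the vision tower, the mm_projector,
# and the prompt-guided (Q-Former/BERT) attention modules used to compress visual features.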
def __init__(self, config):
super(LLaMAVIDMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True)
self.mm_projector = build_vision_projector(config)
def get_vision_tower(self):
vision_tower = getattr(self, 'vision_tower', None)
if type(vision_tower) is list:
vision_tower = vision_tower[0]
return vision_tower
def initialize_vision_modules(self, model_args, fsdp=None, max_token=2048):
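# Build the vision tower and mm_projector from model_args, record the vision settings in
# self.config, and optionally load pretrained projector weights from pretrain_mm_mlp_adapter.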
vision_tower = model_args.vision_tower
mm_vision_select_layer = model_args.mm_vision_select_layer
mm_vision_select_feature = model_args.mm_vision_select_feature
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
self.config.mm_vision_tower = vision_tower
self.config.image_processor = getattr(model_args, 'image_processor', None)
vision_tower = build_vision_tower(model_args)
if fsdp is not None and len(fsdp) > 0:
self.vision_tower = [vision_tower]
else:
self.vision_tower = vision_tower
self.config.use_mm_proj = True
self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
self.config.mm_hidden_size = vision_tower.hidden_size
self.config.mm_vision_select_layer = mm_vision_select_layer
self.config.mm_vision_select_feature = mm_vision_select_feature
self.config.max_token = max_token
if getattr(self, 'mm_projector', None) is None:
self.mm_projector = build_vision_projector(self.config)
else:
# In case it is frozen by LoRA
for p in self.mm_projector.parameters():
p.requires_grad = True
if pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
def initialize_attention_modules(self, model_args, for_eval=False):
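# Set up the prompt-guided attention stack (BERT/Q-Former tokenizer, encoder, query tokens)
# and the key/value/context projection layers, loading pretrained Q-Former or projector
# weights when available; for_eval additionally casts these modules to fp16 on the projector's device.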
pretrain_mm_mlp_adapter = getattr(model_args, "pretrain_mm_mlp_adapter", None)
pretrain_qformer = getattr(model_args, "pretrain_qformer", None)
self.config.bert_type = getattr(model_args, "bert_type", "qformer")
self.config.num_query = getattr(model_args, "num_query", 32)
self.config.compress_type = getattr(model_args, "compress_type", None)
if 'pretrain' in self.config.bert_type:
# for a Q-Former that was pretrained with EVA-CLIP features
att_feat_size = 1408
else:
att_feat_size = self.config.mm_hidden_size
self.vlm_att_tokenlizer, self.vlm_att_encoder, self.vlm_att_query = self.init_bert(att_feat_size, truncation_side="left")
self.vlm_att_projector = torch.nn.Linear(self.vlm_att_encoder.config.hidden_size, self.config.mm_hidden_size)
self.vlm_att_key_projector = torch.nn.Linear(self.config.mm_hidden_size, self.config.mm_hidden_size)
self.vlm_att_val_projector = torch.nn.Linear(self.config.mm_hidden_size, self.config.hidden_size)
if "raw" in self.config.bert_type:
self.vlm_att_bert_proj = torch.nn.Linear(att_feat_size, self.vlm_att_encoder.config.hidden_size)
elif "pretrain" in self.config.bert_type and self.config.mm_hidden_size!=att_feat_size:
self.vlm_att_bert_proj = torch.nn.Linear(self.config.mm_hidden_size, att_feat_size)
else:
self.vlm_att_bert_proj = None
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
if 'qformer_pretrain' in self.config.bert_type:
self.vlm_att_ln = torch.nn.LayerNorm(att_feat_size)
if pretrain_qformer is not None:
print("Loading pretrained qformer weights...")
qformer_weight = torch.load(pretrain_qformer, map_location='cpu')['model']
bert_weight = {_key: qformer_weight[_key] for _key in qformer_weight if 'bert' in _key}
self.vlm_att_encoder.load_state_dict(get_w(bert_weight, 'Qformer'))
self.vlm_att_ln.load_state_dict(get_w(qformer_weight, 'ln_vision'))
self.vlm_att_query.data = qformer_weight['query_tokens']
if 'freeze_all' in self.config.bert_type:
print("Freezing all qformer weights...")
self.vlm_att_encoder.requires_grad_(False)
self.vlm_att_ln.requires_grad_(False)
self.vlm_att_query.requires_grad_(False)
self.vlm_att_projector.requires_grad_(False)
self.vlm_att_key_projector.requires_grad_(False)
self.vlm_att_val_projector.requires_grad_(False)
elif 'freeze' in self.config.bert_type:
print("Freezing pretrained qformer weights...")
self.vlm_att_encoder.requires_grad_(False)
self.vlm_att_ln.requires_grad_(False)
self.vlm_att_query.requires_grad_(False)
if pretrain_mm_mlp_adapter is not None:
att_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
else:
trainable_module = ['vlm_att_encoder', 'vlm_att_projector', 'vlm_att_key_projector',
'vlm_att_val_projector', 'vlm_att_query', 'vlm_att_visual_proj',
'vlm_att_ln']
if hasattr(model_args, 'model_name_or_path'):
model_save_path = model_args.model_name_or_path
else:
model_save_path = model_args.model_path
model_idx_path = getattr(model_args, 'model_path', model_save_path)
weight_file = json.load(open(os.path.join(model_idx_path, 'pytorch_model.bin.index.json'), 'r'))['weight_map']
model_path = set([weight_file[_key] for _key in weight_file if any([_module in _key for _module in trainable_module])])
att_projector_weights = {}
for _model in model_path:
att_projector_weights.update(torch.load(os.path.join(model_idx_path, _model), map_location='cpu'))
if len(att_projector_weights) == 0:
return
bert_dict = get_w(att_projector_weights, 'vlm_att_encoder')
if "bert.embeddings.position_ids" not in bert_dict and "raw_bert" not in self.config.bert_type:
bert_dict["bert.embeddings.position_ids"] = self.vlm_att_encoder.bert.embeddings.position_ids
print('Loading pretrained weights...')
self.vlm_att_encoder.load_state_dict(bert_dict)
self.vlm_att_projector.load_state_dict(get_w(att_projector_weights, 'vlm_att_projector'))
self.vlm_att_key_projector.load_state_dict(get_w(att_projector_weights, 'vlm_att_key_projector'))
self.vlm_att_val_projector.load_state_dict(get_w(att_projector_weights, 'vlm_att_val_projector'))
if "qformer" in self.config.bert_type:
print('Loading vlm_att_query weights...')
self.vlm_att_query.data = att_projector_weights['model.vlm_att_query']
if "pretrain" in self.config.bert_type:
print('Loading vlm_att_ln weights...')
self.vlm_att_ln.load_state_dict(get_w(att_projector_weights, 'vlm_att_ln'))
if self.vlm_att_bert_proj is not None:
print('Loading vlm_att_bert_proj weights...')
self.vlm_att_bert_proj.load_state_dict(get_w(att_projector_weights, 'vlm_att_bert_proj'))
if for_eval:
weight_type = torch.float16
device_type = self.mm_projector[0].weight.device
self.vlm_att_encoder = self.vlm_att_encoder.to(device=device_type, dtype=weight_type)
self.vlm_att_projector = self.vlm_att_projector.to(device=device_type, dtype=weight_type)
self.vlm_att_key_projector = self.vlm_att_key_projector.to(device=device_type, dtype=weight_type)
self.vlm_att_val_projector = self.vlm_att_val_projector.to(device=device_type, dtype=weight_type)
if "qformer" in self.config.bert_type:
self.vlm_att_query.data = self.vlm_att_query.data.to(device=device_type, dtype=weight_type)
if "pretrain" in self.config.bert_type:
self.vlm_att_ln = self.vlm_att_ln.to(device=device_type, dtype=weight_type)
if self.vlm_att_bert_proj is not None:
self.vlm_att_bert_proj = self.vlm_att_bert_proj.to(device=device_type, dtype=weight_type)
def init_bert(self, vision_width, cross_attention_freq=2, truncation_side="right"):
# initialize BERT tokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", truncation_side=truncation_side)
tokenizer.add_special_tokens({"bos_token": "[DEC]"})
# initialize BERT
encoder_config = BertConfig.from_pretrained("bert-base-uncased")
encoder_config.encoder_width = vision_width
# insert cross-attention layer every other block
encoder_config.add_cross_attention = True
encoder_config.cross_attention_freq = cross_attention_freq
query_tokens = None
if "qformer" in self.config.bert_type:
mm_model = BertLMHeadModelQF.from_pretrained(
"bert-base-uncased", config=encoder_config
)
query_tokens = nn.Parameter(
torch.zeros(1, self.config.num_query, encoder_config.hidden_size)
)
query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)
elif "raw" in self.config.bert_type:
encoder_config.is_decoder = True
mm_model = BertLMHeadModelRaw.from_pretrained(
"bert-base-uncased", config=encoder_config
)
else:
raise NotImplementedError("BERT type not implemented...")
mm_model.resize_token_embeddings(len(tokenizer))
mm_model.cls = None
if "layer" in self.config.bert_type:
layer_num = int(self.config.bert_type.split(':')[-1])
mm_model.bert.encoder.layer = mm_model.bert.encoder.layer[:layer_num]
print(f"Only use {layer_num} layers in BERT...")
return tokenizer, mm_model, query_tokens
class LLaMAVIDMetaForCausalLM(ABC):
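# Mixin that gives the causal LM its multimodal helpers: image encoding, prompt-guided
# token compression, and splicing of image features into the input embeddings.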
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_images(self, images, prompts=None, image_counts=None, long_video=False):
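# Run the vision tower (or reuse precomputed features for long videos) and compress the
# result with prompt-guided attention.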
if long_video:
# use pre-computed features
image_features = images
else:
image_features = self.get_model().get_vision_tower()(images)
image_features = self.vlm_attention(image_features,
prompts=prompts,
image_counts=image_counts,
long_video=long_video)
return image_features
def vlm_attention(self, image_features, prompts=None, image_counts=None, long_video=False):
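# For each sample: tokenize its prompt list, expand the image features per prompt (and per
# frame when image_counts is given), run the Q-Former/BERT encoder with the image features
# as cross-attention context, then turn the encoder output into compact visual tokens via token_generation.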
img_feat_lst = []
if image_counts is None:
assert len(image_features) == len(prompts), f"Size mismatch! image_features: {len(image_features)}, prompts: {len(prompts)}"
else:
assert len(prompts) == len(image_counts), f"Size mismatch! prompts: {len(prompts)}, image_counts: {len(image_counts)}"
image_atts = torch.ones(image_features.size()[:-1], dtype=torch.long).to(image_features.device)
total_count = 0
# calculate each image feat according to the prompt
for _idx in range(len(prompts)):
assert isinstance(prompts[_idx], list), f"Prompt should be a list, but got {type(prompts[_idx])}"
input_token = self.get_model().vlm_att_tokenlizer(
prompts[_idx],
padding='longest',
truncation=True,
max_length=256,
return_tensors="pt"
).to(image_features.device)
input_ids = input_token.input_ids
attention_masks = input_token.attention_mask
if image_counts is None:
img_feat_prompt = image_features[_idx, None].expand(len(prompts[_idx]), -1, -1)
img_att_prompt = image_atts[_idx, None].expand(len(prompts[_idx]), -1)
else:
# shape: [prompt_num*frame_num, image_shape, feat_dim]
img_feat_prompt = image_features[total_count:total_count+image_counts[_idx]]
img_feat_prompt = img_feat_prompt[None].expand(len(prompts[_idx]), -1, -1, -1).flatten(0,1)
img_att_prompt = image_atts[total_count:total_count+image_counts[_idx]]
img_att_prompt = img_att_prompt[None].expand(len(prompts[_idx]), -1, -1).flatten(0,1)
input_ids = input_ids[:,None].expand(-1, image_counts[_idx], -1).flatten(0,1)
attention_masks = attention_masks[:,None].expand(-1, image_counts[_idx], -1).flatten(0,1)
total_count += image_counts[_idx]
if "pretrain" in self.config.bert_type and self.get_model().vlm_att_bert_proj is not None:
bert_feat = self.get_model().vlm_att_bert_proj(img_feat_prompt)
else:
bert_feat = img_feat_prompt.clone()
# remove cls embedding
if self.config.mm_vision_select_feature == 'patch':
if img_feat_prompt.shape[1]%2 == 1:
img_feat_prompt = img_feat_prompt[:, 1:]
if "qformer" in self.config.bert_type:
query_tokens = self.get_model().vlm_att_query.expand(bert_feat.shape[0], -1, -1)
query_atts = torch.cat([torch.ones(query_tokens.size()[:-1], dtype=torch.long).to(bert_feat.device),
attention_masks],dim=1)
if 'pretrain' in self.config.bert_type:
mm_img_in = self.get_model().vlm_att_ln(bert_feat)
else:
mm_img_in = bert_feat
if long_video:
outputs = []
block_size = 64
for L in range(0, len(input_ids), block_size):
R = L + block_size
mm_output = self.get_model().vlm_att_encoder.bert(
input_ids[L:R],
query_embeds=query_tokens[L:R],
attention_mask=query_atts[L:R],
encoder_hidden_states=mm_img_in[L:R],
encoder_attention_mask=img_att_prompt[L:R],
return_dict=True,
)
mm_output = mm_output.last_hidden_state[:,:query_tokens.shape[1]]
outputs.append(mm_output)
mm_output = torch.cat(outputs)
torch.cuda.empty_cache()
else:
mm_output = self.get_model().vlm_att_encoder.bert(
input_ids,
query_embeds=query_tokens,
attention_mask=query_atts,
encoder_hidden_states=mm_img_in,
encoder_attention_mask=img_att_prompt,
return_dict=True,
)
mm_output = mm_output.last_hidden_state[:,:query_tokens.shape[1]]
elif "raw" in self.config.bert_type:
if self.config.mm_vision_select_feature == 'patch' and bert_feat.shape[1]%2 == 1:
bert_feat = bert_feat[:, 1:]
img_att_prompt = img_att_prompt[:, 1:]
mm_output = self.get_model().vlm_att_encoder.bert(
input_ids,
attention_mask=attention_masks,
encoder_hidden_states=self.get_model().vlm_att_bert_proj(bert_feat),
encoder_attention_mask=img_att_prompt,
return_dict=True,
)
mm_output = mm_output.last_hidden_state
else:
raise ValueError(f'Unexpected bert type: {self.config.bert_type}')
text_q = self.get_model().vlm_att_projector(mm_output)
final_token = self.token_generation(text_q, img_feat_prompt, long_video=long_video)
if image_counts is not None:
# shape: [prompt_num, frame_num*image_shape, feat_dim]
final_token = final_token.reshape(len(prompts[_idx]), image_counts[_idx], *final_token.shape[-2:])
final_token = final_token.flatten(1,2)
img_feat_lst.append(final_token)
return img_feat_lst
def token_generation(self, text_q, vis_embed, long_video=False):
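# Build one text-conditioned context token per sample (text_q attending over the visual
# features), optionally compress the visual tokens (grid or mean pooling), project them
# with mm_projector, and concatenate context + content tokens.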
ctx_embed = self.get_model().vlm_att_key_projector(vis_embed)
# Key part 1: calculate context-related embedding
ctx_embed = text_q @ ctx_embed.transpose(-1,-2)
ctx_embed = ctx_embed / (vis_embed.shape[-1] ** 0.5)
if not long_video:
ctx_embed = (ctx_embed.softmax(-1) @ vis_embed).mean(1)
else:
block_size = 64
outputs = []
ctx_score = ctx_embed.softmax(-1)
for L in range(0, len(ctx_score), block_size):
R = L + block_size
sub_embed = (ctx_score[L:R] @ vis_embed[L:R]).mean(1)
outputs.append(sub_embed)
ctx_embed = torch.cat(outputs)
torch.cuda.empty_cache()
ctx_embed = self.get_model().vlm_att_val_projector(ctx_embed[:,None])
# Key part 2: calculate visual embedding
if self.config.compress_type is not None:
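# 'grid:N' average-pools the patch grid down to roughly N x N tokens; 'mean' collapses all patches into a single averaged token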
if 'grid' in self.config.compress_type:
grid_size = int(self.config.compress_type.split('grid:')[-1])
cur_shape = int(vis_embed.shape[1]**0.5)
assert grid_size > 1, f'Grid size should be larger than 1, but got {grid_size}'
vis_embed = vis_embed.reshape(vis_embed.shape[0], cur_shape, cur_shape, -1)
grid_stride = cur_shape // grid_size
vis_embed = F.avg_pool2d(vis_embed.permute(0, 3, 1, 2),
padding=0,
kernel_size=grid_stride,
stride=grid_stride)
vis_embed = vis_embed.permute(0, 2, 3, 1).flatten(1,2)
elif 'mean' in self.config.compress_type:
vis_embed = vis_embed.mean(dim=1, keepdim=True)
# concat token in shape (B, n+1, C)
vis_embed = self.get_model().mm_projector(vis_embed)
final_token = torch.cat([ctx_embed, vis_embed], dim=1)
return final_token
def update_prompt(self, prompts=None):
self.prompts = prompts
def prepare_inputs_labels_for_multimodal(
self, input_ids, attention_mask, past_key_values, labels, images, prompts=None
):
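# Replace every IMAGE_TOKEN_INDEX placeholder in input_ids with the matching image feature
# embeddings, mark those positions with IGNORE_INDEX in the labels, and pad embeddings,
# labels and attention masks to a common length across the batch.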
if prompts is None and hasattr(self, 'prompts'):
prompts = self.prompts
vision_tower = self.get_vision_tower()
if vision_tower is None or images is None or input_ids.shape[1] == 1:
if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
return input_ids, attention_mask, past_key_values, None, labels
# pre-process images for long video
if images[0].shape[-1] > 1000:
long_video = True
else:
long_video = False
if type(images) is list or images.ndim == 5:
# do not reshape for long videos
if not long_video:
images = [image if len(image.shape) == 4 else image.unsqueeze(0) for image in images]
image_counts = [image.shape[0] for image in images]
concat_images = torch.cat(images, dim=0)
image_features = self.encode_images(concat_images, prompts, image_counts, long_video=long_video)
else:
image_features = self.encode_images(images, prompts, long_video=long_video)
new_input_embeds = []
new_labels = [] if labels is not None else None
cur_image_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids):
if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
# multimodal LLM, but the current sample is not multimodal
# FIXME: this is a hacky fix, for deepspeed zero3 to work
half_len = cur_input_ids.shape[0] // 2
if isinstance(image_features, list):
cur_image_features = image_features[cur_image_idx][0]
else:
cur_image_features = image_features[cur_image_idx]
cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2], dim=0)
new_input_embeds.append(cur_input_embeds)
if labels is not None:
new_labels.append(labels[batch_idx])
cur_image_idx += 1
continue
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
cur_new_input_embeds = []
if labels is not None:
cur_labels = labels[batch_idx]
cur_new_labels = []
assert cur_labels.shape == cur_input_ids.shape
if not long_video:
token_idx = 0
while image_token_indices.numel() > 0:
if isinstance(image_features, list):
cur_image_features = image_features[cur_image_idx][token_idx]
else:
cur_image_features = image_features[cur_image_idx]
image_token_start = image_token_indices[0]
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
cur_new_input_embeds.append(cur_image_features)
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])
cur_labels = cur_labels[image_token_start+2:]
else:
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))
cur_new_input_embeds.append(cur_image_features)
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_labels = cur_labels[image_token_start+1:]
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
cur_input_ids = cur_input_ids[image_token_start+2:]
else:
cur_input_ids = cur_input_ids[image_token_start+1:]
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
token_idx += 1
# change image idx after processing one sample
cur_image_idx += 1
if cur_input_ids.numel() > 0:
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())
else:
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))
if labels is not None:
cur_new_labels.append(cur_labels)
cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
new_input_embeds.append(cur_new_input_embeds)
if labels is not None:
cur_new_labels = torch.cat(cur_new_labels, dim=0)
new_labels.append(cur_new_labels)
else:
cur_new_input_embeds = torch.Tensor(len(cur_input_ids), self.config.hidden_size).to(dtype=self.dtype, device=self.device)
text_token_indices = torch.where(cur_input_ids != IMAGE_TOKEN_INDEX)[0]
if not self.training and self.get_model().embed_tokens.weight.device != cur_input_ids.device:
model_device = self.get_model().embed_tokens.weight.device
data_device = cur_input_ids.device
cur_input_ids_text = cur_input_ids[text_token_indices].to(device=model_device)
cur_new_input_embeds[text_token_indices] = self.get_model().embed_tokens(cur_input_ids_text).to(device=data_device)
else:
cur_new_input_embeds[text_token_indices] = self.get_model().embed_tokens(cur_input_ids[text_token_indices])
cur_image_features = image_features[cur_image_idx]
cur_new_input_embeds[image_token_indices] = cur_image_features
new_input_embeds.append(cur_new_input_embeds)
if labels is not None:
new_labels.append(cur_labels)
cur_image_idx += 1
if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
max_len = max(x.shape[0] for x in new_input_embeds)
new_input_embeds_align = []
for cur_new_embed in new_input_embeds:
cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
new_input_embeds_align.append(cur_new_embed)
new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
if labels is not None:
new_labels_align = []
_new_labels = new_labels
for cur_new_label in new_labels:
cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
new_labels_align.append(cur_new_label)
new_labels = torch.stack(new_labels_align, dim=0)
# only used for right padding in the tokenizer
if attention_mask is not None:
new_attention_mask = []
for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
new_attention_mask.append(cur_new_attention_mask)
attention_mask = torch.stack(new_attention_mask, dim=0)
assert attention_mask.shape == new_labels.shape
else:
new_input_embeds = torch.stack(new_input_embeds, dim=0)
if labels is not None:
new_labels = torch.stack(new_labels, dim=0)
# only used for right padding in the tokenizer
if attention_mask is not None:
new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)
attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
assert attention_mask.shape == new_input_embeds.shape[:2]
return None, attention_mask, past_key_values, new_input_embeds, new_labels
def initialize_vision_tokenizer(self, model_args, tokenizer):
if model_args.mm_use_im_patch_token:
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
self.resize_token_embeddings(len(tokenizer))
if model_args.mm_use_im_start_end: | num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True) | 8 | 2023-11-28 09:45:37+00:00 | 12k |
horseee/DeepCache | DeepCache/sdxl/unet_2d_condition.py | [
{
"identifier": "UNetMidBlock2DCrossAttn",
"path": "DeepCache/sdxl/unet_2d_blocks.py",
"snippet": "class UNetMidBlock2DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n attention_type=\"default\",\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n attentions = []\n\n for _ in range(num_layers):\n if not dual_cross_attention:\n attentions.append(\n Transformer2DModel(\n num_attention_heads,\n in_channels // num_attention_heads,\n in_channels=in_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n attention_type=attention_type,\n )\n )\n else:\n attentions.append(\n DualTransformer2DModel(\n num_attention_heads,\n in_channels // num_attention_heads,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ) -> torch.FloatTensor:\n lora_scale = cross_attention_kwargs.get(\"scale\", 1.0) if cross_attention_kwargs is not None else 1.0\n hidden_states = self.resnets[0](hidden_states, temb, scale=lora_scale)\n for attn, resnet in zip(self.attentions, self.resnets[1:]):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n 
)[0]\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet),\n hidden_states,\n temb,\n **ckpt_kwargs,\n )\n else:\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n hidden_states = resnet(hidden_states, temb, scale=lora_scale)\n\n return hidden_states"
},
{
"identifier": "UNetMidBlock2DSimpleCrossAttn",
"path": "DeepCache/sdxl/unet_2d_blocks.py",
"snippet": "class UNetMidBlock2DSimpleCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attention_head_dim=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n skip_time_act=False,\n only_cross_attention=False,\n cross_attention_norm=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n\n self.attention_head_dim = attention_head_dim\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n self.num_heads = in_channels // self.attention_head_dim\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n skip_time_act=skip_time_act,\n )\n ]\n attentions = []\n\n for _ in range(num_layers):\n processor = (\n AttnAddedKVProcessor2_0() if hasattr(F, \"scaled_dot_product_attention\") else AttnAddedKVProcessor()\n )\n\n attentions.append(\n Attention(\n query_dim=in_channels,\n cross_attention_dim=in_channels,\n heads=self.num_heads,\n dim_head=self.attention_head_dim,\n added_kv_proj_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n bias=True,\n upcast_softmax=True,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n processor=processor,\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n skip_time_act=skip_time_act,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n lora_scale = cross_attention_kwargs.get(\"scale\", 1.0)\n\n if attention_mask is None:\n # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask.\n mask = None if encoder_hidden_states is None else encoder_attention_mask\n else:\n # when attention_mask is defined: we don't even check for encoder_attention_mask.\n # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks.\n # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask.\n # then we can simplify this whole if/else block to:\n # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask\n mask = attention_mask\n\n hidden_states = self.resnets[0](hidden_states, temb, scale=lora_scale)\n for attn, resnet in zip(self.attentions, self.resnets[1:]):\n # attn\n hidden_states = attn(\n 
hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=mask,\n **cross_attention_kwargs,\n )\n\n # resnet\n hidden_states = resnet(hidden_states, temb, scale=lora_scale)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "DeepCache/sdxl/unet_2d_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n attention_type=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n downsample_type=None,\n dropout=0.0,\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock2D\":\n return DownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n dropout=dropout,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"ResnetDownsampleBlock2D\":\n return ResnetDownsampleBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n dropout=dropout,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n elif down_block_type == \"AttnDownBlock2D\":\n if add_downsample is False:\n downsample_type = None\n else:\n downsample_type = downsample_type or \"conv\" # default to 'conv'\n return AttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n dropout=dropout,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n downsample_type=downsample_type,\n )\n elif down_block_type == \"CrossAttnDownBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock2D\")\n return CrossAttnDownBlock2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n dropout=dropout,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n attention_type=attention_type,\n )\n elif down_block_type == \"SimpleCrossAttnDownBlock2D\":\n if cross_attention_dim is None:\n 
raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnDownBlock2D\")\n return SimpleCrossAttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n dropout=dropout,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif down_block_type == \"SkipDownBlock2D\":\n return SkipDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n dropout=dropout,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"AttnSkipDownBlock2D\":\n return AttnSkipDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n dropout=dropout,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"DownEncoderBlock2D\":\n return DownEncoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n dropout=dropout,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"AttnDownEncoderBlock2D\":\n return AttnDownEncoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n dropout=dropout,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"KDownBlock2D\":\n return KDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n dropout=dropout,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n )\n elif down_block_type == \"KCrossAttnDownBlock2D\":\n return KCrossAttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n dropout=dropout,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n add_self_attention=True if not add_downsample else False,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "DeepCache/sdxl/unet_2d_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n attention_type=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n upsample_type=None,\n dropout=0.0,\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock2D\":\n return UpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n dropout=dropout,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"ResnetUpsampleBlock2D\":\n return ResnetUpsampleBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n dropout=dropout,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n elif up_block_type == \"CrossAttnUpBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock2D\")\n return CrossAttnUpBlock2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n dropout=dropout,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n attention_type=attention_type,\n )\n elif up_block_type == \"SimpleCrossAttnUpBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnUpBlock2D\")\n return SimpleCrossAttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n dropout=dropout,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n 
output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif up_block_type == \"AttnUpBlock2D\":\n if add_upsample is False:\n upsample_type = None\n else:\n upsample_type = upsample_type or \"conv\" # default to 'conv'\n\n return AttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n dropout=dropout,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n upsample_type=upsample_type,\n )\n elif up_block_type == \"SkipUpBlock2D\":\n return SkipUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n dropout=dropout,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"AttnSkipUpBlock2D\":\n return AttnSkipUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n dropout=dropout,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"UpDecoderBlock2D\":\n return UpDecoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n dropout=dropout,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n temb_channels=temb_channels,\n )\n elif up_block_type == \"AttnUpDecoderBlock2D\":\n return AttnUpDecoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n dropout=dropout,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n temb_channels=temb_channels,\n )\n elif up_block_type == \"KUpBlock2D\":\n return KUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n dropout=dropout,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n )\n elif up_block_type == \"KCrossAttnUpBlock2D\":\n return KCrossAttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n dropout=dropout,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n )\n\n raise ValueError(f\"{up_block_type} does not exist.\")"
}
] | from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import UNet2DConditionLoadersMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.activations import get_activation
from diffusers.models.attention_processor import (
ADDED_KV_ATTENTION_PROCESSORS,
CROSS_ATTENTION_PROCESSORS,
AttentionProcessor,
AttnAddedKVProcessor,
AttnProcessor,
)
from diffusers.models.embeddings import (
GaussianFourierProjection,
ImageHintTimeEmbedding,
ImageProjection,
ImageTimeEmbedding,
PositionNet,
TextImageProjection,
TextImageTimeEmbedding,
TextTimeEmbedding,
TimestepEmbedding,
Timesteps,
)
from diffusers.models.modeling_utils import ModelMixin
from .unet_2d_blocks import (
UNetMidBlock2DCrossAttn,
UNetMidBlock2DSimpleCrossAttn,
get_down_block,
get_up_block,
)
import torch
import torch.nn as nn
import torch.utils.checkpoint
import time | 9,700 | else:
text_time_embedding_from_dim = cross_attention_dim
self.add_embedding = TextTimeEmbedding(
text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
)
elif addition_embed_type == "text_image":
# text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
# they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
            # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)`
self.add_embedding = TextImageTimeEmbedding(
text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
)
elif addition_embed_type == "text_time":
self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
elif addition_embed_type == "image":
# Kandinsky 2.2
self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
elif addition_embed_type == "image_hint":
# Kandinsky 2.2 ControlNet
self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
elif addition_embed_type is not None:
raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
if time_embedding_act_fn is None:
self.time_embed_act = None
else:
self.time_embed_act = get_activation(time_embedding_act_fn)
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
if mid_block_only_cross_attention is None:
mid_block_only_cross_attention = only_cross_attention
only_cross_attention = [only_cross_attention] * len(down_block_types)
if mid_block_only_cross_attention is None:
mid_block_only_cross_attention = False
if isinstance(num_attention_heads, int):
num_attention_heads = (num_attention_heads,) * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
if isinstance(cross_attention_dim, int):
cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
if isinstance(layers_per_block, int):
layers_per_block = [layers_per_block] * len(down_block_types)
if isinstance(transformer_layers_per_block, int):
transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
if class_embeddings_concat:
# The time embeddings are concatenated with the class embeddings. The dimension of the
# time embeddings passed to the down, middle, and up blocks is twice the dimension of the
# regular time embeddings
blocks_time_embed_dim = time_embed_dim * 2
else:
blocks_time_embed_dim = time_embed_dim
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block[i],
transformer_layers_per_block=transformer_layers_per_block[i],
in_channels=input_channel,
out_channels=output_channel,
temb_channels=blocks_time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim[i],
num_attention_heads=num_attention_heads[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
resnet_skip_time_act=resnet_skip_time_act,
resnet_out_scale_factor=resnet_out_scale_factor,
cross_attention_norm=cross_attention_norm,
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
dropout=dropout,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock2DCrossAttn":
self.mid_block = UNetMidBlock2DCrossAttn(
transformer_layers_per_block=transformer_layers_per_block[-1],
in_channels=block_out_channels[-1],
temb_channels=blocks_time_embed_dim,
dropout=dropout,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift=resnet_time_scale_shift,
cross_attention_dim=cross_attention_dim[-1],
num_attention_heads=num_attention_heads[-1],
resnet_groups=norm_num_groups,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
attention_type=attention_type,
)
elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn":
| # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet2DConditionOutput(BaseOutput):
"""
The output of [`UNet2DConditionModel`].
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor = None
class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
r"""
A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
shaped output.
    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
for all models (such as downloading or saving).
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
Whether to flip the sin to cos in the time embedding.
freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
The tuple of downsample blocks to use.
mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
Block type for middle of UNet, it can be either `UNetMidBlock2DCrossAttn` or
`UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
The tuple of upsample blocks to use.
only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`):
Whether to include self-attention in the basic transformer blocks, see
[`~models.attention.BasicTransformerBlock`].
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
            If `None`, normalization and activation layers are skipped in post-processing.
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
The dimension of the cross attention features.
transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
[`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
[`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
encoder_hid_dim (`int`, *optional*, defaults to None):
If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
dimension to `cross_attention_dim`.
encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
            embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
num_attention_heads (`int`, *optional*):
The number of attention heads. If not defined, defaults to `attention_head_dim`
resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
class_embed_type (`str`, *optional*, defaults to `None`):
The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
`"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
addition_embed_type (`str`, *optional*, defaults to `None`):
Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
"text". "text" will use the `TextTimeEmbedding` layer.
        addition_time_embed_dim (`int`, *optional*, defaults to `None`):
Dimension for the timestep embeddings.
num_class_embeds (`int`, *optional*, defaults to `None`):
Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
class conditioning with `class_embed_type` equal to `None`.
time_embedding_type (`str`, *optional*, defaults to `positional`):
The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
time_embedding_dim (`int`, *optional*, defaults to `None`):
An optional override for the dimension of the projected time embedding.
time_embedding_act_fn (`str`, *optional*, defaults to `None`):
Optional activation function to use only once on the time embeddings before they are passed to the rest of
the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
timestep_post_act (`str`, *optional*, defaults to `None`):
The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
time_cond_proj_dim (`int`, *optional*, defaults to `None`):
The dimension of `cond_proj` layer in the timestep embedding.
        conv_in_kernel (`int`, *optional*, defaults to `3`): The kernel size of `conv_in` layer.
        conv_out_kernel (`int`, *optional*, defaults to `3`): The kernel size of `conv_out` layer.
projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
`class_embed_type="projection"`. Required when `class_embed_type="projection"`.
class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
embeddings with the class embeddings.
mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If
`only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
        `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Defaults to `False`
otherwise.
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: Union[int, Tuple[int]] = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
dropout: float = 0.0,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: Union[int, Tuple[int]] = 1280,
transformer_layers_per_block: Union[int, Tuple[int]] = 1,
encoder_hid_dim: Optional[int] = None,
encoder_hid_dim_type: Optional[str] = None,
attention_head_dim: Union[int, Tuple[int]] = 8,
num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
addition_embed_type: Optional[str] = None,
addition_time_embed_dim: Optional[int] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
resnet_skip_time_act: bool = False,
resnet_out_scale_factor: int = 1.0,
time_embedding_type: str = "positional",
time_embedding_dim: Optional[int] = None,
time_embedding_act_fn: Optional[str] = None,
timestep_post_act: Optional[str] = None,
time_cond_proj_dim: Optional[int] = None,
conv_in_kernel: int = 3,
conv_out_kernel: int = 3,
projection_class_embeddings_input_dim: Optional[int] = None,
attention_type: str = "default",
class_embeddings_concat: bool = False,
mid_block_only_cross_attention: Optional[bool] = None,
cross_attention_norm: Optional[str] = None,
addition_embed_type_num_heads=64,
):
super().__init__()
self.sample_size = sample_size
if num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
)
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
num_attention_heads = num_attention_heads or attention_head_dim
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
)
if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
)
if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
)
# input
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
if time_embedding_type == "fourier":
time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
if time_embed_dim % 2 != 0:
raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
self.time_proj = GaussianFourierProjection(
time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
)
timestep_input_dim = time_embed_dim
elif time_embedding_type == "positional":
time_embed_dim = time_embedding_dim or block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
else:
raise ValueError(
f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
)
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
post_act_fn=timestep_post_act,
cond_proj_dim=time_cond_proj_dim,
)
if encoder_hid_dim_type is None and encoder_hid_dim is not None:
encoder_hid_dim_type = "text_proj"
self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
if encoder_hid_dim is None and encoder_hid_dim_type is not None:
raise ValueError(
f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
)
if encoder_hid_dim_type == "text_proj":
self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
elif encoder_hid_dim_type == "text_image_proj":
# image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
# they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
            # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)`
self.encoder_hid_proj = TextImageProjection(
text_embed_dim=encoder_hid_dim,
image_embed_dim=cross_attention_dim,
cross_attention_dim=cross_attention_dim,
)
elif encoder_hid_dim_type == "image_proj":
# Kandinsky 2.2
self.encoder_hid_proj = ImageProjection(
image_embed_dim=encoder_hid_dim,
cross_attention_dim=cross_attention_dim,
)
elif encoder_hid_dim_type is not None:
raise ValueError(
f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
)
else:
self.encoder_hid_proj = None
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
elif class_embed_type == "projection":
if projection_class_embeddings_input_dim is None:
raise ValueError(
"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
)
# The projection `class_embed_type` is the same as the timestep `class_embed_type` except
# 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
# 2. it projects from an arbitrary input dimension.
#
# Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
# When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
# As a result, `TimestepEmbedding` can be passed arbitrary vectors.
self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
elif class_embed_type == "simple_projection":
if projection_class_embeddings_input_dim is None:
raise ValueError(
"`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
)
self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
else:
self.class_embedding = None
if addition_embed_type == "text":
if encoder_hid_dim is not None:
text_time_embedding_from_dim = encoder_hid_dim
else:
text_time_embedding_from_dim = cross_attention_dim
self.add_embedding = TextTimeEmbedding(
text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
)
elif addition_embed_type == "text_image":
# text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
# they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
            # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)`
self.add_embedding = TextImageTimeEmbedding(
text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
)
elif addition_embed_type == "text_time":
self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
elif addition_embed_type == "image":
# Kandinsky 2.2
self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
elif addition_embed_type == "image_hint":
# Kandinsky 2.2 ControlNet
self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
elif addition_embed_type is not None:
raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
if time_embedding_act_fn is None:
self.time_embed_act = None
else:
self.time_embed_act = get_activation(time_embedding_act_fn)
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
if mid_block_only_cross_attention is None:
mid_block_only_cross_attention = only_cross_attention
only_cross_attention = [only_cross_attention] * len(down_block_types)
if mid_block_only_cross_attention is None:
mid_block_only_cross_attention = False
if isinstance(num_attention_heads, int):
num_attention_heads = (num_attention_heads,) * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
if isinstance(cross_attention_dim, int):
cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
if isinstance(layers_per_block, int):
layers_per_block = [layers_per_block] * len(down_block_types)
if isinstance(transformer_layers_per_block, int):
transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
if class_embeddings_concat:
# The time embeddings are concatenated with the class embeddings. The dimension of the
# time embeddings passed to the down, middle, and up blocks is twice the dimension of the
# regular time embeddings
blocks_time_embed_dim = time_embed_dim * 2
else:
blocks_time_embed_dim = time_embed_dim
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block[i],
transformer_layers_per_block=transformer_layers_per_block[i],
in_channels=input_channel,
out_channels=output_channel,
temb_channels=blocks_time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim[i],
num_attention_heads=num_attention_heads[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
resnet_skip_time_act=resnet_skip_time_act,
resnet_out_scale_factor=resnet_out_scale_factor,
cross_attention_norm=cross_attention_norm,
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
dropout=dropout,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock2DCrossAttn":
self.mid_block = UNetMidBlock2DCrossAttn(
transformer_layers_per_block=transformer_layers_per_block[-1],
in_channels=block_out_channels[-1],
temb_channels=blocks_time_embed_dim,
dropout=dropout,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift=resnet_time_scale_shift,
cross_attention_dim=cross_attention_dim[-1],
num_attention_heads=num_attention_heads[-1],
resnet_groups=norm_num_groups,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
attention_type=attention_type,
)
elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn": | self.mid_block = UNetMidBlock2DSimpleCrossAttn( | 1 | 2023-12-01 10:54:04+00:00 | 12k |
alvinliu0/HumanGaussian | threestudio/systems/base.py | [
{
"identifier": "Exporter",
"path": "threestudio/models/exporters/base.py",
"snippet": "class Exporter(BaseObject):\n @dataclass\n class Config(BaseObject.Config):\n save_video: bool = False\n\n cfg: Config\n\n def configure(\n self,\n geometry: BaseImplicitGeometry,\n material: BaseMaterial,\n background: BaseBackground,\n ) -> None:\n @dataclass\n class SubModules:\n geometry: BaseImplicitGeometry\n material: BaseMaterial\n background: BaseBackground\n\n self.sub_modules = SubModules(geometry, material, background)\n\n @property\n def geometry(self) -> BaseImplicitGeometry:\n return self.sub_modules.geometry\n\n @property\n def material(self) -> BaseMaterial:\n return self.sub_modules.material\n\n @property\n def background(self) -> BaseBackground:\n return self.sub_modules.background\n\n def __call__(self, *args, **kwargs) -> List[ExporterOutput]:\n raise NotImplementedError"
},
{
"identifier": "ExporterOutput",
"path": "threestudio/models/exporters/base.py",
"snippet": "class ExporterOutput:\n save_name: str\n save_type: str\n params: Dict[str, Any]"
},
{
"identifier": "parse_optimizer",
"path": "threestudio/systems/utils.py",
"snippet": "def parse_optimizer(config, model):\n if hasattr(config, \"params\"):\n params = [\n {\"params\": get_parameters(model, name), \"name\": name, **args}\n for name, args in config.params.items()\n ]\n threestudio.debug(f\"Specify optimizer params: {config.params}\")\n else:\n params = model.parameters()\n if config.name in [\"FusedAdam\"]:\n import apex\n\n optim = getattr(apex.optimizers, config.name)(params, **config.args)\n elif config.name in [\"Adan\"]:\n from threestudio.systems import optimizers\n\n optim = getattr(optimizers, config.name)(params, **config.args)\n else:\n optim = getattr(torch.optim, config.name)(params, **config.args)\n return optim"
},
{
"identifier": "parse_scheduler",
"path": "threestudio/systems/utils.py",
"snippet": "def parse_scheduler(config, optimizer):\n interval = config.get(\"interval\", \"epoch\")\n assert interval in [\"epoch\", \"step\"]\n if config.name == \"SequentialLR\":\n scheduler = {\n \"scheduler\": lr_scheduler.SequentialLR(\n optimizer,\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ],\n milestones=config.milestones,\n ),\n \"interval\": interval,\n }\n elif config.name == \"ChainedScheduler\":\n scheduler = {\n \"scheduler\": lr_scheduler.ChainedScheduler(\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ]\n ),\n \"interval\": interval,\n }\n else:\n scheduler = {\n \"scheduler\": get_scheduler(config.name)(optimizer, **config.args),\n \"interval\": interval,\n }\n return scheduler"
},
{
"identifier": "Updateable",
"path": "threestudio/utils/base.py",
"snippet": "class Updateable:\n def do_update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ):\n for attr in self.__dir__():\n if attr.startswith(\"_\"):\n continue\n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using getattr?\n if isinstance(module, Updateable):\n module.do_update_step(\n epoch, global_step, on_load_weights=on_load_weights\n )\n self.update_step(epoch, global_step, on_load_weights=on_load_weights)\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n # override this method to implement custom update logic\n # if on_load_weights is True, you should be careful doing things related to model evaluations,\n # as the models and tensors are not guarenteed to be on the same device\n pass"
},
{
"identifier": "update_if_possible",
"path": "threestudio/utils/base.py",
"snippet": "def update_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step(epoch, global_step)"
},
{
"identifier": "parse_structured",
"path": "threestudio/utils/config.py",
"snippet": "def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]] = None) -> Any:\n scfg = OmegaConf.structured(fields(**cfg))\n return scfg"
},
{
"identifier": "C",
"path": "threestudio/utils/misc.py",
"snippet": "def C(value: Any, epoch: int, global_step: int) -> float:\n if isinstance(value, int) or isinstance(value, float):\n pass\n else:\n value = config_to_primitive(value)\n if not isinstance(value, list):\n raise TypeError(\"Scalar specification only supports list, got\", type(value))\n if len(value) == 3:\n value = [0] + value\n assert len(value) == 4\n start_step, start_value, end_value, end_step = value\n if isinstance(end_step, int):\n current_step = global_step\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n elif isinstance(end_step, float):\n current_step = epoch\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n return value"
},
{
"identifier": "cleanup",
"path": "threestudio/utils/misc.py",
"snippet": "def cleanup():\n gc.collect()\n torch.cuda.empty_cache()\n tcnn.free_temporary_memory()"
},
{
"identifier": "get_device",
"path": "threestudio/utils/misc.py",
"snippet": "def get_device():\n return torch.device(f\"cuda:{get_rank()}\")"
},
{
"identifier": "load_module_weights",
"path": "threestudio/utils/misc.py",
"snippet": "def load_module_weights(\n path, module_name=None, ignore_modules=None, map_location=None\n) -> Tuple[dict, int, int]:\n if module_name is not None and ignore_modules is not None:\n raise ValueError(\"module_name and ignore_modules cannot be both set\")\n if map_location is None:\n map_location = get_device()\n\n ckpt = torch.load(path, map_location=map_location)\n state_dict = ckpt[\"state_dict\"]\n state_dict_to_load = state_dict\n\n if ignore_modules is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n ignore = any(\n [k.startswith(ignore_module + \".\") for ignore_module in ignore_modules]\n )\n if ignore:\n continue\n state_dict_to_load[k] = v\n\n if module_name is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n m = re.match(rf\"^{module_name}\\.(.*)$\", k)\n if m is None:\n continue\n state_dict_to_load[m.group(1)] = v\n\n return state_dict_to_load, ckpt[\"epoch\"], ckpt[\"global_step\"]"
},
{
"identifier": "SaverMixin",
"path": "threestudio/utils/saving.py",
"snippet": "class SaverMixin:\n _save_dir: Optional[str] = None\n _wandb_logger: Optional[WandbLogger] = None\n\n def set_save_dir(self, save_dir: str):\n self._save_dir = save_dir\n\n def get_save_dir(self):\n if self._save_dir is None:\n raise ValueError(\"Save dir is not set\")\n return self._save_dir\n\n def convert_data(self, data):\n if data is None:\n return None\n elif isinstance(data, np.ndarray):\n return data\n elif isinstance(data, torch.Tensor):\n return data.detach().cpu().numpy()\n elif isinstance(data, list):\n return [self.convert_data(d) for d in data]\n elif isinstance(data, dict):\n return {k: self.convert_data(v) for k, v in data.items()}\n else:\n raise TypeError(\n \"Data must be in type numpy.ndarray, torch.Tensor, list or dict, getting\",\n type(data),\n )\n\n def get_save_path(self, filename):\n save_path = os.path.join(self.get_save_dir(), filename)\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n return save_path\n\n def create_loggers(self, cfg_loggers: DictConfig) -> None:\n if \"wandb\" in cfg_loggers.keys() and cfg_loggers.wandb.enable:\n self._wandb_logger = WandbLogger(\n project=cfg_loggers.wandb.project, name=cfg_loggers.wandb.name\n )\n\n def get_loggers(self) -> List:\n if self._wandb_logger:\n return [self._wandb_logger]\n else:\n return []\n\n DEFAULT_RGB_KWARGS = {\"data_format\": \"HWC\", \"data_range\": (0, 1)}\n DEFAULT_UV_KWARGS = {\n \"data_format\": \"HWC\",\n \"data_range\": (0, 1),\n \"cmap\": \"checkerboard\",\n }\n DEFAULT_GRAYSCALE_KWARGS = {\"data_range\": None, \"cmap\": \"jet\"}\n DEFAULT_GRID_KWARGS = {\"align\": \"max\"}\n\n def get_rgb_image_(self, img, data_format, data_range, rgba=False):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n if img.dtype != np.uint8:\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (\n (img - data_range[0]) / (data_range[1] - data_range[0]) * 255.0\n ).astype(np.uint8)\n nc = 4 if rgba else 3\n imgs = [img[..., start : start + nc] for start in range(0, img.shape[-1], nc)]\n imgs = [\n img_\n if img_.shape[-1] == nc\n else np.concatenate(\n [\n img_,\n np.zeros(\n (img_.shape[0], img_.shape[1], nc - img_.shape[2]),\n dtype=img_.dtype,\n ),\n ],\n axis=-1,\n )\n for img_ in imgs\n ]\n img = np.concatenate(imgs, axis=1)\n if rgba:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n else:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_rgb_image(\n self,\n filename,\n img,\n data_format,\n data_range,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_rgb_image_(img, data_format, data_range)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_rgb_image(\n self,\n filename,\n img,\n data_format=DEFAULT_RGB_KWARGS[\"data_format\"],\n data_range=DEFAULT_RGB_KWARGS[\"data_range\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_rgb_image(save_path, img, data_format, data_range, name, step)\n return save_path\n\n def get_uv_image_(self, img, data_format, data_range, cmap):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in 
[\"checkerboard\", \"color\"]\n if cmap == \"checkerboard\":\n n_grid = 64\n mask = (img * n_grid).astype(int)\n mask = (mask[..., 0] + mask[..., 1]) % 2 == 0\n img = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8) * 255\n img[mask] = np.array([255, 0, 255], dtype=np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif cmap == \"color\":\n img_ = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n img_[..., 0] = (img[..., 0] * 255).astype(np.uint8)\n img_[..., 1] = (img[..., 1] * 255).astype(np.uint8)\n img_ = cv2.cvtColor(img_, cv2.COLOR_RGB2BGR)\n img = img_\n return img\n\n def save_uv_image(\n self,\n filename,\n img,\n data_format=DEFAULT_UV_KWARGS[\"data_format\"],\n data_range=DEFAULT_UV_KWARGS[\"data_range\"],\n cmap=DEFAULT_UV_KWARGS[\"cmap\"],\n ) -> str:\n save_path = self.get_save_path(filename)\n img = self.get_uv_image_(img, data_format, data_range, cmap)\n cv2.imwrite(save_path, img)\n return save_path\n\n def get_grayscale_image_(self, img, data_range, cmap):\n img = self.convert_data(img)\n img = np.nan_to_num(img)\n if data_range is None:\n img = (img - img.min()) / (img.max() - img.min())\n else:\n img = img.clip(data_range[0], data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [None, \"jet\", \"magma\", \"spectral\"]\n if cmap == None:\n img = (img * 255.0).astype(np.uint8)\n img = np.repeat(img[..., None], 3, axis=2)\n elif cmap == \"jet\":\n img = (img * 255.0).astype(np.uint8)\n img = cv2.applyColorMap(img, cv2.COLORMAP_JET)\n elif cmap == \"magma\":\n img = 1.0 - img\n base = cm.get_cmap(\"magma\")\n num_bins = 256\n colormap = LinearSegmentedColormap.from_list(\n f\"{base.name}{num_bins}\", base(np.linspace(0, 1, num_bins)), num_bins\n )(np.linspace(0, 1, num_bins))[:, :3]\n a = np.floor(img * 255.0)\n b = (a + 1).clip(max=255.0)\n f = img * 255.0 - a\n a = a.astype(np.uint16).clip(0, 255)\n b = b.astype(np.uint16).clip(0, 255)\n img = colormap[a] + (colormap[b] - colormap[a]) * f[..., None]\n img = (img * 255.0).astype(np.uint8)\n elif cmap == \"spectral\":\n colormap = plt.get_cmap(\"Spectral\")\n\n def blend_rgba(image):\n image = image[..., :3] * image[..., -1:] + (\n 1.0 - image[..., -1:]\n ) # blend A to RGB\n return image\n\n img = colormap(img)\n img = blend_rgba(img)\n img = (img * 255).astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_grayscale_image(\n self,\n filename,\n img,\n data_range,\n cmap,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_grayscale_image_(img, data_range, cmap)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_grayscale_image(\n self,\n filename,\n img,\n data_range=DEFAULT_GRAYSCALE_KWARGS[\"data_range\"],\n cmap=DEFAULT_GRAYSCALE_KWARGS[\"cmap\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n save_path = self.get_save_path(filename)\n self._save_grayscale_image(save_path, img, data_range, cmap, name, step)\n return save_path\n\n def get_image_grid_(self, imgs, align):\n if isinstance(imgs[0], list):\n return np.concatenate(\n [self.get_image_grid_(row, align) for row in imgs], axis=0\n )\n cols = []\n for col in imgs:\n assert col[\"type\"] in [\"rgb\", \"uv\", \"grayscale\"]\n if col[\"type\"] == \"rgb\":\n rgb_kwargs = self.DEFAULT_RGB_KWARGS.copy()\n rgb_kwargs.update(col[\"kwargs\"])\n 
cols.append(self.get_rgb_image_(col[\"img\"], **rgb_kwargs))\n elif col[\"type\"] == \"uv\":\n uv_kwargs = self.DEFAULT_UV_KWARGS.copy()\n uv_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_uv_image_(col[\"img\"], **uv_kwargs))\n elif col[\"type\"] == \"grayscale\":\n grayscale_kwargs = self.DEFAULT_GRAYSCALE_KWARGS.copy()\n grayscale_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_grayscale_image_(col[\"img\"], **grayscale_kwargs))\n\n if align == \"max\":\n h = max([col.shape[0] for col in cols])\n w = max([col.shape[1] for col in cols])\n elif align == \"min\":\n h = min([col.shape[0] for col in cols])\n w = min([col.shape[1] for col in cols])\n elif isinstance(align, int):\n h = align\n w = align\n elif (\n isinstance(align, tuple)\n and isinstance(align[0], int)\n and isinstance(align[1], int)\n ):\n h, w = align\n else:\n raise ValueError(\n f\"Unsupported image grid align: {align}, should be min, max, int or (int, int)\"\n )\n\n for i in range(len(cols)):\n if cols[i].shape[0] != h or cols[i].shape[1] != w:\n cols[i] = cv2.resize(cols[i], (w, h), interpolation=cv2.INTER_LINEAR)\n return np.concatenate(cols, axis=1)\n\n def save_image_grid(\n self,\n filename,\n imgs,\n align=DEFAULT_GRID_KWARGS[\"align\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n texts: Optional[List[float]] = None,\n ):\n save_path = self.get_save_path(filename)\n img = self.get_image_grid_(imgs, align=align)\n\n if texts is not None:\n img = Image.fromarray(img)\n draw = ImageDraw.Draw(img)\n black, white = (0, 0, 0), (255, 255, 255)\n for i, text in enumerate(texts):\n draw.text((2, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i + 1), f\"{text}\", white)\n draw.text((2, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((0, (img.size[1] // len(texts)) * i - 1), f\"{text}\", white)\n draw.text((1, (img.size[1] // len(texts)) * i), f\"{text}\", black)\n img = np.asarray(img)\n\n cv2.imwrite(save_path, img)\n if name and self._wandb_logger:\n wandb.log({name: wandb.Image(save_path), \"trainer/global_step\": step})\n return save_path\n\n def save_image(self, filename, img) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.dtype == np.uint8 or img.dtype == np.uint16\n if img.ndim == 3 and img.shape[-1] == 3:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif img.ndim == 3 and img.shape[-1] == 4:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n cv2.imwrite(save_path, img)\n return save_path\n\n def save_cubemap(self, filename, img, data_range=(0, 1), rgba=False) -> str:\n save_path = self.get_save_path(filename)\n img = self.convert_data(img)\n assert img.ndim == 4 and img.shape[0] == 6 and img.shape[1] == img.shape[2]\n\n imgs_full = []\n for start in range(0, img.shape[-1], 3):\n img_ = img[..., start : start + 3]\n img_ = np.stack(\n [\n self.get_rgb_image_(img_[i], \"HWC\", data_range, rgba=rgba)\n for i in range(img_.shape[0])\n ],\n axis=0,\n )\n size = img_.shape[1]\n placeholder = np.zeros((size, size, 3), dtype=np.float32)\n img_full = np.concatenate(\n [\n np.concatenate(\n [placeholder, img_[2], placeholder, placeholder], axis=1\n ),\n np.concatenate([img_[1], img_[4], img_[0], img_[5]], axis=1),\n np.concatenate(\n [placeholder, img_[3], placeholder, placeholder], axis=1\n ),\n ],\n axis=0,\n )\n imgs_full.append(img_full)\n\n imgs_full = np.concatenate(imgs_full, axis=1)\n cv2.imwrite(save_path, imgs_full)\n return save_path\n\n def save_data(self, 
filename, data) -> str:\n data = self.convert_data(data)\n if isinstance(data, dict):\n if not filename.endswith(\".npz\"):\n filename += \".npz\"\n save_path = self.get_save_path(filename)\n np.savez(save_path, **data)\n else:\n if not filename.endswith(\".npy\"):\n filename += \".npy\"\n save_path = self.get_save_path(filename)\n np.save(save_path, data)\n return save_path\n\n def save_state_dict(self, filename, data) -> str:\n save_path = self.get_save_path(filename)\n torch.save(data, save_path)\n return save_path\n\n def save_img_sequence(\n self,\n filename,\n img_dir,\n matcher,\n save_format=\"mp4\",\n fps=30,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ) -> str:\n assert save_format in [\"gif\", \"mp4\"]\n if not filename.endswith(save_format):\n filename += f\".{save_format}\"\n save_path = self.get_save_path(filename)\n matcher = re.compile(matcher)\n img_dir = os.path.join(self.get_save_dir(), img_dir)\n imgs = []\n for f in os.listdir(img_dir):\n if matcher.search(f):\n imgs.append(f)\n imgs = sorted(imgs, key=lambda f: int(matcher.search(f).groups()[0]))\n imgs = [cv2.imread(os.path.join(img_dir, f)) for f in imgs]\n\n if save_format == \"gif\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps, palettesize=256)\n elif save_format == \"mp4\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(save_path, imgs, fps=fps)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Video(save_path, format=\"mp4\"),\n \"trainer/global_step\": step,\n }\n )\n return save_path\n\n def save_mesh(self, filename, v_pos, t_pos_idx, v_tex=None, t_tex_idx=None) -> str:\n save_path = self.get_save_path(filename)\n v_pos = self.convert_data(v_pos)\n t_pos_idx = self.convert_data(t_pos_idx)\n mesh = trimesh.Trimesh(vertices=v_pos, faces=t_pos_idx)\n mesh.export(save_path)\n return save_path\n\n def save_obj(\n self,\n filename: str,\n mesh: Mesh,\n save_mat: bool = False,\n save_normal: bool = False,\n save_uv: bool = False,\n save_vertex_color: bool = False,\n map_Kd: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Ks: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Bump: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Pm: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_Pr: Optional[Float[Tensor, \"H W 1\"]] = None,\n map_format: str = \"jpg\",\n ) -> List[str]:\n save_paths: List[str] = []\n if not filename.endswith(\".obj\"):\n filename += \".obj\"\n v_pos, t_pos_idx = self.convert_data(mesh.v_pos), self.convert_data(\n mesh.t_pos_idx\n )\n v_nrm, v_tex, t_tex_idx, v_rgb = None, None, None, None\n if save_normal:\n v_nrm = self.convert_data(mesh.v_nrm)\n if save_uv:\n v_tex, t_tex_idx = self.convert_data(mesh.v_tex), self.convert_data(\n mesh.t_tex_idx\n )\n if save_vertex_color:\n v_rgb = self.convert_data(mesh.v_rgb)\n matname, mtllib = None, None\n if save_mat:\n matname = \"default\"\n mtl_filename = filename.replace(\".obj\", \".mtl\")\n mtllib = os.path.basename(mtl_filename)\n mtl_save_paths = self._save_mtl(\n mtl_filename,\n matname,\n map_Kd=self.convert_data(map_Kd),\n map_Ks=self.convert_data(map_Ks),\n map_Bump=self.convert_data(map_Bump),\n map_Pm=self.convert_data(map_Pm),\n map_Pr=self.convert_data(map_Pr),\n map_format=map_format,\n )\n save_paths += mtl_save_paths\n obj_save_path = self._save_obj(\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=v_nrm,\n v_tex=v_tex,\n t_tex_idx=t_tex_idx,\n v_rgb=v_rgb,\n matname=matname,\n mtllib=mtllib,\n )\n 
save_paths.append(obj_save_path)\n return save_paths\n\n def _save_obj(\n self,\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=None,\n v_tex=None,\n t_tex_idx=None,\n v_rgb=None,\n matname=None,\n mtllib=None,\n ) -> str:\n obj_str = \"\"\n if matname is not None:\n obj_str += f\"mtllib {mtllib}\\n\"\n obj_str += f\"g object\\n\"\n obj_str += f\"usemtl {matname}\\n\"\n for i in range(len(v_pos)):\n obj_str += f\"v {v_pos[i][0]} {v_pos[i][1]} {v_pos[i][2]}\"\n if v_rgb is not None:\n obj_str += f\" {v_rgb[i][0]} {v_rgb[i][1]} {v_rgb[i][2]}\"\n obj_str += \"\\n\"\n if v_nrm is not None:\n for v in v_nrm:\n obj_str += f\"vn {v[0]} {v[1]} {v[2]}\\n\"\n if v_tex is not None:\n for v in v_tex:\n obj_str += f\"vt {v[0]} {1.0 - v[1]}\\n\"\n\n for i in range(len(t_pos_idx)):\n obj_str += \"f\"\n for j in range(3):\n obj_str += f\" {t_pos_idx[i][j] + 1}/\"\n if v_tex is not None:\n obj_str += f\"{t_tex_idx[i][j] + 1}\"\n obj_str += \"/\"\n if v_nrm is not None:\n obj_str += f\"{t_pos_idx[i][j] + 1}\"\n obj_str += \"\\n\"\n\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(obj_str)\n return save_path\n\n def _save_mtl(\n self,\n filename,\n matname,\n Ka=(0.0, 0.0, 0.0),\n Kd=(1.0, 1.0, 1.0),\n Ks=(0.0, 0.0, 0.0),\n map_Kd=None,\n map_Ks=None,\n map_Bump=None,\n map_Pm=None,\n map_Pr=None,\n map_format=\"jpg\",\n step: Optional[int] = None,\n ) -> List[str]:\n mtl_save_path = self.get_save_path(filename)\n save_paths = [mtl_save_path]\n mtl_str = f\"newmtl {matname}\\n\"\n mtl_str += f\"Ka {Ka[0]} {Ka[1]} {Ka[2]}\\n\"\n if map_Kd is not None:\n map_Kd_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_kd.{map_format}\"\n )\n mtl_str += f\"map_Kd texture_kd.{map_format}\\n\"\n self._save_rgb_image(\n map_Kd_save_path,\n map_Kd,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Kd\",\n step=step,\n )\n save_paths.append(map_Kd_save_path)\n else:\n mtl_str += f\"Kd {Kd[0]} {Kd[1]} {Kd[2]}\\n\"\n if map_Ks is not None:\n map_Ks_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_ks.{map_format}\"\n )\n mtl_str += f\"map_Ks texture_ks.{map_format}\\n\"\n self._save_rgb_image(\n map_Ks_save_path,\n map_Ks,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Ks\",\n step=step,\n )\n save_paths.append(map_Ks_save_path)\n else:\n mtl_str += f\"Ks {Ks[0]} {Ks[1]} {Ks[2]}\\n\"\n if map_Bump is not None:\n map_Bump_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_nrm.{map_format}\"\n )\n mtl_str += f\"map_Bump texture_nrm.{map_format}\\n\"\n self._save_rgb_image(\n map_Bump_save_path,\n map_Bump,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Bump\",\n step=step,\n )\n save_paths.append(map_Bump_save_path)\n if map_Pm is not None:\n map_Pm_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_metallic.{map_format}\"\n )\n mtl_str += f\"map_Pm texture_metallic.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pm_save_path,\n map_Pm,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_refl\",\n step=step,\n )\n save_paths.append(map_Pm_save_path)\n if map_Pr is not None:\n map_Pr_save_path = os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_roughness.{map_format}\"\n )\n mtl_str += f\"map_Pr texture_roughness.{map_format}\\n\"\n self._save_grayscale_image(\n map_Pr_save_path,\n map_Pr,\n data_range=(0, 1),\n cmap=None,\n name=f\"{matname}_Ns\",\n step=step,\n )\n save_paths.append(map_Pr_save_path)\n with open(self.get_save_path(filename), \"w\") 
as f:\n f.write(mtl_str)\n return save_paths\n\n def save_file(self, filename, src_path) -> str:\n save_path = self.get_save_path(filename)\n shutil.copyfile(src_path, save_path)\n return save_path\n\n def save_json(self, filename, payload) -> str:\n save_path = self.get_save_path(filename)\n with open(save_path, \"w\") as f:\n f.write(json.dumps(payload))\n return save_path"
}
] | import os
import pytorch_lightning as pl
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.exporters.base import Exporter, ExporterOutput
from threestudio.systems.utils import parse_optimizer, parse_scheduler
from threestudio.utils.base import Updateable, update_if_possible
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import C, cleanup, get_device, load_module_weights
from threestudio.utils.saving import SaverMixin
from threestudio.utils.typing import *
from threestudio.utils.config import load_config, parse_structured | 8,566 |
class BaseSystem(pl.LightningModule, Updateable, SaverMixin):
@dataclass
class Config:
loggers: dict = field(default_factory=dict)
loss: dict = field(default_factory=dict)
optimizer: dict = field(default_factory=dict)
scheduler: Optional[dict] = None
weights: Optional[str] = None
weights_ignore_modules: Optional[List[str]] = None
cleanup_after_validation_step: bool = False
cleanup_after_test_step: bool = False
cfg: Config
def __init__(self, cfg, resumed=False) -> None:
super().__init__()
self.cfg = parse_structured(self.Config, cfg)
self._save_dir: Optional[str] = None
self._resumed: bool = resumed
self._resumed_eval: bool = False
self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0}
if "loggers" in cfg:
self.create_loggers(cfg.loggers)
self.configure()
if self.cfg.weights is not None:
self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules)
self.post_configure()
def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None):
|
class BaseSystem(pl.LightningModule, Updateable, SaverMixin):
@dataclass
class Config:
loggers: dict = field(default_factory=dict)
loss: dict = field(default_factory=dict)
optimizer: dict = field(default_factory=dict)
scheduler: Optional[dict] = None
weights: Optional[str] = None
weights_ignore_modules: Optional[List[str]] = None
cleanup_after_validation_step: bool = False
cleanup_after_test_step: bool = False
cfg: Config
def __init__(self, cfg, resumed=False) -> None:
super().__init__()
self.cfg = parse_structured(self.Config, cfg)
self._save_dir: Optional[str] = None
self._resumed: bool = resumed
self._resumed_eval: bool = False
self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0}
if "loggers" in cfg:
self.create_loggers(cfg.loggers)
self.configure()
if self.cfg.weights is not None:
self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules)
self.post_configure()
def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None): | state_dict, epoch, global_step = load_module_weights( | 10 | 2023-11-27 02:39:39+00:00 | 12k |
ShunyuanZheng/GPS-Gaussian | train_stage2.py | [
{
"identifier": "StereoHumanDataset",
"path": "lib/human_loader.py",
"snippet": "class StereoHumanDataset(Dataset):\n def __init__(self, opt, phase='train'):\n self.opt = opt\n self.use_processed_data = opt.use_processed_data\n self.phase = phase\n if self.phase == 'train':\n self.data_root = os.path.join(opt.data_root, 'train')\n elif self.phase == 'val':\n self.data_root = os.path.join(opt.data_root, 'val')\n elif self.phase == 'test':\n self.data_root = opt.test_data_root\n\n self.img_path = os.path.join(self.data_root, 'img/%s/%d.jpg')\n self.img_hr_path = os.path.join(self.data_root, 'img/%s/%d_hr.jpg')\n self.mask_path = os.path.join(self.data_root, 'mask/%s/%d.png')\n self.depth_path = os.path.join(self.data_root, 'depth/%s/%d.png')\n self.intr_path = os.path.join(self.data_root, 'parm/%s/%d_intrinsic.npy')\n self.extr_path = os.path.join(self.data_root, 'parm/%s/%d_extrinsic.npy')\n self.sample_list = sorted(list(os.listdir(os.path.join(self.data_root, 'img'))))\n\n if self.use_processed_data:\n self.local_data_root = os.path.join(opt.data_root, 'rectified_local', self.phase)\n self.local_img_path = os.path.join(self.local_data_root, 'img/%s/%d.jpg')\n self.local_mask_path = os.path.join(self.local_data_root, 'mask/%s/%d.png')\n self.local_flow_path = os.path.join(self.local_data_root, 'flow/%s/%d.npy')\n self.local_valid_path = os.path.join(self.local_data_root, 'valid/%s/%d.png')\n self.local_parm_path = os.path.join(self.local_data_root, 'parm/%s/%d_%d.json')\n\n if os.path.exists(self.local_data_root):\n assert len(os.listdir(os.path.join(self.local_data_root, 'img'))) == len(self.sample_list)\n logging.info(f\"Using local data in {self.local_data_root} ...\")\n else:\n self.save_local_stereo_data()\n\n def save_local_stereo_data(self):\n logging.info(f\"Generating data to {self.local_data_root} ...\")\n for sample_name in tqdm(self.sample_list):\n view0_data = self.load_single_view(sample_name, self.opt.source_id[0], hr_img=False,\n require_mask=True, require_pts=True)\n view1_data = self.load_single_view(sample_name, self.opt.source_id[1], hr_img=False,\n require_mask=True, require_pts=True)\n lmain_stereo_np = self.get_rectified_stereo_data(main_view_data=view0_data, ref_view_data=view1_data)\n\n for sub_dir in ['/img/', '/mask/', '/flow/', '/valid/', '/parm/']:\n Path(self.local_data_root + sub_dir + str(sample_name)).mkdir(exist_ok=True, parents=True)\n\n img0_save_name = self.local_img_path % (sample_name, self.opt.source_id[0])\n mask0_save_name = self.local_mask_path % (sample_name, self.opt.source_id[0])\n img1_save_name = self.local_img_path % (sample_name, self.opt.source_id[1])\n mask1_save_name = self.local_mask_path % (sample_name, self.opt.source_id[1])\n flow0_save_name = self.local_flow_path % (sample_name, self.opt.source_id[0])\n valid0_save_name = self.local_valid_path % (sample_name, self.opt.source_id[0])\n flow1_save_name = self.local_flow_path % (sample_name, self.opt.source_id[1])\n valid1_save_name = self.local_valid_path % (sample_name, self.opt.source_id[1])\n parm_save_name = self.local_parm_path % (sample_name, self.opt.source_id[0], self.opt.source_id[1])\n\n Image.fromarray(lmain_stereo_np['img0']).save(img0_save_name, quality=95)\n Image.fromarray(lmain_stereo_np['mask0']).save(mask0_save_name)\n Image.fromarray(lmain_stereo_np['img1']).save(img1_save_name, quality=95)\n Image.fromarray(lmain_stereo_np['mask1']).save(mask1_save_name)\n np.save(flow0_save_name, lmain_stereo_np['flow0'].astype(np.float16))\n Image.fromarray(lmain_stereo_np['valid0']).save(valid0_save_name)\n np.save(flow1_save_name, 
lmain_stereo_np['flow1'].astype(np.float16))\n Image.fromarray(lmain_stereo_np['valid1']).save(valid1_save_name)\n save_np_to_json(lmain_stereo_np['camera'], parm_save_name)\n\n logging.info(\"Generating data Done!\")\n\n def load_local_stereo_data(self, sample_name):\n img0_name = self.local_img_path % (sample_name, self.opt.source_id[0])\n mask0_name = self.local_mask_path % (sample_name, self.opt.source_id[0])\n img1_name = self.local_img_path % (sample_name, self.opt.source_id[1])\n mask1_name = self.local_mask_path % (sample_name, self.opt.source_id[1])\n flow0_name = self.local_flow_path % (sample_name, self.opt.source_id[0])\n flow1_name = self.local_flow_path % (sample_name, self.opt.source_id[1])\n valid0_name = self.local_valid_path % (sample_name, self.opt.source_id[0])\n valid1_name = self.local_valid_path % (sample_name, self.opt.source_id[1])\n parm_name = self.local_parm_path % (sample_name, self.opt.source_id[0], self.opt.source_id[1])\n\n stereo_data = {\n 'img0': read_img(img0_name),\n 'mask0': read_img(mask0_name),\n 'img1': read_img(img1_name),\n 'mask1': read_img(mask1_name),\n 'camera': load_json_to_np(parm_name),\n 'flow0': np.load(flow0_name),\n 'valid0': read_img(valid0_name),\n 'flow1': np.load(flow1_name),\n 'valid1': read_img(valid1_name)\n }\n\n return stereo_data\n\n def load_single_view(self, sample_name, source_id, hr_img=False, require_mask=True, require_pts=True):\n img_name = self.img_path % (sample_name, source_id)\n image_hr_name = self.img_hr_path % (sample_name, source_id)\n mask_name = self.mask_path % (sample_name, source_id)\n depth_name = self.depth_path % (sample_name, source_id)\n intr_name = self.intr_path % (sample_name, source_id)\n extr_name = self.extr_path % (sample_name, source_id)\n\n intr, extr = np.load(intr_name), np.load(extr_name)\n mask, pts = None, None\n if hr_img:\n img = read_img(image_hr_name)\n intr[:2] *= 2\n else:\n img = read_img(img_name)\n if require_mask:\n mask = read_img(mask_name)\n if require_pts and os.path.exists(depth_name):\n depth = read_depth(depth_name)\n pts = depth2pts(torch.FloatTensor(depth), torch.FloatTensor(extr), torch.FloatTensor(intr))\n\n return img, mask, intr, extr, pts\n\n def get_novel_view_tensor(self, sample_name, view_id):\n img, _, intr, extr, _ = self.load_single_view(sample_name, view_id, hr_img=self.opt.use_hr_img,\n require_mask=False, require_pts=False)\n width, height = img.shape[:2]\n img = torch.from_numpy(img).permute(2, 0, 1)\n img = img / 255.0\n\n R = np.array(extr[:3, :3], np.float32).reshape(3, 3).transpose(1, 0)\n T = np.array(extr[:3, 3], np.float32)\n\n FovX = focal2fov(intr[0, 0], width)\n FovY = focal2fov(intr[1, 1], height)\n projection_matrix = getProjectionMatrix(znear=self.opt.znear, zfar=self.opt.zfar, K=intr, h=height, w=width).transpose(0, 1)\n world_view_transform = torch.tensor(getWorld2View2(R, T, np.array(self.opt.trans), self.opt.scale)).transpose(0, 1)\n full_proj_transform = (world_view_transform.unsqueeze(0).bmm(projection_matrix.unsqueeze(0))).squeeze(0)\n camera_center = world_view_transform.inverse()[3, :3]\n\n novel_view_data = {\n 'view_id': torch.IntTensor([view_id]),\n 'img': img,\n 'extr': torch.FloatTensor(extr),\n 'FovX': FovX,\n 'FovY': FovY,\n 'width': width,\n 'height': height,\n 'world_view_transform': world_view_transform,\n 'full_proj_transform': full_proj_transform,\n 'camera_center': camera_center\n }\n\n return novel_view_data\n\n def get_rectified_stereo_data(self, main_view_data, ref_view_data):\n img0, mask0, intr0, extr0, pts0 = 
main_view_data\n img1, mask1, intr1, extr1, pts1 = ref_view_data\n\n H, W = 1024, 1024\n r0, t0 = extr0[:3, :3], extr0[:3, 3:]\n r1, t1 = extr1[:3, :3], extr1[:3, 3:]\n inv_r0 = r0.T\n inv_t0 = - r0.T @ t0\n E0 = np.eye(4)\n E0[:3, :3], E0[:3, 3:] = inv_r0, inv_t0\n E1 = np.eye(4)\n E1[:3, :3], E1[:3, 3:] = r1, t1\n E = E1 @ E0\n R, T = E[:3, :3], E[:3, 3]\n dist0, dist1 = np.zeros(4), np.zeros(4)\n\n R0, R1, P0, P1, _, _, _ = cv2.stereoRectify(intr0, dist0, intr1, dist1, (W, H), R, T, flags=0)\n\n new_extr0 = R0 @ extr0\n new_intr0 = P0[:3, :3]\n new_extr1 = R1 @ extr1\n new_intr1 = P1[:3, :3]\n Tf_x = np.array(P1[0, 3])\n\n camera = {\n 'intr0': new_intr0,\n 'intr1': new_intr1,\n 'extr0': new_extr0,\n 'extr1': new_extr1,\n 'Tf_x': Tf_x\n }\n\n rectify_mat0_x, rectify_mat0_y = cv2.initUndistortRectifyMap(intr0, dist0, R0, P0, (W, H), cv2.CV_32FC1)\n new_img0 = cv2.remap(img0, rectify_mat0_x, rectify_mat0_y, cv2.INTER_LINEAR)\n new_mask0 = cv2.remap(mask0, rectify_mat0_x, rectify_mat0_y, cv2.INTER_LINEAR)\n rectify_mat1_x, rectify_mat1_y = cv2.initUndistortRectifyMap(intr1, dist1, R1, P1, (W, H), cv2.CV_32FC1)\n new_img1 = cv2.remap(img1, rectify_mat1_x, rectify_mat1_y, cv2.INTER_LINEAR)\n new_mask1 = cv2.remap(mask1, rectify_mat1_x, rectify_mat1_y, cv2.INTER_LINEAR)\n rectify0 = new_extr0, new_intr0, rectify_mat0_x, rectify_mat0_y\n rectify1 = new_extr1, new_intr1, rectify_mat1_x, rectify_mat1_y\n\n stereo_data = {\n 'img0': new_img0,\n 'mask0': new_mask0,\n 'img1': new_img1,\n 'mask1': new_mask1,\n 'camera': camera\n }\n\n if pts0 is not None:\n flow0, flow1 = stereo_pts2flow(pts0, pts1, rectify0, rectify1, Tf_x)\n\n kernel = np.ones((3, 3), dtype=np.uint8)\n flow_eroded, valid_eroded = [], []\n for (flow, new_mask) in [(flow0, new_mask0), (flow1, new_mask1)]:\n valid = (new_mask.copy()[:, :, 0] / 255.0).astype(np.float32)\n valid = cv2.erode(valid, kernel, 1)\n valid[valid >= 0.66] = 1.0\n valid[valid < 0.66] = 0.0\n flow *= valid\n valid *= 255.0\n flow_eroded.append(flow)\n valid_eroded.append(valid)\n\n stereo_data.update({\n 'flow0': flow_eroded[0],\n 'valid0': valid_eroded[0].astype(np.uint8),\n 'flow1': flow_eroded[1],\n 'valid1': valid_eroded[1].astype(np.uint8)\n })\n\n return stereo_data\n\n def stereo_to_dict_tensor(self, stereo_data, subject_name):\n img_tensor, mask_tensor = [], []\n for (img_view, mask_view) in [('img0', 'mask0'), ('img1', 'mask1')]:\n img = torch.from_numpy(stereo_data[img_view]).permute(2, 0, 1)\n img = 2 * (img / 255.0) - 1.0\n mask = torch.from_numpy(stereo_data[mask_view]).permute(2, 0, 1).float()\n mask = mask / 255.0\n\n img = img * mask\n mask[mask < 0.5] = 0.0\n mask[mask >= 0.5] = 1.0\n img_tensor.append(img)\n mask_tensor.append(mask)\n\n lmain_data = {\n 'img': img_tensor[0],\n 'mask': mask_tensor[0],\n 'intr': torch.FloatTensor(stereo_data['camera']['intr0']),\n 'ref_intr': torch.FloatTensor(stereo_data['camera']['intr1']),\n 'extr': torch.FloatTensor(stereo_data['camera']['extr0']),\n 'Tf_x': torch.FloatTensor(stereo_data['camera']['Tf_x'])\n }\n\n rmain_data = {\n 'img': img_tensor[1],\n 'mask': mask_tensor[1],\n 'intr': torch.FloatTensor(stereo_data['camera']['intr1']),\n 'ref_intr': torch.FloatTensor(stereo_data['camera']['intr0']),\n 'extr': torch.FloatTensor(stereo_data['camera']['extr1']),\n 'Tf_x': -torch.FloatTensor(stereo_data['camera']['Tf_x'])\n }\n\n if 'flow0' in stereo_data:\n flow_tensor, valid_tensor = [], []\n for (flow_view, valid_view) in [('flow0', 'valid0'), ('flow1', 'valid1')]:\n flow = 
torch.from_numpy(stereo_data[flow_view])\n flow = torch.unsqueeze(flow, dim=0)\n flow_tensor.append(flow)\n\n valid = torch.from_numpy(stereo_data[valid_view])\n valid = torch.unsqueeze(valid, dim=0)\n valid = valid / 255.0\n valid_tensor.append(valid)\n\n lmain_data['flow'], lmain_data['valid'] = flow_tensor[0], valid_tensor[0]\n rmain_data['flow'], rmain_data['valid'] = flow_tensor[1], valid_tensor[1]\n\n return {'name': subject_name, 'lmain': lmain_data, 'rmain': rmain_data}\n\n def get_item(self, index, novel_id=None):\n sample_id = index % len(self.sample_list)\n sample_name = self.sample_list[sample_id]\n\n if self.use_processed_data:\n stereo_np = self.load_local_stereo_data(sample_name)\n else:\n view0_data = self.load_single_view(sample_name, self.opt.source_id[0], hr_img=False,\n require_mask=True, require_pts=True)\n view1_data = self.load_single_view(sample_name, self.opt.source_id[1], hr_img=False,\n require_mask=True, require_pts=True)\n stereo_np = self.get_rectified_stereo_data(main_view_data=view0_data, ref_view_data=view1_data)\n dict_tensor = self.stereo_to_dict_tensor(stereo_np, sample_name)\n\n if novel_id:\n novel_id = np.random.choice(novel_id)\n dict_tensor.update({\n 'novel_view': self.get_novel_view_tensor(sample_name, novel_id)\n })\n\n return dict_tensor\n\n def get_test_item(self, index, source_id):\n sample_id = index % len(self.sample_list)\n sample_name = self.sample_list[sample_id]\n\n if self.use_processed_data:\n logging.error('test data loader not support processed data')\n\n view0_data = self.load_single_view(sample_name, source_id[0], hr_img=False, require_mask=True, require_pts=False)\n view1_data = self.load_single_view(sample_name, source_id[1], hr_img=False, require_mask=True, require_pts=False)\n lmain_intr_ori, lmain_extr_ori = view0_data[2], view0_data[3]\n rmain_intr_ori, rmain_extr_ori = view1_data[2], view1_data[3]\n stereo_np = self.get_rectified_stereo_data(main_view_data=view0_data, ref_view_data=view1_data)\n dict_tensor = self.stereo_to_dict_tensor(stereo_np, sample_name)\n\n dict_tensor['lmain']['intr_ori'] = torch.FloatTensor(lmain_intr_ori)\n dict_tensor['rmain']['intr_ori'] = torch.FloatTensor(rmain_intr_ori)\n dict_tensor['lmain']['extr_ori'] = torch.FloatTensor(lmain_extr_ori)\n dict_tensor['rmain']['extr_ori'] = torch.FloatTensor(rmain_extr_ori)\n\n img_len = 2048 if self.opt.use_hr_img else 1024\n novel_dict = {\n 'height': torch.IntTensor([img_len]),\n 'width': torch.IntTensor([img_len])\n }\n\n dict_tensor.update({\n 'novel_view': novel_dict\n })\n\n return dict_tensor\n\n def __getitem__(self, index):\n if self.phase == 'train':\n return self.get_item(index, novel_id=self.opt.train_novel_id)\n elif self.phase == 'val':\n return self.get_item(index, novel_id=self.opt.val_novel_id)\n\n def __len__(self):\n self.train_boost = 50\n self.val_boost = 200\n if self.phase == 'train':\n return len(self.sample_list) * self.train_boost\n elif self.phase == 'val':\n return len(self.sample_list) * self.val_boost\n else:\n return len(self.sample_list)"
},
{
"identifier": "RtStereoHumanModel",
"path": "lib/network.py",
"snippet": "class RtStereoHumanModel(nn.Module):\n def __init__(self, cfg, with_gs_render=False):\n super().__init__()\n self.cfg = cfg\n self.with_gs_render = with_gs_render\n self.train_iters = self.cfg.raft.train_iters\n self.val_iters = self.cfg.raft.val_iters\n\n self.img_encoder = UnetExtractor(in_channel=3, encoder_dim=self.cfg.raft.encoder_dims)\n self.raft_stereo = RAFTStereoHuman(self.cfg.raft)\n if self.with_gs_render:\n self.gs_parm_regresser = GSRegresser(self.cfg, rgb_dim=3, depth_dim=1)\n\n def forward(self, data, is_train=True):\n bs = data['lmain']['img'].shape[0]\n\n image = torch.cat([data['lmain']['img'], data['rmain']['img']], dim=0)\n flow = torch.cat([data['lmain']['flow'], data['rmain']['flow']], dim=0) if is_train else None\n valid = torch.cat([data['lmain']['valid'], data['rmain']['valid']], dim=0) if is_train else None\n\n with autocast(enabled=self.cfg.raft.mixed_precision):\n img_feat = self.img_encoder(image)\n\n if is_train:\n flow_predictions = self.raft_stereo(img_feat[2], iters=self.train_iters)\n flow_loss, metrics = sequence_loss(flow_predictions, flow, valid)\n flow_pred_lmain, flow_pred_rmain = torch.split(flow_predictions[-1], [bs, bs])\n\n if not self.with_gs_render:\n data['lmain']['flow_pred'] = flow_pred_lmain.detach()\n data['rmain']['flow_pred'] = flow_pred_rmain.detach()\n return data, flow_loss, metrics\n\n data['lmain']['flow_pred'] = flow_pred_lmain\n data['rmain']['flow_pred'] = flow_pred_rmain\n data = self.flow2gsparms(image, img_feat, data, bs)\n\n return data, flow_loss, metrics\n\n else:\n flow_up = self.raft_stereo(img_feat[2], iters=self.val_iters, test_mode=True)\n flow_loss, metrics = None, None\n\n data['lmain']['flow_pred'] = flow_up[0]\n data['rmain']['flow_pred'] = flow_up[1]\n\n if not self.with_gs_render:\n return data, flow_loss, metrics\n data = self.flow2gsparms(image, img_feat, data, bs)\n\n return data, flow_loss, metrics\n\n def flow2gsparms(self, lr_img, lr_img_feat, data, bs):\n for view in ['lmain', 'rmain']:\n data[view]['depth'] = flow2depth(data[view])\n data[view]['xyz'] = depth2pc(data[view]['depth'], data[view]['extr'], data[view]['intr']).view(bs, -1, 3)\n valid = data[view]['depth'] != 0.0\n data[view]['pts_valid'] = valid.view(bs, -1)\n\n # regress gaussian parms\n lr_depth = torch.concat([data['lmain']['depth'], data['rmain']['depth']], dim=0)\n rot_maps, scale_maps, opacity_maps = self.gs_parm_regresser(lr_img, lr_depth, lr_img_feat)\n\n data['lmain']['rot_maps'], data['rmain']['rot_maps'] = torch.split(rot_maps, [bs, bs])\n data['lmain']['scale_maps'], data['rmain']['scale_maps'] = torch.split(scale_maps, [bs, bs])\n data['lmain']['opacity_maps'], data['rmain']['opacity_maps'] = torch.split(opacity_maps, [bs, bs])\n\n return data"
},
{
"identifier": "ConfigStereoHuman",
"path": "config/stereo_human_config.py",
"snippet": "class ConfigStereoHuman:\r\n def __init__(self):\r\n self.cfg = CN()\r\n self.cfg.name = ''\r\n self.cfg.stage1_ckpt = None\r\n self.cfg.restore_ckpt = None\r\n self.cfg.lr = 0.0\r\n self.cfg.wdecay = 0.0\r\n self.cfg.batch_size = 0\r\n self.cfg.num_steps = 0\r\n\r\n self.cfg.dataset = CN()\r\n self.cfg.dataset.source_id = None\r\n self.cfg.dataset.train_novel_id = None\r\n self.cfg.dataset.val_novel_id = None\r\n self.cfg.dataset.use_hr_img = None\r\n self.cfg.dataset.use_processed_data = None\r\n self.cfg.dataset.data_root = ''\r\n # gsussian render settings\r\n self.cfg.dataset.bg_color = [0, 0, 0]\r\n self.cfg.dataset.zfar = 100.0\r\n self.cfg.dataset.znear = 0.01\r\n self.cfg.dataset.trans = [0.0, 0.0, 0.0]\r\n self.cfg.dataset.scale = 1.0\r\n\r\n self.cfg.raft = CN()\r\n self.cfg.raft.mixed_precision = None\r\n self.cfg.raft.train_iters = 0\r\n self.cfg.raft.val_iters = 0\r\n self.cfg.raft.corr_implementation = 'reg_cuda' # or 'reg'\r\n self.cfg.raft.corr_levels = 4\r\n self.cfg.raft.corr_radius = 4\r\n self.cfg.raft.n_downsample = 3\r\n self.cfg.raft.n_gru_layers = 1\r\n self.cfg.raft.slow_fast_gru = None\r\n self.cfg.raft.encoder_dims = [64, 96, 128]\r\n self.cfg.raft.hidden_dims = [128]*3\r\n\r\n self.cfg.gsnet = CN()\r\n self.cfg.gsnet.encoder_dims = None\r\n self.cfg.gsnet.decoder_dims = None\r\n self.cfg.gsnet.parm_head_dim = None\r\n\r\n self.cfg.record = CN()\r\n self.cfg.record.ckpt_path = None\r\n self.cfg.record.show_path = None\r\n self.cfg.record.logs_path = None\r\n self.cfg.record.file_path = None\r\n self.cfg.record.loss_freq = 0\r\n self.cfg.record.eval_freq = 0\r\n\r\n def get_cfg(self):\r\n return self.cfg.clone()\r\n \r\n def load(self, config_file):\r\n self.cfg.defrost()\r\n self.cfg.merge_from_file(config_file)\r\n self.cfg.freeze()\r"
},
{
"identifier": "Logger",
"path": "lib/train_recoder.py",
"snippet": "class Logger:\n def __init__(self, scheduler, cfg):\n self.scheduler = scheduler\n self.sum_freq = cfg.loss_freq\n self.log_dir = cfg.logs_path\n self.total_steps = 0\n self.running_loss = {}\n self.writer = SummaryWriter(log_dir=self.log_dir)\n\n def _print_training_status(self):\n metrics_data = [self.running_loss[k] / self.sum_freq for k in sorted(self.running_loss.keys())]\n training_str = \"[{:6d}, {:10.7f}] \".format(self.total_steps, self.scheduler.get_last_lr()[0])\n metrics_str = (\"{:10.4f}, \" * len(metrics_data)).format(*metrics_data)\n\n # print the training status\n logging.info(f\"Training Metrics ({self.total_steps}): {training_str + metrics_str}\")\n\n if self.writer is None:\n self.writer = SummaryWriter(log_dir=self.log_dir)\n\n for k in self.running_loss:\n self.writer.add_scalar(k, self.running_loss[k] / self.sum_freq, self.total_steps)\n self.running_loss[k] = 0.0\n\n def push(self, metrics):\n for key in metrics:\n if key not in self.running_loss:\n self.running_loss[key] = 0.0\n\n self.running_loss[key] += metrics[key]\n\n if self.total_steps and self.total_steps % self.sum_freq == 0:\n self._print_training_status()\n self.running_loss = {}\n\n self.total_steps += 1\n\n def write_dict(self, results, write_step):\n if self.writer is None:\n self.writer = SummaryWriter(log_dir=self.log_dir)\n\n for key in results:\n self.writer.add_scalar(key, results[key], write_step)\n\n def close(self):\n self.writer.close()"
},
{
"identifier": "file_backup",
"path": "lib/train_recoder.py",
"snippet": "def file_backup(exp_path, cfg, train_script):\n shutil.copy(train_script, exp_path)\n shutil.copytree('core', os.path.join(exp_path, 'core'), dirs_exist_ok=True)\n shutil.copytree('config', os.path.join(exp_path, 'config'), dirs_exist_ok=True)\n shutil.copytree('gaussian_renderer', os.path.join(exp_path, 'gaussian_renderer'), dirs_exist_ok=True)\n for sub_dir in ['lib']:\n files = os.listdir(sub_dir)\n for file in files:\n Path(os.path.join(exp_path, sub_dir)).mkdir(exist_ok=True, parents=True)\n if file[-3:] == '.py':\n shutil.copy(os.path.join(sub_dir, file), os.path.join(exp_path, sub_dir))\n\n json_file_name = exp_path + '/cfg.json'\n with open(json_file_name, 'w') as json_file:\n json.dump(cfg, json_file, indent=2)"
},
{
"identifier": "pts2render",
"path": "lib/GaussianRender.py",
"snippet": "def pts2render(data, bg_color):\n bs = data['lmain']['img'].shape[0]\n render_novel_list = []\n for i in range(bs):\n xyz_i_valid = []\n rgb_i_valid = []\n rot_i_valid = []\n scale_i_valid = []\n opacity_i_valid = []\n for view in ['lmain', 'rmain']:\n valid_i = data[view]['pts_valid'][i, :]\n xyz_i = data[view]['xyz'][i, :, :]\n rgb_i = data[view]['img'][i, :, :, :].permute(1, 2, 0).view(-1, 3)\n rot_i = data[view]['rot_maps'][i, :, :, :].permute(1, 2, 0).view(-1, 4)\n scale_i = data[view]['scale_maps'][i, :, :, :].permute(1, 2, 0).view(-1, 3)\n opacity_i = data[view]['opacity_maps'][i, :, :, :].permute(1, 2, 0).view(-1, 1)\n\n xyz_i_valid.append(xyz_i[valid_i].view(-1, 3))\n rgb_i_valid.append(rgb_i[valid_i].view(-1, 3))\n rot_i_valid.append(rot_i[valid_i].view(-1, 4))\n scale_i_valid.append(scale_i[valid_i].view(-1, 3))\n opacity_i_valid.append(opacity_i[valid_i].view(-1, 1))\n\n pts_xyz_i = torch.concat(xyz_i_valid, dim=0)\n pts_rgb_i = torch.concat(rgb_i_valid, dim=0)\n pts_rgb_i = pts_rgb_i * 0.5 + 0.5\n rot_i = torch.concat(rot_i_valid, dim=0)\n scale_i = torch.concat(scale_i_valid, dim=0)\n opacity_i = torch.concat(opacity_i_valid, dim=0)\n\n render_novel_i = render(data, i, pts_xyz_i, pts_rgb_i, rot_i, scale_i, opacity_i, bg_color=bg_color)\n render_novel_list.append(render_novel_i.unsqueeze(0))\n\n data['novel_view']['img_pred'] = torch.concat(render_novel_list, dim=0)\n return data"
},
{
"identifier": "l1_loss",
"path": "lib/loss.py",
"snippet": "def l1_loss(network_output, gt):\n return torch.abs((network_output - gt)).mean()"
},
{
"identifier": "ssim",
"path": "lib/loss.py",
"snippet": "def ssim(img1, img2, window_size=11, size_average=True):\n channel = img1.size(-3)\n window = create_window(window_size, channel)\n\n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n\n return _ssim(img1, img2, window, window_size, channel, size_average)"
},
{
"identifier": "psnr",
"path": "lib/loss.py",
"snippet": "def psnr(img1, img2):\n mse = (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True)\n return 20 * torch.log10(1.0 / torch.sqrt(mse))"
}
] | import logging
import numpy as np
import cv2
import os
import torch
import torch.optim as optim
import warnings
from pathlib import Path
from tqdm import tqdm
from datetime import datetime
from lib.human_loader import StereoHumanDataset
from lib.network import RtStereoHumanModel
from config.stereo_human_config import ConfigStereoHuman as config
from lib.train_recoder import Logger, file_backup
from lib.GaussianRender import pts2render
from lib.loss import l1_loss, ssim, psnr
from torch.cuda.amp import GradScaler
from torch.utils.data import DataLoader | 7,676 | from __future__ import print_function, division
warnings.filterwarnings("ignore", category=UserWarning)
class Trainer:
def __init__(self, cfg_file):
self.cfg = cfg_file
| from __future__ import print_function, division
warnings.filterwarnings("ignore", category=UserWarning)
class Trainer:
def __init__(self, cfg_file):
self.cfg = cfg_file
| self.model = RtStereoHumanModel(self.cfg, with_gs_render=True) | 1 | 2023-12-04 06:12:57+00:00 | 12k |
EricGuo5513/momask-codes | visualization/InverseKinematics.py | [
{
"identifier": "Animation",
"path": "visualization/Animation.py",
"snippet": "class Animation:\n \"\"\"\n Animation is a numpy-like wrapper for animation data\n\n Animation data consists of several arrays consisting\n of F frames and J joints.\n\n The animation is specified by\n\n rotations : (F, J) Quaternions | Joint Rotations\n positions : (F, J, 3) ndarray | Joint Positions\n\n The base pose is specified by\n\n orients : (J) Quaternions | Joint Orientations\n offsets : (J, 3) ndarray | Joint Offsets\n\n And the skeletal structure is specified by\n\n parents : (J) ndarray | Joint Parents\n \"\"\"\n\n def __init__(self, rotations, positions, orients, offsets, parents, names, frametime):\n\n self.rotations = rotations\n self.positions = positions\n self.orients = orients\n self.offsets = offsets\n self.parents = parents\n self.names = names\n self.frametime = frametime\n\n def __op__(self, op, other):\n return Animation(\n op(self.rotations, other.rotations),\n op(self.positions, other.positions),\n op(self.orients, other.orients),\n op(self.offsets, other.offsets),\n op(self.parents, other.parents))\n\n def __iop__(self, op, other):\n self.rotations = op(self.roations, other.rotations)\n self.positions = op(self.roations, other.positions)\n self.orients = op(self.orients, other.orients)\n self.offsets = op(self.offsets, other.offsets)\n self.parents = op(self.parents, other.parents)\n return self\n\n def __sop__(self, op):\n return Animation(\n op(self.rotations),\n op(self.positions),\n op(self.orients),\n op(self.offsets),\n op(self.parents))\n\n def __add__(self, other):\n return self.__op__(operator.add, other)\n\n def __sub__(self, other):\n return self.__op__(operator.sub, other)\n\n def __mul__(self, other):\n return self.__op__(operator.mul, other)\n\n def __div__(self, other):\n return self.__op__(operator.div, other)\n\n def __abs__(self):\n return self.__sop__(operator.abs)\n\n def __neg__(self):\n return self.__sop__(operator.neg)\n\n def __iadd__(self, other):\n return self.__iop__(operator.iadd, other)\n\n def __isub__(self, other):\n return self.__iop__(operator.isub, other)\n\n def __imul__(self, other):\n return self.__iop__(operator.imul, other)\n\n def __idiv__(self, other):\n return self.__iop__(operator.idiv, other)\n\n def __len__(self):\n return len(self.rotations)\n\n def __getitem__(self, k):\n if isinstance(k, tuple):\n return Animation(\n self.rotations[k],\n self.positions[k],\n self.orients[k[1:]],\n self.offsets[k[1:]],\n self.parents[k[1:]],\n self.names[k[1:]],\n self.frametime[k[1:]])\n else:\n return Animation(\n self.rotations[k],\n self.positions[k],\n self.orients,\n self.offsets,\n self.parents,\n self.names,\n self.frametime)\n\n def __setitem__(self, k, v):\n if isinstance(k, tuple):\n self.rotations.__setitem__(k, v.rotations)\n self.positions.__setitem__(k, v.positions)\n self.orients.__setitem__(k[1:], v.orients)\n self.offsets.__setitem__(k[1:], v.offsets)\n self.parents.__setitem__(k[1:], v.parents)\n else:\n self.rotations.__setitem__(k, v.rotations)\n self.positions.__setitem__(k, v.positions)\n self.orients.__setitem__(k, v.orients)\n self.offsets.__setitem__(k, v.offsets)\n self.parents.__setitem__(k, v.parents)\n\n @property\n def shape(self):\n return (self.rotations.shape[0], self.rotations.shape[1])\n\n def copy(self):\n return Animation(\n self.rotations.copy(), self.positions.copy(),\n self.orients.copy(), self.offsets.copy(),\n self.parents.copy(), self.names,\n self.frametime)\n\n def repeat(self, *args, **kw):\n return Animation(\n self.rotations.repeat(*args, **kw),\n 
self.positions.repeat(*args, **kw),\n self.orients, self.offsets, self.parents, self.frametime, self.names)\n\n def ravel(self):\n return np.hstack([\n self.rotations.log().ravel(),\n self.positions.ravel(),\n self.orients.log().ravel(),\n self.offsets.ravel()])\n\n @classmethod\n def unravel(cls, anim, shape, parents):\n nf, nj = shape\n rotations = anim[nf * nj * 0:nf * nj * 3]\n positions = anim[nf * nj * 3:nf * nj * 6]\n orients = anim[nf * nj * 6 + nj * 0:nf * nj * 6 + nj * 3]\n offsets = anim[nf * nj * 6 + nj * 3:nf * nj * 6 + nj * 6]\n return cls(\n Quaternions.exp(rotations), positions,\n Quaternions.exp(orients), offsets,\n parents.copy())"
},
{
"identifier": "AnimationStructure",
"path": "visualization/AnimationStructure.py",
"snippet": "def joints(parents):\ndef joints_list(parents):\ndef parents_list(parents):\ndef children_list(parents):\n def joint_children(i):\ndef descendants_list(parents):\n def joint_descendants(i):\ndef ancestors_list(parents):\n def joint_ancestors(i):\ndef mask(parents, filter):\ndef joints_mask(parents): return np.eye(len(parents)).astype(bool)\ndef children_mask(parents): return mask(parents, children_list)\ndef parents_mask(parents): return mask(parents, parents_list)\ndef descendants_mask(parents): return mask(parents, descendants_list)\ndef ancestors_mask(parents): return mask(parents, ancestors_list)\ndef joint_chain_ascend(parents, start, end):\ndef constraints(anim, **kwargs):\ndef graph(anim):\ndef distances(anim):\n def find_distance(distances, generated, prev, i, j):\ndef edges(parents):\ndef incidence(parents):"
},
{
"identifier": "Quaternions",
"path": "visualization/Quaternions.py",
"snippet": "class Quaternions:\n \"\"\"\n Quaternions is a wrapper around a numpy ndarray\n that allows it to act as if it were an narray of\n a quater data type.\n\n Therefore addition, subtraction, multiplication,\n division, negation, absolute, are all defined\n in terms of quater operations such as quater\n multiplication.\n\n This allows for much neater code and many routines\n which conceptually do the same thing to be written\n in the same way for point data and for rotation data.\n\n The Quaternions class has been desgined such that it\n should support broadcasting and slicing in all of the\n usual ways.\n \"\"\"\n\n def __init__(self, qs):\n if isinstance(qs, np.ndarray):\n if len(qs.shape) == 1: qs = np.array([qs])\n self.qs = qs\n return\n\n if isinstance(qs, Quaternions):\n self.qs = qs\n return\n\n raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs))\n\n def __str__(self):\n return \"Quaternions(\" + str(self.qs) + \")\"\n\n def __repr__(self):\n return \"Quaternions(\" + repr(self.qs) + \")\"\n\n \"\"\" Helper Methods for Broadcasting and Data extraction \"\"\"\n\n @classmethod\n def _broadcast(cls, sqs, oqs, scalar=False):\n if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1])\n\n ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1])\n os = np.array(oqs.shape)\n\n if len(ss) != len(os):\n raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))\n\n if np.all(ss == os): return sqs, oqs\n\n if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))):\n raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))\n\n sqsn, oqsn = sqs.copy(), oqs.copy()\n\n for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a)\n for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a)\n\n return sqsn, oqsn\n\n \"\"\" Adding Quaterions is just Defined as Multiplication \"\"\"\n\n def __add__(self, other):\n return self * other\n\n def __sub__(self, other):\n return self / other\n\n \"\"\" Quaterion Multiplication \"\"\"\n\n def __mul__(self, other):\n \"\"\"\n Quaternion multiplication has three main methods.\n\n When multiplying a Quaternions array by Quaternions\n normal quater multiplication is performed.\n\n When multiplying a Quaternions array by a vector\n array of the same shape, where the last axis is 3,\n it is assumed to be a Quaternion by 3D-Vector\n multiplication and the 3D-Vectors are rotated\n in space by the Quaternions.\n\n When multipplying a Quaternions array by a scalar\n or vector of different shape it is assumed to be\n a Quaternions by Scalars multiplication and the\n Quaternions are scaled using Slerp and the identity\n quaternions.\n \"\"\"\n\n \"\"\" If Quaternions type do Quaternions * Quaternions \"\"\"\n if isinstance(other, Quaternions):\n sqs, oqs = Quaternions._broadcast(self.qs, other.qs)\n\n q0 = sqs[..., 0];\n q1 = sqs[..., 1];\n q2 = sqs[..., 2];\n q3 = sqs[..., 3];\n r0 = oqs[..., 0];\n r1 = oqs[..., 1];\n r2 = oqs[..., 2];\n r3 = oqs[..., 3];\n\n qs = np.empty(sqs.shape)\n qs[..., 0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3\n qs[..., 1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2\n qs[..., 2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1\n qs[..., 3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0\n\n return Quaternions(qs)\n\n \"\"\" If array type do Quaternions * Vectors \"\"\"\n if isinstance(other, np.ndarray) and other.shape[-1] == 3:\n vs = 
Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1))\n\n return (self * (vs * -self)).imaginaries\n\n \"\"\" If float do Quaternions * Scalars \"\"\"\n if isinstance(other, np.ndarray) or isinstance(other, float):\n return Quaternions.slerp(Quaternions.id_like(self), self, other)\n\n raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other)))\n\n def __div__(self, other):\n \"\"\"\n When a Quaternion type is supplied, division is defined\n as multiplication by the inverse of that Quaternion.\n\n When a scalar or vector is supplied it is defined\n as multiplicaion of one over the supplied value.\n Essentially a scaling.\n \"\"\"\n\n if isinstance(other, Quaternions): return self * (-other)\n if isinstance(other, np.ndarray): return self * (1.0 / other)\n if isinstance(other, float): return self * (1.0 / other)\n raise TypeError('Cannot divide/subtract Quaternions with type %s' + str(type(other)))\n\n def __eq__(self, other):\n return self.qs == other.qs\n\n def __ne__(self, other):\n return self.qs != other.qs\n\n def __neg__(self):\n \"\"\" Invert Quaternions \"\"\"\n return Quaternions(self.qs * np.array([[1, -1, -1, -1]]))\n\n def __abs__(self):\n \"\"\" Unify Quaternions To Single Pole \"\"\"\n qabs = self.normalized().copy()\n top = np.sum((qabs.qs) * np.array([1, 0, 0, 0]), axis=-1)\n bot = np.sum((-qabs.qs) * np.array([1, 0, 0, 0]), axis=-1)\n qabs.qs[top < bot] = -qabs.qs[top < bot]\n return qabs\n\n def __iter__(self):\n return iter(self.qs)\n\n def __len__(self):\n return len(self.qs)\n\n def __getitem__(self, k):\n return Quaternions(self.qs[k])\n\n def __setitem__(self, k, v):\n self.qs[k] = v.qs\n\n @property\n def lengths(self):\n return np.sum(self.qs ** 2.0, axis=-1) ** 0.5\n\n @property\n def reals(self):\n return self.qs[..., 0]\n\n @property\n def imaginaries(self):\n return self.qs[..., 1:4]\n\n @property\n def shape(self):\n return self.qs.shape[:-1]\n\n def repeat(self, n, **kwargs):\n return Quaternions(self.qs.repeat(n, **kwargs))\n\n def normalized(self):\n return Quaternions(self.qs / self.lengths[..., np.newaxis])\n\n def log(self):\n norm = abs(self.normalized())\n imgs = norm.imaginaries\n lens = np.sqrt(np.sum(imgs ** 2, axis=-1))\n lens = np.arctan2(lens, norm.reals) / (lens + 1e-10)\n return imgs * lens[..., np.newaxis]\n\n def constrained(self, axis):\n\n rl = self.reals\n im = np.sum(axis * self.imaginaries, axis=-1)\n\n t1 = -2 * np.arctan2(rl, im) + np.pi\n t2 = -2 * np.arctan2(rl, im) - np.pi\n\n top = Quaternions.exp(axis[np.newaxis] * (t1[:, np.newaxis] / 2.0))\n bot = Quaternions.exp(axis[np.newaxis] * (t2[:, np.newaxis] / 2.0))\n img = self.dot(top) > self.dot(bot)\n\n ret = top.copy()\n ret[img] = top[img]\n ret[~img] = bot[~img]\n return ret\n\n def constrained_x(self):\n return self.constrained(np.array([1, 0, 0]))\n\n def constrained_y(self):\n return self.constrained(np.array([0, 1, 0]))\n\n def constrained_z(self):\n return self.constrained(np.array([0, 0, 1]))\n\n def dot(self, q):\n return np.sum(self.qs * q.qs, axis=-1)\n\n def copy(self):\n return Quaternions(np.copy(self.qs))\n\n def reshape(self, s):\n self.qs.reshape(s)\n return self\n\n def interpolate(self, ws):\n return Quaternions.exp(np.average(abs(self).log, axis=0, weights=ws))\n\n def euler(self, order='xyz'): # fix the wrong convert, this should convert to world euler by default.\n\n q = self.normalized().qs\n q0 = q[..., 0]\n q1 = q[..., 1]\n q2 = q[..., 2]\n q3 = q[..., 3]\n es = np.zeros(self.shape + (3,))\n\n if order == 
'xyz':\n es[..., 0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))\n es[..., 1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1, 1))\n es[..., 2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))\n elif order == 'yzx':\n es[..., 0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)\n es[..., 1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0)\n es[..., 2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1, 1))\n else:\n raise NotImplementedError('Cannot convert from ordering %s' % order)\n\n \"\"\"\n\n # These conversion don't appear to work correctly for Maya.\n # http://bediyap.com/programming/convert-quaternion-to-euler-rotations/\n\n if order == 'xyz':\n es[fa + (0,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)\n es[fa + (1,)] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1))\n es[fa + (2,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)\n elif order == 'yzx':\n es[fa + (0,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)\n es[fa + (1,)] = np.arcsin((2 * (q1 * q2 + q0 * q3)).clip(-1,1))\n es[fa + (2,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)\n elif order == 'zxy':\n es[fa + (0,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)\n es[fa + (1,)] = np.arcsin((2 * (q0 * q1 + q2 * q3)).clip(-1,1))\n es[fa + (2,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3) \n elif order == 'xzy':\n es[fa + (0,)] = np.arctan2(2 * (q0 * q2 + q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)\n es[fa + (1,)] = np.arcsin((2 * (q0 * q3 - q1 * q2)).clip(-1,1))\n es[fa + (2,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)\n elif order == 'yxz':\n es[fa + (0,)] = np.arctan2(2 * (q1 * q2 + q0 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)\n es[fa + (1,)] = np.arcsin((2 * (q0 * q1 - q2 * q3)).clip(-1,1))\n es[fa + (2,)] = np.arctan2(2 * (q1 * q3 + q0 * q2), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)\n elif order == 'zyx':\n es[fa + (0,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)\n es[fa + (1,)] = np.arcsin((2 * (q0 * q2 - q1 * q3)).clip(-1,1))\n es[fa + (2,)] = np.arctan2(2 * (q0 * q3 + q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)\n else:\n raise KeyError('Unknown ordering %s' % order)\n\n \"\"\"\n\n # https://github.com/ehsan/ogre/blob/master/OgreMain/src/OgreMatrix3.cpp\n # Use this class and convert from matrix\n\n return es\n\n def average(self):\n\n if len(self.shape) == 1:\n\n import numpy.core.umath_tests as ut\n system = ut.matrix_multiply(self.qs[:, :, np.newaxis], self.qs[:, np.newaxis, :]).sum(axis=0)\n w, v = np.linalg.eigh(system)\n qiT_dot_qref = (self.qs[:, :, np.newaxis] * v[np.newaxis, :, :]).sum(axis=1)\n return Quaternions(v[:, np.argmin((1. 
- qiT_dot_qref ** 2).sum(axis=0))])\n\n else:\n\n raise NotImplementedError('Cannot average multi-dimensionsal Quaternions')\n\n def angle_axis(self):\n\n norm = self.normalized()\n s = np.sqrt(1 - (norm.reals ** 2.0))\n s[s == 0] = 0.001\n\n angles = 2.0 * np.arccos(norm.reals)\n axis = norm.imaginaries / s[..., np.newaxis]\n\n return angles, axis\n\n def transforms(self):\n\n qw = self.qs[..., 0]\n qx = self.qs[..., 1]\n qy = self.qs[..., 2]\n qz = self.qs[..., 3]\n\n x2 = qx + qx;\n y2 = qy + qy;\n z2 = qz + qz;\n xx = qx * x2;\n yy = qy * y2;\n wx = qw * x2;\n xy = qx * y2;\n yz = qy * z2;\n wy = qw * y2;\n xz = qx * z2;\n zz = qz * z2;\n wz = qw * z2;\n\n m = np.empty(self.shape + (3, 3))\n m[..., 0, 0] = 1.0 - (yy + zz)\n m[..., 0, 1] = xy - wz\n m[..., 0, 2] = xz + wy\n m[..., 1, 0] = xy + wz\n m[..., 1, 1] = 1.0 - (xx + zz)\n m[..., 1, 2] = yz - wx\n m[..., 2, 0] = xz - wy\n m[..., 2, 1] = yz + wx\n m[..., 2, 2] = 1.0 - (xx + yy)\n\n return m\n\n def ravel(self):\n return self.qs.ravel()\n\n @classmethod\n def id(cls, n):\n\n if isinstance(n, tuple):\n qs = np.zeros(n + (4,))\n qs[..., 0] = 1.0\n return Quaternions(qs)\n\n if isinstance(n, int):\n qs = np.zeros((n, 4))\n qs[:, 0] = 1.0\n return Quaternions(qs)\n\n raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n)))\n\n @classmethod\n def id_like(cls, a):\n qs = np.zeros(a.shape + (4,))\n qs[..., 0] = 1.0\n return Quaternions(qs)\n\n @classmethod\n def exp(cls, ws):\n\n ts = np.sum(ws ** 2.0, axis=-1) ** 0.5\n ts[ts == 0] = 0.001\n ls = np.sin(ts) / ts\n\n qs = np.empty(ws.shape[:-1] + (4,))\n qs[..., 0] = np.cos(ts)\n qs[..., 1] = ws[..., 0] * ls\n qs[..., 2] = ws[..., 1] * ls\n qs[..., 3] = ws[..., 2] * ls\n\n return Quaternions(qs).normalized()\n\n @classmethod\n def slerp(cls, q0s, q1s, a):\n\n fst, snd = cls._broadcast(q0s.qs, q1s.qs)\n fst, a = cls._broadcast(fst, a, scalar=True)\n snd, a = cls._broadcast(snd, a, scalar=True)\n\n len = np.sum(fst * snd, axis=-1)\n\n neg = len < 0.0\n len[neg] = -len[neg]\n snd[neg] = -snd[neg]\n\n amount0 = np.zeros(a.shape)\n amount1 = np.zeros(a.shape)\n\n linear = (1.0 - len) < 0.01\n omegas = np.arccos(len[~linear])\n sinoms = np.sin(omegas)\n\n amount0[linear] = 1.0 - a[linear]\n amount1[linear] = a[linear]\n amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms\n amount1[~linear] = np.sin(a[~linear] * omegas) / sinoms\n\n return Quaternions(\n amount0[..., np.newaxis] * fst +\n amount1[..., np.newaxis] * snd)\n\n @classmethod\n def between(cls, v0s, v1s):\n a = np.cross(v0s, v1s)\n w = np.sqrt((v0s ** 2).sum(axis=-1) * (v1s ** 2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1)\n return Quaternions(np.concatenate([w[..., np.newaxis], a], axis=-1)).normalized()\n\n @classmethod\n def from_angle_axis(cls, angles, axis):\n axis = axis / (np.sqrt(np.sum(axis ** 2, axis=-1)) + 1e-10)[..., np.newaxis]\n sines = np.sin(angles / 2.0)[..., np.newaxis]\n cosines = np.cos(angles / 2.0)[..., np.newaxis]\n return Quaternions(np.concatenate([cosines, axis * sines], axis=-1))\n\n @classmethod\n def from_euler(cls, es, order='xyz', world=False):\n\n axis = {\n 'x': np.array([1, 0, 0]),\n 'y': np.array([0, 1, 0]),\n 'z': np.array([0, 0, 1]),\n }\n\n q0s = Quaternions.from_angle_axis(es[..., 0], axis[order[0]])\n q1s = Quaternions.from_angle_axis(es[..., 1], axis[order[1]])\n q2s = Quaternions.from_angle_axis(es[..., 2], axis[order[2]])\n\n return (q2s * (q1s * q0s)) if world else (q0s * (q1s * q2s))\n\n @classmethod\n def from_transforms(cls, ts):\n\n d0, d1, d2 = ts[..., 0, 0], 
ts[..., 1, 1], ts[..., 2, 2]\n\n q0 = (d0 + d1 + d2 + 1.0) / 4.0\n q1 = (d0 - d1 - d2 + 1.0) / 4.0\n q2 = (-d0 + d1 - d2 + 1.0) / 4.0\n q3 = (-d0 - d1 + d2 + 1.0) / 4.0\n\n q0 = np.sqrt(q0.clip(0, None))\n q1 = np.sqrt(q1.clip(0, None))\n q2 = np.sqrt(q2.clip(0, None))\n q3 = np.sqrt(q3.clip(0, None))\n\n c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3)\n c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3)\n c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3)\n c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2)\n\n q1[c0] *= np.sign(ts[c0, 2, 1] - ts[c0, 1, 2])\n q2[c0] *= np.sign(ts[c0, 0, 2] - ts[c0, 2, 0])\n q3[c0] *= np.sign(ts[c0, 1, 0] - ts[c0, 0, 1])\n\n q0[c1] *= np.sign(ts[c1, 2, 1] - ts[c1, 1, 2])\n q2[c1] *= np.sign(ts[c1, 1, 0] + ts[c1, 0, 1])\n q3[c1] *= np.sign(ts[c1, 0, 2] + ts[c1, 2, 0])\n\n q0[c2] *= np.sign(ts[c2, 0, 2] - ts[c2, 2, 0])\n q1[c2] *= np.sign(ts[c2, 1, 0] + ts[c2, 0, 1])\n q3[c2] *= np.sign(ts[c2, 2, 1] + ts[c2, 1, 2])\n\n q0[c3] *= np.sign(ts[c3, 1, 0] - ts[c3, 0, 1])\n q1[c3] *= np.sign(ts[c3, 2, 0] + ts[c3, 0, 2])\n q2[c3] *= np.sign(ts[c3, 2, 1] + ts[c3, 1, 2])\n\n qs = np.empty(ts.shape[:-2] + (4,))\n qs[..., 0] = q0\n qs[..., 1] = q1\n qs[..., 2] = q2\n qs[..., 3] = q3\n\n return cls(qs)"
}
] | import numpy as np
import scipy.linalg as linalg
import torch
from visualization import Animation
from visualization import AnimationStructure
from visualization.Quaternions import Quaternions
from torch import nn | 8,368 |
class BasicInverseKinematics:
"""
Basic Inverse Kinematics Solver
This is an extremely simple full body IK
solver.
It works given the following conditions:
* All joint targets must be specified
* All joint targets must be in reach
* All joint targets must not differ
extremely from the starting pose
* No bone length constraints can be violated
* The root translation and rotation are
set to good initial values
It works under the observation that if the
_directions_ the joints are pointing toward
match the _directions_ of the vectors between
the target joints then the pose should match
that of the target pose.
Therefore it iterates over joints rotating
each joint such that the vectors between it
    and its children match those of the target
positions.
Parameters
----------
animation : Animation
animation input
positions : (F, J, 3) ndarray
target positions for each frame F
and each joint J
iterations : int
Optional number of iterations.
If the above conditions are met
1 iteration should be enough,
therefore the default is 1
silent : bool
    Optional whether to suppress output
defaults to False
"""
def __init__(self, animation, positions, iterations=1, silent=True):
self.animation = animation
self.positions = positions
self.iterations = iterations
self.silent = silent
def __call__(self):
children = AnimationStructure.children_list(self.animation.parents)
for i in range(self.iterations):
for j in AnimationStructure.joints(self.animation.parents):
c = np.array(children[j])
if len(c) == 0: continue
|
class BasicInverseKinematics:
"""
Basic Inverse Kinematics Solver
This is an extremely simple full body IK
solver.
It works given the following conditions:
* All joint targets must be specified
* All joint targets must be in reach
* All joint targets must not differ
extremely from the starting pose
* No bone length constraints can be violated
* The root translation and rotation are
set to good initial values
It works under the observation that if the
_directions_ the joints are pointing toward
match the _directions_ of the vectors between
the target joints then the pose should match
that of the target pose.
Therefore it iterates over joints rotating
each joint such that the vectors between it
    and its children match those of the target
positions.
Parameters
----------
animation : Animation
animation input
positions : (F, J, 3) ndarray
target positions for each frame F
and each joint J
iterations : int
Optional number of iterations.
If the above conditions are met
1 iteration should be enough,
therefore the default is 1
silent : bool
    Optional whether to suppress output
defaults to False
"""
def __init__(self, animation, positions, iterations=1, silent=True):
self.animation = animation
self.positions = positions
self.iterations = iterations
self.silent = silent
def __call__(self):
children = AnimationStructure.children_list(self.animation.parents)
for i in range(self.iterations):
for j in AnimationStructure.joints(self.animation.parents):
c = np.array(children[j])
if len(c) == 0: continue
| anim_transforms = Animation.transforms_global(self.animation) | 0 | 2023-11-29 19:21:27+00:00 | 12k |
Doubiiu/DynamiCrafter | scripts/evaluation/inference.py | [
{
"identifier": "DDIMSampler",
"path": "lvdm/models/samplers/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.counter = 0\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n if self.model.use_dynamic_rescale:\n self.ddim_scale_arr = self.model.scale_arr[self.ddim_timesteps]\n self.ddim_scale_arr_prev = torch.cat([self.ddim_scale_arr[0:1], self.ddim_scale_arr[:-1]])\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n schedule_verbose=False,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n precision=None,\n fs=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n \n # check condition bs\n if conditioning is not None:\n if isinstance(conditioning, dict):\n try:\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n except:\n cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]\n\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=schedule_verbose)\n \n # make shape\n if len(shape) == 3:\n C, H, W = shape\n size = (batch_size, C, H, W)\n elif len(shape) == 4:\n C, T, H, W = shape\n size = (batch_size, C, T, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n \n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n verbose=verbose,\n precision=precision,\n fs=fs,\n **kwargs)\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, verbose=True,precision=None,fs=None,\n **kwargs):\n device = self.model.betas.device \n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n if precision is not None:\n if precision == 16:\n img = img.to(dtype=torch.float16)\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n \n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n if verbose:\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n else:\n iterator = time_range\n\n clean_cond = kwargs.pop(\"clean_cond\", False)\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n ## use mask to blend noised original latent (img_orig) & new sampled latent (img)\n if mask is not None:\n assert x0 is not None\n if clean_cond:\n img_orig = x0\n else:\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? <ddim inversion>\n img = img_orig * mask + (1. 
- mask) * img # keep original & modify use img\n \n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n mask=mask,x0=x0,fs=fs,\n **kwargs)\n \n\n\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n uc_type=None, conditional_guidance_scale_temporal=None,mask=None,x0=None, **kwargs):\n b, *_, device = *x.shape, x.device\n if x.dim() == 5:\n is_video = True\n else:\n is_video = False\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, **kwargs) # unet denoiser\n else:\n ### with unconditional condition\n if isinstance(c, torch.Tensor) or isinstance(c, dict):\n e_t_cond = self.model.apply_model(x, t, c, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n else:\n raise NotImplementedError\n\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t_cond - e_t_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, e_t)\n\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n \n if is_video:\n size = (b, 1, 1, 1, 1)\n else:\n size = (b, 1, 1, 1)\n a_t = torch.full(size, alphas[index], device=device)\n a_prev = torch.full(size, alphas_prev[index], device=device)\n sigma_t = torch.full(size, sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(size, sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, e_t)\n \n if self.model.use_dynamic_rescale:\n scale_t = torch.full(size, self.ddim_scale_arr[index], device=device)\n prev_scale_t = torch.full(size, self.ddim_scale_arr_prev[index], device=device)\n rescale = (prev_scale_t / scale_t)\n pred_x0 *= rescale\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n \n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n\n return x_prev, pred_x0\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)"
},
{
"identifier": "DDIMSampler",
"path": "lvdm/models/samplers/ddim_multiplecond.py",
"snippet": "class DDIMSampler(object):\r\n def __init__(self, model, schedule=\"linear\", **kwargs):\r\n super().__init__()\r\n self.model = model\r\n self.ddpm_num_timesteps = model.num_timesteps\r\n self.schedule = schedule\r\n self.counter = 0\r\n\r\n def register_buffer(self, name, attr):\r\n if type(attr) == torch.Tensor:\r\n if attr.device != torch.device(\"cuda\"):\r\n attr = attr.to(torch.device(\"cuda\"))\r\n setattr(self, name, attr)\r\n\r\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\r\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\r\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\r\n alphas_cumprod = self.model.alphas_cumprod\r\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\r\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\r\n\r\n self.register_buffer('betas', to_torch(self.model.betas))\r\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\r\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\r\n\r\n # calculations for diffusion q(x_t | x_{t-1}) and others\r\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\r\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\r\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\r\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\r\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\r\n\r\n # ddim sampling parameters\r\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\r\n ddim_timesteps=self.ddim_timesteps,\r\n eta=ddim_eta,verbose=verbose)\r\n self.register_buffer('ddim_sigmas', ddim_sigmas)\r\n self.register_buffer('ddim_alphas', ddim_alphas)\r\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\r\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\r\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\r\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\r\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\r\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\r\n\r\n @torch.no_grad()\r\n def sample(self,\r\n S,\r\n batch_size,\r\n shape,\r\n conditioning=None,\r\n callback=None,\r\n normals_sequence=None,\r\n img_callback=None,\r\n quantize_x0=False,\r\n eta=0.,\r\n mask=None,\r\n x0=None,\r\n temperature=1.,\r\n noise_dropout=0.,\r\n score_corrector=None,\r\n corrector_kwargs=None,\r\n verbose=True,\r\n schedule_verbose=False,\r\n x_T=None,\r\n log_every_t=100,\r\n unconditional_guidance_scale=1.,\r\n unconditional_conditioning=None,\r\n precision=None,\r\n fs=None,\r\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\r\n **kwargs\r\n ):\r\n \r\n # check condition bs\r\n if conditioning is not None:\r\n if isinstance(conditioning, dict):\r\n try:\r\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\r\n except:\r\n cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]\r\n\r\n if cbs != batch_size:\r\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\r\n else:\r\n if conditioning.shape[0] != batch_size:\r\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\r\n\r\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=schedule_verbose)\r\n \r\n # make shape\r\n if len(shape) == 3:\r\n C, H, W = shape\r\n size = (batch_size, C, H, W)\r\n elif len(shape) == 4:\r\n C, T, H, W = shape\r\n size = (batch_size, C, T, H, W)\r\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\r\n \r\n samples, intermediates = self.ddim_sampling(conditioning, size,\r\n callback=callback,\r\n img_callback=img_callback,\r\n quantize_denoised=quantize_x0,\r\n mask=mask, x0=x0,\r\n ddim_use_original_steps=False,\r\n noise_dropout=noise_dropout,\r\n temperature=temperature,\r\n score_corrector=score_corrector,\r\n corrector_kwargs=corrector_kwargs,\r\n x_T=x_T,\r\n log_every_t=log_every_t,\r\n unconditional_guidance_scale=unconditional_guidance_scale,\r\n unconditional_conditioning=unconditional_conditioning,\r\n verbose=verbose,\r\n precision=precision,\r\n fs=fs,\r\n **kwargs)\r\n return samples, intermediates\r\n\r\n @torch.no_grad()\r\n def ddim_sampling(self, cond, shape,\r\n x_T=None, ddim_use_original_steps=False,\r\n callback=None, timesteps=None, quantize_denoised=False,\r\n mask=None, x0=None, img_callback=None, log_every_t=100,\r\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\r\n unconditional_guidance_scale=1., unconditional_conditioning=None, verbose=True,precision=None,fs=None,\r\n **kwargs):\r\n device = self.model.betas.device \r\n b = shape[0]\r\n if x_T is None:\r\n img = torch.randn(shape, device=device)\r\n else:\r\n img = x_T\r\n if precision is not None:\r\n if precision == 16:\r\n img = img.to(dtype=torch.float16)\r\n\r\n \r\n if timesteps is None:\r\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\r\n elif timesteps is not None and not ddim_use_original_steps:\r\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\r\n timesteps = self.ddim_timesteps[:subset_end]\r\n \r\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\r\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\r\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\r\n if verbose:\r\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\r\n else:\r\n iterator = time_range\r\n\r\n clean_cond = kwargs.pop(\"clean_cond\", False)\r\n for i, step in enumerate(iterator):\r\n index = total_steps - i - 1\r\n ts = torch.full((b,), step, device=device, dtype=torch.long)\r\n\r\n ## use mask to blend noised original latent (img_orig) & new sampled latent (img)\r\n if mask is not None:\r\n assert x0 is not None\r\n if clean_cond:\r\n img_orig = x0\r\n else:\r\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? <ddim inversion>\r\n img = img_orig * mask + (1. 
- mask) * img # keep original & modify use img\r\n \r\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\r\n quantize_denoised=quantize_denoised, temperature=temperature,\r\n noise_dropout=noise_dropout, score_corrector=score_corrector,\r\n corrector_kwargs=corrector_kwargs,\r\n unconditional_guidance_scale=unconditional_guidance_scale,\r\n unconditional_conditioning=unconditional_conditioning,\r\n mask=mask,x0=x0,fs=fs,\r\n **kwargs)\r\n \r\n\r\n\r\n img, pred_x0 = outs\r\n if callback: callback(i)\r\n if img_callback: img_callback(pred_x0, i)\r\n\r\n if index % log_every_t == 0 or index == total_steps - 1:\r\n intermediates['x_inter'].append(img)\r\n intermediates['pred_x0'].append(pred_x0)\r\n\r\n return img, intermediates\r\n\r\n @torch.no_grad()\r\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\r\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\r\n unconditional_guidance_scale=1., unconditional_conditioning=None,\r\n uc_type=None, cfg_img=None,mask=None,x0=None, **kwargs):\r\n b, *_, device = *x.shape, x.device\r\n if x.dim() == 5:\r\n is_video = True\r\n else:\r\n is_video = False\r\n if cfg_img is None:\r\n cfg_img = unconditional_guidance_scale\r\n\r\n unconditional_conditioning_img_nonetext = kwargs['unconditional_conditioning_img_nonetext']\r\n\r\n \r\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\r\n e_t = self.model.apply_model(x, t, c, **kwargs) # unet denoiser\r\n else:\r\n ### with unconditional condition\r\n e_t_cond = self.model.apply_model(x, t, c, **kwargs)\r\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\r\n e_t_uncond_img = self.model.apply_model(x, t, unconditional_conditioning_img_nonetext, **kwargs)\r\n # text cfg\r\n e_t = e_t_uncond + cfg_img * (e_t_uncond_img - e_t_uncond) + unconditional_guidance_scale * (e_t_cond - e_t_uncond_img)\r\n\r\n if self.model.parameterization == \"v\":\r\n e_t = self.model.predict_eps_from_z_and_v(x, t, e_t)\r\n\r\n\r\n if score_corrector is not None:\r\n assert self.model.parameterization == \"eps\"\r\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\r\n\r\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\r\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\r\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\r\n sigmas = self.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\r\n # select parameters corresponding to the currently considered timestep\r\n \r\n if is_video:\r\n size = (b, 1, 1, 1, 1)\r\n else:\r\n size = (b, 1, 1, 1)\r\n a_t = torch.full(size, alphas[index], device=device)\r\n a_prev = torch.full(size, alphas_prev[index], device=device)\r\n sigma_t = torch.full(size, sigmas[index], device=device)\r\n sqrt_one_minus_at = torch.full(size, sqrt_one_minus_alphas[index],device=device)\r\n\r\n # current prediction for x_0\r\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\r\n\r\n if quantize_denoised:\r\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\r\n # direction pointing to x_t\r\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\r\n\r\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\r\n if noise_dropout > 0.:\r\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\r\n \r\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\r\n\r\n return x_prev, pred_x0\r\n\r\n @torch.no_grad()\r\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\r\n use_original_steps=False, callback=None):\r\n\r\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\r\n timesteps = timesteps[:t_start]\r\n\r\n time_range = np.flip(timesteps)\r\n total_steps = timesteps.shape[0]\r\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\r\n\r\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\r\n x_dec = x_latent\r\n for i, step in enumerate(iterator):\r\n index = total_steps - i - 1\r\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\r\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\r\n unconditional_guidance_scale=unconditional_guidance_scale,\r\n unconditional_conditioning=unconditional_conditioning)\r\n if callback: callback(i)\r\n return x_dec\r\n\r\n @torch.no_grad()\r\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\r\n # fast, but does not allow for exact reconstruction\r\n # t serves as an index to gather the correct alphas\r\n if use_original_steps:\r\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\r\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\r\n else:\r\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\r\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\r\n\r\n if noise is None:\r\n noise = torch.randn_like(x0)\r\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\r\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)"
},
{
"identifier": "instantiate_from_config",
"path": "utils/utils.py",
"snippet": "def instantiate_from_config(config):\r\n if not \"target\" in config:\r\n if config == '__is_first_stage__':\r\n return None\r\n elif config == \"__is_unconditional__\":\r\n return None\r\n raise KeyError(\"Expected key `target` to instantiate.\")\r\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))\r"
}
] | import argparse, os, sys, glob
import datetime, time
import torch
import torchvision
import torchvision.transforms as transforms
from omegaconf import OmegaConf
from tqdm import tqdm
from einops import rearrange, repeat
from collections import OrderedDict
from pytorch_lightning import seed_everything
from PIL import Image
from lvdm.models.samplers.ddim import DDIMSampler
from lvdm.models.samplers.ddim_multiplecond import DDIMSampler as DDIMSampler_multicond
from utils.utils import instantiate_from_config
| 8,644 | file_list.sort()
return file_list
def load_model_checkpoint(model, ckpt):
state_dict = torch.load(ckpt, map_location="cpu")
if "state_dict" in list(state_dict.keys()):
state_dict = state_dict["state_dict"]
model.load_state_dict(state_dict, strict=True)
else:
# deepspeed
new_pl_sd = OrderedDict()
for key in state_dict['module'].keys():
new_pl_sd[key[16:]]=state_dict['module'][key]
model.load_state_dict(new_pl_sd)
print('>>> model checkpoint loaded.')
return model
def load_prompts(prompt_file):
f = open(prompt_file, 'r')
prompt_list = []
for idx, line in enumerate(f.readlines()):
l = line.strip()
if len(l) != 0:
prompt_list.append(l)
f.close()
return prompt_list
def load_data_prompts(data_dir, video_size=(256,256), video_frames=16, gfi=False):
transform = transforms.Compose([
transforms.Resize(min(video_size)),
transforms.CenterCrop(video_size),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
## load prompts
prompt_file = get_filelist(data_dir, ['txt'])
assert len(prompt_file) > 0, "Error: found NO prompt file!"
###### default prompt
default_idx = 0
default_idx = min(default_idx, len(prompt_file)-1)
if len(prompt_file) > 1:
print(f"Warning: multiple prompt files exist. The one {os.path.split(prompt_file[default_idx])[1]} is used.")
## only use the first one (sorted by name) if multiple exist
## load video
file_list = get_filelist(data_dir, ['jpg', 'png', 'jpeg', 'JPEG', 'PNG'])
# assert len(file_list) == n_samples, "Error: data and prompts are NOT paired!"
data_list = []
filename_list = []
prompt_list = load_prompts(prompt_file[default_idx])
n_samples = len(prompt_list)
for idx in range(n_samples):
image = Image.open(file_list[idx]).convert('RGB')
image_tensor = transform(image).unsqueeze(1) # [c,1,h,w]
frame_tensor = repeat(image_tensor, 'c t h w -> c (repeat t) h w', repeat=video_frames)
data_list.append(frame_tensor)
_, filename = os.path.split(file_list[idx])
filename_list.append(filename)
return filename_list, data_list, prompt_list
def save_results(prompt, samples, filename, fakedir, fps=8, loop=False):
filename = filename.split('.')[0]+'.mp4'
prompt = prompt[0] if isinstance(prompt, list) else prompt
## save video
videos = [samples]
savedirs = [fakedir]
for idx, video in enumerate(videos):
if video is None:
continue
# b,c,t,h,w
video = video.detach().cpu()
video = torch.clamp(video.float(), -1., 1.)
n = video.shape[0]
video = video.permute(2, 0, 1, 3, 4) # t,n,c,h,w
if loop:
video = video[:-1,...]
frame_grids = [torchvision.utils.make_grid(framesheet, nrow=int(n), padding=0) for framesheet in video] #[3, 1*h, n*w]
grid = torch.stack(frame_grids, dim=0) # stack in temporal dim [t, 3, h, n*w]
grid = (grid + 1.0) / 2.0
grid = (grid * 255).to(torch.uint8).permute(0, 2, 3, 1)
path = os.path.join(savedirs[idx], filename)
torchvision.io.write_video(path, grid, fps=fps, video_codec='h264', options={'crf': '10'}) ## crf indicates the quality
def save_results_seperate(prompt, samples, filename, fakedir, fps=10, loop=False):
prompt = prompt[0] if isinstance(prompt, list) else prompt
## save video
videos = [samples]
savedirs = [fakedir]
for idx, video in enumerate(videos):
if video is None:
continue
# b,c,t,h,w
video = video.detach().cpu()
if loop: # remove the last frame
video = video[:,:,:-1,...]
video = torch.clamp(video.float(), -1., 1.)
n = video.shape[0]
for i in range(n):
grid = video[i,...]
grid = (grid + 1.0) / 2.0
grid = (grid * 255).to(torch.uint8).permute(1, 2, 3, 0) #thwc
path = os.path.join(savedirs[idx].replace('samples', 'samples_separate'), f'{filename.split(".")[0]}_sample{i}.mp4')
torchvision.io.write_video(path, grid, fps=fps, video_codec='h264', options={'crf': '10'})
def get_latent_z(model, videos):
b, c, t, h, w = videos.shape
x = rearrange(videos, 'b c t h w -> (b t) c h w')
z = model.encode_first_stage(x)
z = rearrange(z, '(b t) c h w -> b c t h w', b=b, t=t)
return z
def image_guided_synthesis(model, prompts, videos, noise_shape, n_samples=1, ddim_steps=50, ddim_eta=1., \
unconditional_guidance_scale=1.0, cfg_img=None, fs=None, text_input=False, multiple_cond_cfg=False, loop=False, gfi=False, **kwargs):
|
sys.path.insert(1, os.path.join(sys.path[0], '..', '..'))
def get_filelist(data_dir, postfixes):
patterns = [os.path.join(data_dir, f"*.{postfix}") for postfix in postfixes]
file_list = []
for pattern in patterns:
file_list.extend(glob.glob(pattern))
file_list.sort()
return file_list
def load_model_checkpoint(model, ckpt):
state_dict = torch.load(ckpt, map_location="cpu")
if "state_dict" in list(state_dict.keys()):
state_dict = state_dict["state_dict"]
model.load_state_dict(state_dict, strict=True)
else:
# deepspeed
new_pl_sd = OrderedDict()
for key in state_dict['module'].keys():
new_pl_sd[key[16:]]=state_dict['module'][key]
model.load_state_dict(new_pl_sd)
print('>>> model checkpoint loaded.')
return model
def load_prompts(prompt_file):
f = open(prompt_file, 'r')
prompt_list = []
for idx, line in enumerate(f.readlines()):
l = line.strip()
if len(l) != 0:
prompt_list.append(l)
f.close()
return prompt_list
def load_data_prompts(data_dir, video_size=(256,256), video_frames=16, gfi=False):
transform = transforms.Compose([
transforms.Resize(min(video_size)),
transforms.CenterCrop(video_size),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
## load prompts
prompt_file = get_filelist(data_dir, ['txt'])
assert len(prompt_file) > 0, "Error: found NO prompt file!"
###### default prompt
default_idx = 0
default_idx = min(default_idx, len(prompt_file)-1)
if len(prompt_file) > 1:
print(f"Warning: multiple prompt files exist. The one {os.path.split(prompt_file[default_idx])[1]} is used.")
## only use the first one (sorted by name) if multiple exist
## load video
file_list = get_filelist(data_dir, ['jpg', 'png', 'jpeg', 'JPEG', 'PNG'])
# assert len(file_list) == n_samples, "Error: data and prompts are NOT paired!"
data_list = []
filename_list = []
prompt_list = load_prompts(prompt_file[default_idx])
n_samples = len(prompt_list)
for idx in range(n_samples):
image = Image.open(file_list[idx]).convert('RGB')
image_tensor = transform(image).unsqueeze(1) # [c,1,h,w]
frame_tensor = repeat(image_tensor, 'c t h w -> c (repeat t) h w', repeat=video_frames)
data_list.append(frame_tensor)
_, filename = os.path.split(file_list[idx])
filename_list.append(filename)
return filename_list, data_list, prompt_list
def save_results(prompt, samples, filename, fakedir, fps=8, loop=False):
filename = filename.split('.')[0]+'.mp4'
prompt = prompt[0] if isinstance(prompt, list) else prompt
## save video
videos = [samples]
savedirs = [fakedir]
for idx, video in enumerate(videos):
if video is None:
continue
# b,c,t,h,w
video = video.detach().cpu()
video = torch.clamp(video.float(), -1., 1.)
n = video.shape[0]
video = video.permute(2, 0, 1, 3, 4) # t,n,c,h,w
if loop:
video = video[:-1,...]
frame_grids = [torchvision.utils.make_grid(framesheet, nrow=int(n), padding=0) for framesheet in video] #[3, 1*h, n*w]
grid = torch.stack(frame_grids, dim=0) # stack in temporal dim [t, 3, h, n*w]
grid = (grid + 1.0) / 2.0
grid = (grid * 255).to(torch.uint8).permute(0, 2, 3, 1)
path = os.path.join(savedirs[idx], filename)
torchvision.io.write_video(path, grid, fps=fps, video_codec='h264', options={'crf': '10'}) ## crf indicates the quality
def save_results_seperate(prompt, samples, filename, fakedir, fps=10, loop=False):
prompt = prompt[0] if isinstance(prompt, list) else prompt
## save video
videos = [samples]
savedirs = [fakedir]
for idx, video in enumerate(videos):
if video is None:
continue
# b,c,t,h,w
video = video.detach().cpu()
if loop: # remove the last frame
video = video[:,:,:-1,...]
video = torch.clamp(video.float(), -1., 1.)
n = video.shape[0]
for i in range(n):
grid = video[i,...]
grid = (grid + 1.0) / 2.0
grid = (grid * 255).to(torch.uint8).permute(1, 2, 3, 0) #thwc
path = os.path.join(savedirs[idx].replace('samples', 'samples_separate'), f'{filename.split(".")[0]}_sample{i}.mp4')
torchvision.io.write_video(path, grid, fps=fps, video_codec='h264', options={'crf': '10'})
def get_latent_z(model, videos):
b, c, t, h, w = videos.shape
x = rearrange(videos, 'b c t h w -> (b t) c h w')
z = model.encode_first_stage(x)
z = rearrange(z, '(b t) c h w -> b c t h w', b=b, t=t)
return z
def image_guided_synthesis(model, prompts, videos, noise_shape, n_samples=1, ddim_steps=50, ddim_eta=1., \
unconditional_guidance_scale=1.0, cfg_img=None, fs=None, text_input=False, multiple_cond_cfg=False, loop=False, gfi=False, **kwargs):
| ddim_sampler = DDIMSampler(model) if not multiple_cond_cfg else DDIMSampler_multicond(model)
| 1 | 2023-11-27 12:34:23+00:00 | 12k |
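Both DDIM sampler snippets in the record above implement the same closed-form update inside `p_sample_ddim`. A minimal standalone sketch of that single reverse step follows; the helper name `ddim_step` and its argument names are illustrative and not taken from the repository (x and e_t are tensors, a_t, a_prev and sigma_t are scalar tensors broadcastable to x):

import torch

def ddim_step(x, e_t, a_t, a_prev, sigma_t, noise=None):
    # Estimate the clean sample x_0 from the current latent x and the predicted noise e_t.
    pred_x0 = (x - (1.0 - a_t).sqrt() * e_t) / a_t.sqrt()
    # Direction term pointing back towards x_t.
    dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t
    # Optional stochastic term; sigma_t = 0 recovers the fully deterministic DDIM step.
    if noise is None:
        noise = torch.randn_like(x)
    x_prev = a_prev.sqrt() * pred_x0 + dir_xt + sigma_t * noise
    return x_prev, pred_x0

In the snippets, the per-step a_t, a_prev and sigma_t values are precomputed by make_ddim_sampling_parameters, with the eta argument controlling how large sigma_t (and hence the stochastic term) is.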
dvlab-research/LLMGA | llmga/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py | [
{
"identifier": "TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS",
"path": "llmga/diffusers/tests/pipelines/pipeline_params.py",
"snippet": "TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset([\"prompt\", \"image\", \"negative_prompt\"])"
},
{
"identifier": "TEXT_GUIDED_IMAGE_VARIATION_PARAMS",
"path": "llmga/diffusers/tests/pipelines/pipeline_params.py",
"snippet": "TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(\n [\n \"prompt\",\n \"image\",\n \"height\",\n \"width\",\n \"guidance_scale\",\n \"negative_prompt\",\n \"prompt_embeds\",\n \"negative_prompt_embeds\",\n ]\n)"
},
{
"identifier": "PipelineKarrasSchedulerTesterMixin",
"path": "llmga/diffusers/tests/pipelines/test_pipelines_common.py",
"snippet": "class PipelineKarrasSchedulerTesterMixin:\n \"\"\"\n This mixin is designed to be used with unittest.TestCase classes.\n It provides a set of common tests for each PyTorch pipeline that makes use of KarrasDiffusionSchedulers\n equivalence of dict and tuple outputs, etc.\n \"\"\"\n\n def test_karras_schedulers_shape(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n\n # make sure that PNDM does not need warm-up\n pipe.scheduler.register_to_config(skip_prk_steps=True)\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n inputs = self.get_dummy_inputs(torch_device)\n inputs[\"num_inference_steps\"] = 2\n\n if \"strength\" in inputs:\n inputs[\"num_inference_steps\"] = 4\n inputs[\"strength\"] = 0.5\n\n outputs = []\n for scheduler_enum in KarrasDiffusionSchedulers:\n if \"KDPM2\" in scheduler_enum.name:\n inputs[\"num_inference_steps\"] = 5\n\n scheduler_cls = getattr(diffusers, scheduler_enum.name)\n pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)\n output = pipe(**inputs)[0]\n outputs.append(output)\n\n if \"KDPM2\" in scheduler_enum.name:\n inputs[\"num_inference_steps\"] = 2\n\n assert check_same_shape(outputs)"
},
{
"identifier": "PipelineLatentTesterMixin",
"path": "llmga/diffusers/tests/pipelines/test_pipelines_common.py",
"snippet": "class PipelineLatentTesterMixin:\n \"\"\"\n This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes.\n It provides a set of common tests for PyTorch pipeline that has vae, e.g.\n equivalence of different input and output types, etc.\n \"\"\"\n\n @property\n def image_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `image_params` in the child test class. \"\n \"`image_params` are tested for if all accepted input image types (i.e. `pt`,`pil`,`np`) are producing same results\"\n )\n\n @property\n def image_latents_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `image_latents_params` in the child test class. \"\n \"`image_latents_params` are tested for if passing latents directly are producing same results\"\n )\n\n def get_dummy_inputs_by_type(self, device, seed=0, input_image_type=\"pt\", output_type=\"np\"):\n inputs = self.get_dummy_inputs(device, seed)\n\n def convert_to_pt(image):\n if isinstance(image, torch.Tensor):\n input_image = image\n elif isinstance(image, np.ndarray):\n input_image = VaeImageProcessor.numpy_to_pt(image)\n elif isinstance(image, PIL.Image.Image):\n input_image = VaeImageProcessor.pil_to_numpy(image)\n input_image = VaeImageProcessor.numpy_to_pt(input_image)\n else:\n raise ValueError(f\"unsupported input_image_type {type(image)}\")\n return input_image\n\n def convert_pt_to_type(image, input_image_type):\n if input_image_type == \"pt\":\n input_image = image\n elif input_image_type == \"np\":\n input_image = VaeImageProcessor.pt_to_numpy(image)\n elif input_image_type == \"pil\":\n input_image = VaeImageProcessor.pt_to_numpy(image)\n input_image = VaeImageProcessor.numpy_to_pil(input_image)\n else:\n raise ValueError(f\"unsupported input_image_type {input_image_type}.\")\n return input_image\n\n for image_param in self.image_params:\n if image_param in inputs.keys():\n inputs[image_param] = convert_pt_to_type(\n convert_to_pt(inputs[image_param]).to(device), input_image_type\n )\n\n inputs[\"output_type\"] = output_type\n\n return inputs\n\n def test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4):\n self._test_pt_np_pil_outputs_equivalent(expected_max_diff=expected_max_diff)\n\n def _test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4, input_image_type=\"pt\"):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n output_pt = pipe(\n **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type=\"pt\")\n )[0]\n output_np = pipe(\n **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type=\"np\")\n )[0]\n output_pil = pipe(\n **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type=\"pil\")\n )[0]\n\n max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max()\n self.assertLess(\n max_diff, expected_max_diff, \"`output_type=='pt'` generate different results from `output_type=='np'`\"\n )\n\n max_diff = np.abs(np.array(output_pil[0]) - (output_np * 255).round()).max()\n self.assertLess(max_diff, 2.0, \"`output_type=='pil'` generate different results from `output_type=='np'`\")\n\n def test_pt_np_pil_inputs_equivalent(self):\n if len(self.image_params) == 0:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = 
pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n out_input_pt = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pt\"))[0]\n out_input_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"np\"))[0]\n out_input_pil = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pil\"))[0]\n\n max_diff = np.abs(out_input_pt - out_input_np).max()\n self.assertLess(max_diff, 1e-4, \"`input_type=='pt'` generate different result from `input_type=='np'`\")\n max_diff = np.abs(out_input_pil - out_input_np).max()\n self.assertLess(max_diff, 1e-2, \"`input_type=='pt'` generate different result from `input_type=='np'`\")\n\n def test_latents_input(self):\n if len(self.image_latents_params) == 0:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pt\"))[0]\n\n vae = components[\"vae\"]\n inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pt\")\n generator = inputs[\"generator\"]\n for image_param in self.image_latents_params:\n if image_param in inputs.keys():\n inputs[image_param] = (\n vae.encode(inputs[image_param]).latent_dist.sample(generator) * vae.config.scaling_factor\n )\n out_latents_inputs = pipe(**inputs)[0]\n\n max_diff = np.abs(out - out_latents_inputs).max()\n self.assertLess(max_diff, 1e-4, \"passing latents as image input generate different result from passing image\")"
},
{
"identifier": "PipelineTesterMixin",
"path": "llmga/diffusers/tests/pipelines/test_pipelines_common.py",
"snippet": "class PipelineTesterMixin:\n \"\"\"\n This mixin is designed to be used with unittest.TestCase classes.\n It provides a set of common tests for each PyTorch pipeline, e.g. saving and loading the pipeline,\n equivalence of dict and tuple outputs, etc.\n \"\"\"\n\n # Canonical parameters that are passed to `__call__` regardless\n # of the type of pipeline. They are always optional and have common\n # sense default values.\n required_optional_params = frozenset(\n [\n \"num_inference_steps\",\n \"num_images_per_prompt\",\n \"generator\",\n \"latents\",\n \"output_type\",\n \"return_dict\",\n \"callback\",\n \"callback_steps\",\n ]\n )\n\n # set these parameters to False in the child class if the pipeline does not support the corresponding functionality\n test_attention_slicing = True\n\n test_xformers_attention = True\n\n def get_generator(self, seed):\n device = torch_device if torch_device != \"mps\" else \"cpu\"\n generator = torch.Generator(device).manual_seed(seed)\n return generator\n\n @property\n def pipeline_class(self) -> Union[Callable, DiffusionPipeline]:\n raise NotImplementedError(\n \"You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n def get_dummy_components(self):\n raise NotImplementedError(\n \"You need to implement `get_dummy_components(self)` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n def get_dummy_inputs(self, device, seed=0):\n raise NotImplementedError(\n \"You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n @property\n def params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `params` in the child test class. \"\n \"`params` are checked for if all values are present in `__call__`'s signature.\"\n \" You can set `params` using one of the common set of parameters defined in `pipeline_params.py`\"\n \" e.g., `TEXT_TO_IMAGE_PARAMS` defines the common parameters used in text to \"\n \"image pipelines, including prompts and prompt embedding overrides.\"\n \"If your pipeline's set of arguments has minor changes from one of the common sets of arguments, \"\n \"do not make modifications to the existing common sets of arguments. I.e. a text to image pipeline \"\n \"with non-configurable height and width arguments should set the attribute as \"\n \"`params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. \"\n \"See existing pipeline tests for reference.\"\n )\n\n @property\n def batch_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `batch_params` in the child test class. \"\n \"`batch_params` are the parameters required to be batched when passed to the pipeline's \"\n \"`__call__` method. `pipeline_params.py` provides some common sets of parameters such as \"\n \"`TEXT_TO_IMAGE_BATCH_PARAMS`, `IMAGE_VARIATION_BATCH_PARAMS`, etc... If your pipeline's \"\n \"set of batch arguments has minor changes from one of the common sets of batch arguments, \"\n \"do not make modifications to the existing common sets of batch arguments. I.e. a text to \"\n \"image pipeline `negative_prompt` is not batched should set the attribute as \"\n \"`batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {'negative_prompt'}`. 
\"\n \"See existing pipeline tests for reference.\"\n )\n\n def tearDown(self):\n # clean up the VRAM after each test in case of CUDA runtime errors\n super().tearDown()\n gc.collect()\n torch.cuda.empty_cache()\n\n def test_save_load_local(self, expected_max_difference=5e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output = pipe(**inputs)[0]\n\n logger = logging.get_logger(\"diffusers.pipelines.pipeline_utils\")\n logger.setLevel(diffusers.logging.INFO)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir, safe_serialization=False)\n\n with CaptureLogger(logger) as cap_logger:\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)\n\n for name in pipe_loaded.components.keys():\n if name not in pipe_loaded._optional_components:\n assert name in str(cap_logger)\n\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output_loaded = pipe_loaded(**inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n def test_pipeline_call_signature(self):\n self.assertTrue(\n hasattr(self.pipeline_class, \"__call__\"), f\"{self.pipeline_class} should have a `__call__` method\"\n )\n\n parameters = inspect.signature(self.pipeline_class.__call__).parameters\n\n optional_parameters = set()\n\n for k, v in parameters.items():\n if v.default != inspect._empty:\n optional_parameters.add(k)\n\n parameters = set(parameters.keys())\n parameters.remove(\"self\")\n parameters.discard(\"kwargs\") # kwargs can be added if arguments of pipeline call function are deprecated\n\n remaining_required_parameters = set()\n\n for param in self.params:\n if param not in parameters:\n remaining_required_parameters.add(param)\n\n self.assertTrue(\n len(remaining_required_parameters) == 0,\n f\"Required parameters not present: {remaining_required_parameters}\",\n )\n\n remaining_required_optional_parameters = set()\n\n for param in self.required_optional_params:\n if param not in optional_parameters:\n remaining_required_optional_parameters.add(param)\n\n self.assertTrue(\n len(remaining_required_optional_parameters) == 0,\n f\"Required optional parameters not present: {remaining_required_optional_parameters}\",\n )\n\n def test_inference_batch_consistent(self, batch_sizes=[2]):\n self._test_inference_batch_consistent(batch_sizes=batch_sizes)\n\n def _test_inference_batch_consistent(\n self, batch_sizes=[2], additional_params_copy_to_batched_inputs=[\"num_inference_steps\"]\n ):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n inputs[\"generator\"] = self.get_generator(0)\n\n logger = logging.get_logger(pipe.__module__)\n logger.setLevel(level=diffusers.logging.FATAL)\n\n # prepare batched inputs\n batched_inputs = []\n for batch_size in batch_sizes:\n batched_input = {}\n batched_input.update(inputs)\n\n for name in self.batch_params:\n if name not in inputs:\n continue\n\n value = inputs[name]\n if name == \"prompt\":\n len_prompt = len(value)\n # make unequal batch sizes\n 
batched_input[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]\n\n # make last batch super long\n batched_input[name][-1] = 100 * \"very long\"\n\n else:\n batched_input[name] = batch_size * [value]\n\n if \"generator\" in inputs:\n batched_input[\"generator\"] = [self.get_generator(i) for i in range(batch_size)]\n\n if \"batch_size\" in inputs:\n batched_input[\"batch_size\"] = batch_size\n\n batched_inputs.append(batched_input)\n\n logger.setLevel(level=diffusers.logging.WARNING)\n for batch_size, batched_input in zip(batch_sizes, batched_inputs):\n output = pipe(**batched_input)\n assert len(output[0]) == batch_size\n\n def test_inference_batch_single_identical(self, batch_size=3, expected_max_diff=1e-4):\n self._test_inference_batch_single_identical(batch_size=batch_size, expected_max_diff=expected_max_diff)\n\n def _test_inference_batch_single_identical(\n self,\n batch_size=2,\n expected_max_diff=1e-4,\n additional_params_copy_to_batched_inputs=[\"num_inference_steps\"],\n ):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for components in pipe.components.values():\n if hasattr(components, \"set_default_attn_processor\"):\n components.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is has been used in self.get_dummy_inputs\n inputs[\"generator\"] = self.get_generator(0)\n\n logger = logging.get_logger(pipe.__module__)\n logger.setLevel(level=diffusers.logging.FATAL)\n\n # batchify inputs\n batched_inputs = {}\n batched_inputs.update(inputs)\n\n for name in self.batch_params:\n if name not in inputs:\n continue\n\n value = inputs[name]\n if name == \"prompt\":\n len_prompt = len(value)\n batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]\n batched_inputs[name][-1] = 100 * \"very long\"\n\n else:\n batched_inputs[name] = batch_size * [value]\n\n if \"generator\" in inputs:\n batched_inputs[\"generator\"] = [self.get_generator(i) for i in range(batch_size)]\n\n if \"batch_size\" in inputs:\n batched_inputs[\"batch_size\"] = batch_size\n\n for arg in additional_params_copy_to_batched_inputs:\n batched_inputs[arg] = inputs[arg]\n\n output = pipe(**inputs)\n output_batch = pipe(**batched_inputs)\n\n assert output_batch[0].shape[0] == batch_size\n\n max_diff = np.abs(output_batch[0][0] - output[0][0]).max()\n assert max_diff < expected_max_diff\n\n def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n output = pipe(**self.get_dummy_inputs(generator_device))[0]\n output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_tuple)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n def test_components_function(self):\n init_components = self.get_dummy_components()\n init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float))}\n\n pipe = self.pipeline_class(**init_components)\n\n self.assertTrue(hasattr(pipe, \"components\"))\n self.assertTrue(set(pipe.components.keys()) == 
set(init_components.keys()))\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"float16 requires CUDA\")\n def test_float16_inference(self, expected_max_diff=5e-2):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n components = self.get_dummy_components()\n pipe_fp16 = self.pipeline_class(**components)\n for component in pipe_fp16.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe_fp16.to(torch_device, torch.float16)\n pipe_fp16.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is used inside dummy inputs\n if \"generator\" in inputs:\n inputs[\"generator\"] = self.get_generator(0)\n\n output = pipe(**inputs)[0]\n\n fp16_inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is used inside dummy inputs\n if \"generator\" in fp16_inputs:\n fp16_inputs[\"generator\"] = self.get_generator(0)\n\n output_fp16 = pipe_fp16(**fp16_inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_fp16)).max()\n self.assertLess(max_diff, expected_max_diff, \"The outputs of the fp16 and fp32 pipelines are too different.\")\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"float16 requires CUDA\")\n def test_save_load_float16(self, expected_max_diff=1e-2):\n components = self.get_dummy_components()\n for name, module in components.items():\n if hasattr(module, \"half\"):\n components[name] = module.to(torch_device).half()\n\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output = pipe(**inputs)[0]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir)\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16)\n for component in pipe_loaded.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n for name, component in pipe_loaded.components.items():\n if hasattr(component, \"dtype\"):\n self.assertTrue(\n component.dtype == torch.float16,\n f\"`{name}.dtype` switched from `float16` to {component.dtype} after loading.\",\n )\n\n inputs = self.get_dummy_inputs(torch_device)\n output_loaded = pipe_loaded(**inputs)[0]\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(\n max_diff, expected_max_diff, \"The output of the fp16 pipeline changed after saving and loading.\"\n )\n\n def test_save_load_optional_components(self, expected_max_difference=1e-4):\n if not hasattr(self.pipeline_class, \"_optional_components\"):\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n # set all optional components to None\n for optional_component in 
pipe._optional_components:\n setattr(pipe, optional_component, None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output = pipe(**inputs)[0]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir, safe_serialization=False)\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)\n for component in pipe_loaded.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n for optional_component in pipe._optional_components:\n self.assertTrue(\n getattr(pipe_loaded, optional_component) is None,\n f\"`{optional_component}` did not stay set to None after loading.\",\n )\n\n inputs = self.get_dummy_inputs(generator_device)\n output_loaded = pipe_loaded(**inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"CUDA and CPU are required to switch devices\")\n def test_to_device(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.set_progress_bar_config(disable=None)\n\n pipe.to(\"cpu\")\n model_devices = [component.device.type for component in components.values() if hasattr(component, \"device\")]\n self.assertTrue(all(device == \"cpu\" for device in model_devices))\n\n output_cpu = pipe(**self.get_dummy_inputs(\"cpu\"))[0]\n self.assertTrue(np.isnan(output_cpu).sum() == 0)\n\n pipe.to(\"cuda\")\n model_devices = [component.device.type for component in components.values() if hasattr(component, \"device\")]\n self.assertTrue(all(device == \"cuda\" for device in model_devices))\n\n output_cuda = pipe(**self.get_dummy_inputs(\"cuda\"))[0]\n self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)\n\n def test_to_dtype(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.set_progress_bar_config(disable=None)\n\n model_dtypes = [component.dtype for component in components.values() if hasattr(component, \"dtype\")]\n self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes))\n\n pipe.to(torch_dtype=torch.float16)\n model_dtypes = [component.dtype for component in components.values() if hasattr(component, \"dtype\")]\n self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes))\n\n def test_attention_slicing_forward_pass(self, expected_max_diff=1e-3):\n self._test_attention_slicing_forward_pass(expected_max_diff=expected_max_diff)\n\n def _test_attention_slicing_forward_pass(\n self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3\n ):\n if not self.test_attention_slicing:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output_without_slicing = pipe(**inputs)[0]\n\n pipe.enable_attention_slicing(slice_size=1)\n inputs = self.get_dummy_inputs(generator_device)\n output_with_slicing = pipe(**inputs)[0]\n\n if test_max_difference:\n max_diff = np.abs(to_np(output_with_slicing) - to_np(output_without_slicing)).max()\n self.assertLess(max_diff, expected_max_diff, \"Attention slicing 
should not affect the inference results\")\n\n if test_mean_pixel_difference:\n assert_mean_pixel_difference(output_with_slicing[0], output_without_slicing[0])\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_accelerate_available() or is_accelerate_version(\"<\", \"0.14.0\"),\n reason=\"CPU offload is only available with CUDA and `accelerate v0.14.0` or higher\",\n )\n def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output_without_offload = pipe(**inputs)[0]\n\n pipe.enable_sequential_cpu_offload()\n\n inputs = self.get_dummy_inputs(generator_device)\n output_with_offload = pipe(**inputs)[0]\n\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"CPU offloading should not affect the inference results\")\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_accelerate_available() or is_accelerate_version(\"<\", \"0.17.0\"),\n reason=\"CPU offload is only available with CUDA and `accelerate v0.17.0` or higher\",\n )\n def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):\n generator_device = \"cpu\"\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(generator_device)\n output_without_offload = pipe(**inputs)[0]\n\n pipe.enable_model_cpu_offload()\n inputs = self.get_dummy_inputs(generator_device)\n output_with_offload = pipe(**inputs)[0]\n\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"CPU offloading should not affect the inference results\")\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_xformers_available(),\n reason=\"XFormers attention is only available with CUDA and `xformers` installed\",\n )\n def test_xformers_attention_forwardGenerator_pass(self):\n self._test_xformers_attention_forwardGenerator_pass()\n\n def _test_xformers_attention_forwardGenerator_pass(\n self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-4\n ):\n if not self.test_xformers_attention:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output_without_offload = pipe(**inputs)[0]\n output_without_offload = (\n output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload\n )\n\n pipe.enable_xformers_memory_efficient_attention()\n inputs = self.get_dummy_inputs(torch_device)\n output_with_offload = pipe(**inputs)[0]\n output_with_offload = (\n output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload\n )\n\n 
if test_max_difference:\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"XFormers attention should not affect the inference results\")\n\n if test_mean_pixel_difference:\n assert_mean_pixel_difference(output_with_offload[0], output_without_offload[0])\n\n def test_progress_bar(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.to(torch_device)\n\n inputs = self.get_dummy_inputs(torch_device)\n with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):\n _ = pipe(**inputs)\n stderr = stderr.getvalue()\n # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img,\n # so we just match \"5\" in \"#####| 1/5 [00:01<00:00]\"\n max_steps = re.search(\"/(.*?) \", stderr).group(1)\n self.assertTrue(max_steps is not None and len(max_steps) > 0)\n self.assertTrue(\n f\"{max_steps}/{max_steps}\" in stderr, \"Progress bar should be enabled and stopped at the max step\"\n )\n\n pipe.set_progress_bar_config(disable=True)\n with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):\n _ = pipe(**inputs)\n self.assertTrue(stderr.getvalue() == \"\", \"Progress bar should be disabled\")\n\n def test_num_images_per_prompt(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n\n if \"num_images_per_prompt\" not in sig.parameters:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n batch_sizes = [1, 2]\n num_images_per_prompts = [1, 2]\n\n for batch_size in batch_sizes:\n for num_images_per_prompt in num_images_per_prompts:\n inputs = self.get_dummy_inputs(torch_device)\n\n for key in inputs.keys():\n if key in self.batch_params:\n inputs[key] = batch_size * [inputs[key]]\n\n images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]\n\n assert images.shape[0] == batch_size * num_images_per_prompt\n\n def test_cfg(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n\n if \"guidance_scale\" not in sig.parameters:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n\n inputs[\"guidance_scale\"] = 1.0\n out_no_cfg = pipe(**inputs)[0]\n\n inputs[\"guidance_scale\"] = 7.5\n out_cfg = pipe(**inputs)[0]\n\n assert out_cfg.shape == out_no_cfg.shape"
}
] | import gc
import random
import unittest
import numpy as np
import torch
import diffusers
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin | 8,576 | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
enable_full_determinism()
def check_same_shape(tensor_list):
shapes = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
| # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
enable_full_determinism()
def check_same_shape(tensor_list):
shapes = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests( | PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase | 2 | 2023-11-27 18:46:55+00:00 | 12k |
HumanAIGC/Cloth2Tex | phase1_inference.py | [
{
"identifier": "ClothRenderer",
"path": "renderer/cloth_renderer.py",
"snippet": "class ClothRenderer(object):\n \n def __init__(self, objfile, resolution=512, focal_distance=1.6, scale_factor=1):\n self.device = torch.device(\"cuda:0\")\n\n self.img_size = resolution\n self.render_size = resolution\n self.renderer, self.renderer_silhouette = self.__get_renderer(self.render_size, focal_distance)\n \n print(\"[Cloth2Tex]\", objfile)\n obj_filename = os.path.join(objfile)\n verts, faces, aux = load_obj(\n obj_filename,\n device=self.device,\n load_textures=True)\n self.faces = faces.verts_idx\n self.verts = verts\n self.aux = aux\n \n self.verts = self.normalize_vertex(verts.clone()) * scale_factor\n \n self.center = verts.mean(0)\n self.scale = max((verts - self.center).abs().max(0)[0])\n self.landmark_cam = OrthogonalCamera(rotation=self.cameras.R.cuda(), translation=self.cameras.T.cuda()).to(self.device)\n \n _keys = []\n if len(aux.texture_images.keys()) > 0:\n for _ in aux.texture_images.keys():\n _keys.append(_)\n self.tex_lst = [aux.texture_images[i] for i in _keys]\n texture_image = self.tex_lst[0]\n \n \n self.verts_uvs = aux.verts_uvs[None, ...] # (1, V, 2)\n faces_uvs = faces.textures_idx[None, ...] # (1, F, 3)\n tex_maps = aux.texture_images\n\n # Canonical Mesh\n texture_image = texture_image[None, ...].to(self.device) # (1, H, W, 3)\n self.texture = TexturesUV(maps=texture_image, faces_uvs=self.faces[None], verts_uvs=self.verts_uvs)\n self.canonical_mesh = Meshes([self.verts], [self.faces], self.texture)\n \n def normalize_vertex(self, verts):\n # Normalizing\n N = verts.shape[0]\n center = verts.mean(0)\n scale = max((verts - center).abs().max(0)[0])\n \n verts = verts - center\n verts = verts * (1/float(scale))\n \n return verts\n \n def denormalize_vertex(self, verts):\n \n out = self.scale*verts + self.center\n \n return out\n \n def render_silhouette(self, verts, side='back', landmark=True, vertex_number=[[], []]):\n vert_lst_front = vertex_number[0]\n vert_lst_back = vertex_number[1]\n \n tmp_verts = verts.clone()\n mesh = Meshes([tmp_verts], [self.faces], self.texture)\n meshes = mesh.extend(2)\n \n # Get a batch(2) of viewing angles. 
\n elev = torch.linspace(180, -180, 2)\n azim = torch.linspace(0, 0, 2)\n \n focal_length = torch.linspace(-1, 1, 2)\n R, T = look_at_view_transform(dist=focal_length, elev=elev, azim=azim)\n cameras = FoVOrthographicCameras(device=self.device, R=R, T=T)\n \n target_images, fragments = self.renderer_silhouette(meshes, cameras=cameras)\n \n if landmark is True:\n # project normalized vertex to image space(fix vertex)\n specific_verts_2d_front = self.landmark_cam(verts[vert_lst_front].unsqueeze(0))[0]\n # conversion from OpenGL coordinate to OpenCV coordinate\n specific_verts_2d_front[:,] = -specific_verts_2d_front[:,]\n # conversion from [-1,1] to [0,512]\n specific_verts_2d_front = (specific_verts_2d_front+1)/2*self.render_size\n \n # project normalized vertex to image space(fix vertex)\n specific_verts_2d_back = self.landmark_cam(verts[vert_lst_back].unsqueeze(0))[0]\n # conversion from OpenGL coordinate to OpenCV coordinate\n specific_verts_2d_back[:,] = -specific_verts_2d_back[:,]\n # conversion from [-1,1] to [0,512]\n specific_verts_2d_back = (specific_verts_2d_back+1)/2*self.render_size\n \n if side == 'front':\n return target_images[0], [specific_verts_2d_front]\n elif side == 'back':\n return target_images[1], [specific_verts_2d_back]\n else:\n return target_images, [specific_verts_2d_front, specific_verts_2d_back]\n \n return target_images, fragments\n \n def render_image(self, texture_image):\n texture = TexturesUV(maps=texture_image, faces_uvs=self.faces[None], verts_uvs=self.verts_uvs)\n \n tmp_verts = self.verts.clone()\n mesh = Meshes([tmp_verts], [self.faces.clone()], texture)\n meshes = mesh.extend(2)\n \n # Get a batch(2) of viewing angles. \n elev = torch.linspace(180, -180, 2)\n azim = torch.linspace(0, 0, 2)\n \n focal_length = torch.linspace(-1, 1, 2)\n R, T = look_at_view_transform(dist=focal_length, elev=elev, azim=azim)\n cameras = FoVOrthographicCameras(device=self.device, R=R, T=T)\n \n target_images = self.renderer(meshes, cameras=cameras)\n target_masks, _ = self.renderer_silhouette(meshes, cameras=cameras)\n \n return target_images, target_masks\n \n \n def __get_renderer(self, render_size, focal_distance=2):\n \n lights = PointLights(device=self.device, location=[[0.0, 0.0, -3.0]],\n ambient_color=((1,1,1),),diffuse_color=((0,0,0),),specular_color=((0,0,0),))\n \n self.focal_distance = focal_distance\n R, T = look_at_view_transform(focal_distance, -180, 0) # 180 -> -180\n cameras = FoVPerspectiveCameras(device=self.device, R=R, T=T) # silhouette only!\n # cameras = FoVOrthographicCameras(device=self.device, R=R, T=T)\n \n self.cameras = cameras\n \n raster_settings = RasterizationSettings(\n image_size=render_size, \n blur_radius=0.0, \n faces_per_pixel=1, \n )\n sigma = 1e-4\n gamma = 1e-4\n blend_params = BlendParams(sigma=sigma, gamma=gamma, background_color=(255, 255, 255))\n \n renderer = MeshRenderer(\n rasterizer=MeshRasterizer(\n cameras=cameras,\n raster_settings=raster_settings\n ),\n shader = SoftPhongShader(\n device=self.device, \n cameras=cameras,\n lights=lights,\n # blend_params=blend_params\n )\n )\n \n # ref: https://github.com/facebookresearch/pytorch3d/issues/470\n sigma = 1e-8\n gamma = 1e-8\n blend_params = BlendParams(sigma=sigma, gamma=gamma, background_color=(0, 0, 0))\n raster_settings = RasterizationSettings(\n image_size=render_size, \n blur_radius=np.log(1. / 1e-8 - 1.)*sigma, # blur_radius=np.log(1. 
/ 1e-8 - 1.)*sigma, \n faces_per_pixel=10, \n bin_size=None, \n max_faces_per_bin=None\n )\n \n renderer_silhouette = MeshRendererWithFragments(\n rasterizer=MeshRasterizer(\n cameras=cameras, \n raster_settings=raster_settings\n ),\n shader=SoftSilhouetteShader(blend_params=blend_params)\n # shader=SoftSilhouetteShader(blend_params=blend_params)\n )\n\n return renderer, renderer_silhouette"
},
{
"identifier": "extract_ampl_phase",
"path": "utils/frequency.py",
"snippet": "def extract_ampl_phase(input_img):\n \n fft_img = torch.fft.rfftn(input_img.clone())\n fft_im = torch.stack((fft_img.real, fft_img.imag), -1)\n \n # fft_im: size should be bx3xhxwx2\n fft_amp = fft_im[:,:,:,:,0]**2 + fft_im[:,:,:,:,1]**2\n fft_amp = torch.sqrt(fft_amp) # amplitude\n fft_pha = torch.atan2( fft_im[:,:,:,:,1], fft_im[:,:,:,:,0]) # phase\n return fft_amp, fft_pha"
},
{
"identifier": "Binarize",
"path": "utils/binary_function.py",
"snippet": "class Binarize(Function):\n clip_value = 1\n\n @staticmethod\n def forward(ctx, inp):\n ctx.save_for_backward(inp)\n\n output = inp.sign()\n\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n inp: Tensor = ctx.saved_tensors[0]\n\n clipped = inp.abs() <= Binarize.clip_value\n\n output = torch.zeros(inp.size()).to(grad_output.device)\n output[clipped] = 1\n output[~clipped] = 0\n\n return output * grad_output"
},
{
"identifier": "TVLoss",
"path": "utils/tvl_loss.py",
"snippet": "class TVLoss(nn.Module):\n def __init__(self, weight=1):\n super(TVLoss,self).__init__()\n self.TVLoss_weight = weight\n\n def forward(self, x):\n batch_size = x.size()[0]\n h_x = x.size()[2]\n w_x = x.size()[3]\n count_h = self._tensor_size(x[:,:,1:,:])\n count_w = self._tensor_size(x[:,:,:,1:])\n h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum()\n w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum()\n \n # 2023.03.29 +2nearest\n # h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum() + torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:])[:, :, ::2, :],2).sum()\n # w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum() + torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1])[:, :, :, ::2],2).sum()\n \n return self.TVLoss_weight*2*(h_tv/count_h+w_tv/count_w)/batch_size\n\n def _tensor_size(self,t):\n return t.size()[1]*t.size()[2]*t.size()[3]"
},
{
"identifier": "TVMaskLoss",
"path": "utils/tvl_loss.py",
"snippet": "class TVMaskLoss(nn.Module):\n def __init__(self, weight=1):\n super(TVMaskLoss,self).__init__()\n self.TVMaskLoss_weight = weight\n self.non_idx = None\n\n def forward(self, mask, x):\n if self.non_idx is None:\n non_idx = mask.nonzero()\n self.non_idx = non_idx.split(1, dim=1)\n \n tmp_mask = torch.ones(1,3,512,512).cuda()\n tmp_mask[self.non_idx] = 0 # 排除非UV区域.\n \n batch_size = x.size()[0]\n h_x = x.size()[2]\n w_x = x.size()[3]\n \n x = x * tmp_mask\n \n count_h = self._tensor_size(x[:,:,1:,:])\n count_w = self._tensor_size(x[:,:,:,1:])\n # h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum()\n # w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum()\n \n # 2023.03.29 +2nearest\n h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum() + torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:])[:, :, ::2, :],2).sum()\n w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum() + torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1])[:, :, :, ::2],2).sum()\n return self.TVMaskLoss_weight*2*(h_tv/count_h+w_tv/count_w)/batch_size\n\n def _tensor_size(self,t):\n return t.size()[1]*t.size()[2]*t.size()[3]"
},
{
"identifier": "DeformationGraph",
"path": "lib/deformation_graph.py",
"snippet": "class DeformationGraph(nn.Module):\n \n def __init__(self, vert_number=9648, radius=0.015, k=9, sampling_strategy='qslim'): \n super().__init__()\n \n self.radius = radius\n self.k = k\n self.max_neigh_num = 40\n self.sampling_strategy = sampling_strategy\n self.one_ring_neigh = []\n self.nodes_idx = None\n self.weights = None\n self.influence_nodes_idx = []\n self.dists = []\n \n self.vert_number = vert_number\n\n def construct_graph(self, category_name, vertices=None, faces=None):\n \n transform_fp = \"transform_{}.pkl\".format(category_name)\n if self.sampling_strategy == 'qslim':\n m = Mesh(v=vertices, f=faces)\n if os.path.exists(transform_fp):\n with open(transform_fp, 'rb') as f:\n tmp = pickle.load(f, encoding='latin1')\n M, A, D = tmp['M'], tmp['A'], tmp['D']\n else:\n M, A, D = generate_transform_matrices(m, [20, 20])\n tmp = {'M': M, 'A': A, 'D': D}\n with open(transform_fp, 'wb') as fp:\n pickle.dump(tmp, fp)\n # import pdb; pdb.set_trace()\n nodes_v = M[1].v\n self.nodes_idx = D[0].nonzero()[1]\n adj_mat = A[1].toarray()\n \n for i in range(adj_mat.shape[0]):\n self.one_ring_neigh.append(adj_mat[i].nonzero()[0].tolist() + [i]*(self.max_neigh_num-len(adj_mat[i].nonzero()[0])))\n self.one_ring_neigh = torch.tensor(self.one_ring_neigh).cuda() \n\n # construct kd tree\n kdtree = KDTree(nodes_v)\n \n for vert in vertices:\n dist, idx = kdtree.query(vert, k=self.k)\n self.dists.append(dist)\n self.influence_nodes_idx.append(idx)\n \n self.weights = -np.log(np.array(self.dists)+eps)\n \n # weights normalization\n self.weights = torch.tensor(self.weights/col(self.weights.sum(1))).cuda()\n self.influence_nodes_idx = torch.tensor(self.influence_nodes_idx).cuda()\n \n def forward(self, vertices, opt_d_rotations, opt_d_translations):\n \n opt_d_rotmat = batch_rodrigues(opt_d_rotations[0]).unsqueeze(0) # 1 * N_c * 3 * 3\n nodes = vertices[self.nodes_idx, ...]\n \n opt_d_rotmat = opt_d_rotmat.cuda()\n opt_d_translations = opt_d_translations.cuda()\n\n influence_nodes_v = nodes[self.influence_nodes_idx.reshape((-1,))]# .reshape((28944(self.k * 9648),3,3))\n opt_d_r = opt_d_rotmat[0, self.influence_nodes_idx.reshape((-1,)), ...]# .reshape((28944,3,3,3)) \n opt_d_t = opt_d_translations[0, self.influence_nodes_idx.reshape((-1,)), ...]# .reshape((28944,3,3))\n \n warpped_vertices = (torch.einsum('bij, bkj->bki', opt_d_r.cuda(), (vertices.repeat_interleave(self.k, dim=0) - influence_nodes_v).unsqueeze(1)).squeeze(1) \\\n + influence_nodes_v + opt_d_t.cuda()).reshape((self.vert_number, self.k, 3)) * (self.weights.unsqueeze(-1))\n warpped_vertices = warpped_vertices.sum(axis=1).float()\n\n diff_term = (nodes + opt_d_translations[0].cuda()).repeat_interleave(self.max_neigh_num, dim=0) - \\\n (nodes[self.one_ring_neigh.reshape((-1,))] + opt_d_translations[0][self.one_ring_neigh.reshape((-1,))].cuda()) - \\\n torch.einsum('bij, bkj->bki', opt_d_rotmat[0].repeat_interleave(self.max_neigh_num, dim=0).cuda(), \\\n (nodes.repeat_interleave(self.max_neigh_num, dim=0) - nodes[self.one_ring_neigh.reshape((-1,))]).unsqueeze(1)).squeeze(1)\n arap_loss = torch.sum(diff_term ** 2) / self.nodes_idx.shape[0]\n \n return warpped_vertices.unsqueeze(0), arap_loss"
},
{
"identifier": "generate_transform_matrices_coma",
"path": "lib/mesh_sampling.py",
"snippet": "def generate_transform_matrices_coma(mesh, factors):\n \"\"\"Generates len(factors) meshes, each of them is scaled by factors[i] and\n computes the transformations between them.\n Returns:\n M: a set of meshes downsampled from mesh by a factor specified in factors.\n A: Adjacency matrix for each of the meshes\n D: csc_matrix Downsampling transforms between each of the meshes\n U: Upsampling transforms between each of the meshes\n F: a list of faces\n \"\"\"\n\n factors = map(lambda x: 1.0 / x, factors)\n M, A, D, U, F = [], [], [], [], []\n F.append(mesh.f) # F[0]\n A.append(get_vert_connectivity(mesh.v, mesh.f).astype('float32')) # A[0]\n M.append(mesh) # M[0]\n\n for factor in factors:\n ds_f, ds_D = qslim_decimator_transformer(M[-1], factor=factor)\n D.append(ds_D.astype('float32'))\n new_mesh_v = ds_D.dot(M[-1].v)\n new_mesh = Mesh(v=new_mesh_v, f=ds_f)\n F.append(new_mesh.f)\n M.append(new_mesh)\n A.append(\n get_vert_connectivity(new_mesh.v, new_mesh.f).tocoo())\n U.append(setup_deformation_transfer(M[-1], M[-2]).astype('float32'))\n\n return M, A, D, U, F"
},
{
"identifier": "to_edge_index",
"path": "lib/utils_dg.py",
"snippet": "def to_edge_index(mat):\n return torch.LongTensor(np.vstack(mat.nonzero()))"
},
{
"identifier": "to_sparse",
"path": "lib/utils_dg.py",
"snippet": "def to_sparse(spmat):\n return torch.sparse.FloatTensor(\n torch.LongTensor([spmat.tocoo().row,\n spmat.tocoo().col]),\n torch.FloatTensor(spmat.tocoo().data), torch.Size(spmat.tocoo().shape))"
},
{
"identifier": "get_vert_connectivity",
"path": "lib/utils_dg.py",
"snippet": "def get_vert_connectivity(mesh_v, mesh_f):\n \"\"\"Returns a sparse matrix (of size #verts x #verts) where each nonzero\n element indicates a neighborhood relation. For example, if there is a\n nonzero element in position (15,12), that means vertex 15 is connected\n by an edge to vertex 12.\"\"\"\n\n vpv = sp.csc_matrix((len(mesh_v),len(mesh_v)))\n\n # for each column in the faces...\n for i in range(3):\n IS = mesh_f[:,i]\n JS = mesh_f[:,(i+1)%3]\n data = np.ones(len(IS))\n ij = np.vstack((row(IS.flatten()), row(JS.flatten())))\n mtx = sp.csc_matrix((data, ij), shape=vpv.shape)\n vpv = vpv + mtx + mtx.T\n\n return vpv"
},
{
"identifier": "scipy_to_torch_sparse",
"path": "lib/utils_dg.py",
"snippet": "def scipy_to_torch_sparse(scp_matrix):\n values = scp_matrix.data\n indices = np.vstack((scp_matrix.row, scp_matrix.col))\n i = torch.LongTensor(indices)\n v = torch.FloatTensor(values)\n shape = scp_matrix.shape\n\n sparse_tensor = torch.sparse.FloatTensor(i, v, torch.Size(shape))\n return sparse_tensor"
},
{
"identifier": "DeformGraphModel",
"path": "models/deform_model.py",
"snippet": "class DeformGraphModel(torch.nn.Module):\n def __init__(self, deform_graph, renderer, binarization, canonical_mesh, std_lst, lr_rate=5e-4, savedir=\"1017\"):\n super(DeformGraphModel, self).__init__()\n \n self.device = torch.device(\"cuda:0\")\n \n self.deform_graph = deform_graph\n self.cloth_renderer = renderer\n self.binarization = binarization\n self.canonical_mesh = canonical_mesh\n \n self.step_size = lr_rate\n \n self.device = torch.device(\"cuda:0\")\n self.std_lst = std_lst[0]\n self.savedir = savedir\n # self.std_lst_b = std_lst[1]\n \n def iterative_deformgraph(self,\n batch_id,\n vertex_number,\n inputs,\n contours,\n verts,\n opt_d_rotations,\n opt_d_translations,\n times=101):\n \n verts_for_dg = verts.detach()\n verts_for_dg.requires_grad = False\n \n surface_optimizer = torch.optim.Adam([\n {'params': [opt_d_rotations]},\n {'params': [opt_d_translations]}\n ], lr=self.step_size)\n \n w_dg = 50\n w_kp = 0.001\n w_lap = 100\n w_norm = 10\n w_arap = 50\n w_edge = 1\n \n min_loss = 10000\n loop = tqdm(range(times))\n \n inputs_front, inputs_back = inputs[0].to(self.device).float(), inputs[1].to(self.device).float()\n landmark_front, landmark_back = contours[0].to(self.device).float(), contours[1].to(self.device).float() # landmark (2023.02.15)\n \n \n for i in loop:\n surface_optimizer.zero_grad()\n \n # arap: as rigid as possible\n warpped_vertices, loss_arap = self.deform_graph(verts_for_dg, opt_d_rotations, opt_d_translations)\n warpped_vertices = warpped_vertices.squeeze()\n \n src_mesh = Meshes([warpped_vertices], [self.cloth_renderer.faces], self.cloth_renderer.texture)\n \n # front&back\n masks = torch.stack([inputs_front[0], inputs_back[0]]).squeeze()\n \n # mn\n if landmark_back.shape[1] < landmark_front.shape[1]:\n _cc = [landmark_back, torch.zeros(1,1,1,2).cuda()] # original\n # _cc = [landmark_back, torch.zeros(1,1,2).cuda()] # blender\n landmark_back = torch.cat(_cc, 1)\n \n landmarks_canon = torch.stack([landmark_front.squeeze(), landmark_back.squeeze()])\n \n render_mask, specific_verts_2d = self.cloth_renderer.render_silhouette(warpped_vertices, side='both', landmark=True, vertex_number=vertex_number)\n \n # mn\n if specific_verts_2d[0].shape[0] != specific_verts_2d[1].shape[0]:\n _dd = [specific_verts_2d[1], torch.zeros(1,2).cuda()]\n specific_verts_2d[1] = torch.cat(_dd, 0)\n \n render_mask = render_mask[..., 3]\n render_mask_out = self.binarization(render_mask)\n \n loss_dg = nn.MSELoss()(render_mask_out, masks) + 0.3 * mask_iou(render_mask_out, masks) # [2, 512, 512] [2, 512, 512]\n loss_kp = nn.MSELoss()(torch.stack(specific_verts_2d), landmarks_canon)\n edge_mask = edge_extraction(masks)[:, 0].float()\n edge_render_mask = edge_extraction(render_mask_out)[:, 0].float()\n \n loss_edge = nn.L1Loss()(edge_render_mask*render_mask_out, edge_mask)\n \n loss_lap = mesh_laplacian_smoothing(src_mesh, method=\"uniform\")\n loss_norm = mesh_normal_consistency(src_mesh)\n \n # loss = w_dg*loss_dg + w_kp*loss_kp + w_norm*loss_norm + w_arap*loss_arap + w_edge*loss_edge\n loss = w_dg*loss_dg + w_kp*loss_kp + w_norm*loss_norm + w_arap*loss_arap + w_edge*loss_edge # + w_lap*loss_lap + w_norm*loss_norm\n \n loss.backward()\n surface_optimizer.step()\n \n with torch.no_grad():\n render_mask, specific_verts_2d = self.cloth_renderer.render_silhouette(warpped_vertices, side='both', landmark=True, vertex_number=vertex_number)\n f_render_mask, b_render_mask = render_mask[0, ..., 3], render_mask[1, ..., 3]\n f_render_mask, b_render_mask = 
self.binarization(f_render_mask), self.binarization(b_render_mask)\n \n _f_2d, _b_2d = specific_verts_2d[0].cpu().numpy().copy(), specific_verts_2d[1].cpu().numpy().copy()\n \n loop.set_description('[Total]{0:.2f}[Mask]{1:.2f}[Nor]{2:.2f}[KP]{3:.2f}[ARAP]{4:.2f}[Edge]{5:.2f}'.format(loss, w_dg * loss_dg, w_norm*loss_norm, w_kp*loss_kp, w_arap*loss_arap, w_edge*loss_edge))\n \n if float(loss) < min_loss:\n min_loss = float(loss)\n \n aaa1 = f_render_mask.detach().cpu().numpy() * 255.\n aaa2 = b_render_mask.detach().cpu().numpy() * 255.\n \n bbb1 = inputs_front[0][0].unsqueeze(-1).cpu().numpy() * 255.\n bbb2 = inputs_back[0][0].unsqueeze(-1).cpu().numpy() * 255.\n \n if len(aaa1.shape) == 2:\n aaa1 = np.expand_dims(aaa1, -1)\n aaa2 = np.expand_dims(aaa2, -1)\n \n ccc1 = aaa1 * 0.4 + bbb1\n ccc2 = aaa2 * 0.4 + bbb2\n cv2.putText(ccc1, \"front\", (int(10), int(40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (193, 33, 240), 2, cv2.LINE_AA) \n cv2.putText(ccc2, \"back\", (int(10), int(40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (193, 33, 240), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(_f_2d):\n cv2.circle(ccc1, (int(vvvv[0]), int(vvvv[1])), 3, (193, 33, 240), -1)\n cv2.putText(ccc1, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 100, 90), 2, cv2.LINE_AA) \n for iii, vvvv in enumerate(landmarks_canon[0]):\n cv2.circle(ccc1, (int(vvvv[0]), int(vvvv[1])), 3, (193, 33, 240), -1)\n cv2.putText(ccc1, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (80, 40, 200), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(_b_2d):\n if int(vvvv[0]) != 0:\n cv2.circle(ccc2, (int(vvvv[0]), int(vvvv[1])), 3, (193, 33, 240), -1)\n cv2.putText(ccc2, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 100, 90), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(landmarks_canon[1]):\n if int(vvvv[0]) != 0:\n cv2.circle(ccc2, (int(vvvv[0]), int(vvvv[1])), 3, (193, 33, 240), -1)\n cv2.putText(ccc2, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (80, 40, 200), 2, cv2.LINE_AA) \n \n \n cv2.imwrite(\"experiments/{0}/{1}_step2_min.jpg\".format(self.savedir, batch_id), cv2.hconcat([(ccc1.astype(np.uint8)), ccc2.astype(np.uint8)]))\n \n \n ddd1, ddd2 = edge_render_mask[0].unsqueeze(-1).cpu().numpy() * 255., edge_render_mask[1].unsqueeze(-1).cpu().numpy() * 255.\n cv2.imwrite(\"experiments/{0}/{1}_step2_edge.jpg\".format(self.savedir, batch_id), cv2.hconcat([(ddd1.astype(np.uint8)), ddd2.astype(np.uint8)]))\n \n minimum_vertices = warpped_vertices.clone()\n best_opt_d_rot = opt_d_rotations.clone()\n best_opt_d_trans = opt_d_translations.clone()\n \n # if i >= 50:\n # if i % 50 == 0:\n # save_obj(\"experiments/batch_result/mesh/0505_{}.obj\".format(i), warpped_vertices.detach(), self.cloth_renderer.faces)\n # else:\n # if i % 5 == 0:\n # save_obj(\"experiments/batch_result/mesh/0505_{}.obj\".format(i), warpped_vertices.detach(), self.cloth_renderer.faces) \n\n if i % 500 == 0:\n aaa1 = f_render_mask.detach().cpu().numpy() * 255.\n aaa2 = b_render_mask.detach().cpu().numpy() * 255.\n \n bbb1 = inputs_front[0][0].unsqueeze(-1).cpu().numpy() * 255.\n bbb2 = inputs_back[0][0].unsqueeze(-1).cpu().numpy() * 255.\n \n if len(aaa1.shape) == 2:\n aaa1 = np.expand_dims(aaa1, -1)\n aaa2 = np.expand_dims(aaa2, -1)\n \n ccc1 = aaa1 * 0.4 + bbb1\n ccc2 = aaa2 * 0.4 + bbb2\n cv2.putText(ccc1, \"front\", (int(10), int(40)), cv2.FONT_HERSHEY_SIMPLEX, 1, (193, 33, 240), 2, cv2.LINE_AA) \n cv2.putText(ccc2, \"back\", (int(10), int(40)), 
cv2.FONT_HERSHEY_SIMPLEX, 1, (193, 33, 240), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(_f_2d):\n cv2.circle(ccc1, (int(vvvv[0]), int(vvvv[1])), 3, (80, 40, 200), -1)\n cv2.putText(ccc1, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 100, 90), 2, cv2.LINE_AA) \n for iii, vvvv in enumerate(landmarks_canon[0]):\n cv2.circle(ccc1, (int(vvvv[0]), int(vvvv[1])), 3, (80, 40, 200), -1)\n cv2.putText(ccc1, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (80, 40, 200), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(_b_2d):\n if int(vvvv[0]) != 0:\n cv2.circle(ccc2, (int(vvvv[0]), int(vvvv[1])), 3, (80, 40, 200), -1)\n cv2.putText(ccc2, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 100, 90), 2, cv2.LINE_AA) \n \n for iii, vvvv in enumerate(landmarks_canon[1]):\n if int(vvvv[0]) != 0:\n cv2.circle(ccc2, (int(vvvv[0]), int(vvvv[1])), 3, (80, 40, 200), -1)\n cv2.putText(ccc2, self.std_lst[iii], (int(vvvv[0]), int(vvvv[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (80, 40, 200), 2, cv2.LINE_AA) \n \n cv2.imwrite(\"experiments/{0}/{1}_step2_{2}.jpg\".format(self.savedir, batch_id, i), cv2.hconcat([(ccc1.astype(np.uint8)), ccc2.astype(np.uint8)]))\n \n \n print(\"[cloth2tex] [deformation graph parameter]\", opt_d_rotations.shape, opt_d_translations.shape)\n return minimum_vertices, best_opt_d_rot, best_opt_d_trans\n \n def forward(self, x):\n out = self.linear(x)\n # out = self.sigmoid(out)\n return out"
}
] | import argparse
import datetime
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pickle
import os
import os.path as osp
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
import thinplate as tps
import time
import matplotlib.pyplot as plt
import importlib
import random
import json
import cv2
from torchvision.models.feature_extraction import create_feature_extractor, get_graph_node_names
from renderer.cloth_renderer import ClothRenderer
from PIL import Image
from utils.frequency import extract_ampl_phase
from utils.binary_function import Binarize
from utils.tvl_loss import TVLoss, TVMaskLoss
from tqdm import tqdm
from pytorch3d.io import load_obj, save_obj
from itertools import chain
from pytorch3d.structures import Meshes
from pytorch3d.transforms import RotateAxisAngle
from pytorch3d.loss import (
mesh_edge_loss,
mesh_laplacian_smoothing,
mesh_normal_consistency,
)
from lib.deformation_graph import DeformationGraph
from lib.mesh_sampling import generate_transform_matrices_coma
from lib.utils_dg import to_edge_index, to_sparse, get_vert_connectivity, scipy_to_torch_sparse
from models import DeformGraphModel
from torch_geometric.transforms import FaceToEdge
from torch_geometric.data import Data
from psbody.mesh import Mesh
from torch_geometric.io import read_ply | 8,910 | # -*- coding: utf-8 -*-
"""
@date: 2023.03.29-31 week13
@func: PhaseI inference code.
"""
class Trainer(object):
def __init__(self, objfile, savedir, resolution=512, focal_distance=2, verts_num=9648, scale_factor=1.0):
self.device = torch.device("cuda")
#set mesh and visualizer----------------------
self.cloth_renderer = ClothRenderer(objfile, resolution, focal_distance, scale_factor)
if os.path.exists(os.path.join("experiments", savedir)):
pass
else:
os.makedirs(os.path.join("experiments", savedir))
self.savedir = savedir
self.uv = torch.ones((1, 512, 512, 3)).cuda()
self.uv.requires_grad = True
self.optimizer = optim.Adam([self.uv], lr=5e-3, betas=(0.5, 0.999))
# define loss
self.criterion = nn.MSELoss() # nn.L1Loss() nn.MSELoss()
self.mse = nn.MSELoss()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# You can choose TVMaskLoss and test if it is suitable for your case.
self.tvl_loss = TVLoss(weight=1) # TVMaskLoss(weight=1) or TVLoss(weight=1)
# self.tvl_loss = TVMaskLoss(weight=1)
self.canonical_mesh = self.cloth_renderer.canonical_mesh
self.deform_verts = self.cloth_renderer.verts.to(self.device)
self.deform_verts.requires_grad = False
| # -*- coding: utf-8 -*-
"""
@date: 2023.03.29-31 week13
@func: PhaseI inference code.
"""
class Trainer(object):
def __init__(self, objfile, savedir, resolution=512, focal_distance=2, verts_num=9648, scale_factor=1.0):
self.device = torch.device("cuda")
#set mesh and visualizer----------------------
self.cloth_renderer = ClothRenderer(objfile, resolution, focal_distance, scale_factor)
if os.path.exists(os.path.join("experiments", savedir)):
pass
else:
os.makedirs(os.path.join("experiments", savedir))
self.savedir = savedir
self.uv = torch.ones((1, 512, 512, 3)).cuda()
self.uv.requires_grad = True
self.optimizer = optim.Adam([self.uv], lr=5e-3, betas=(0.5, 0.999))
# define loss
self.criterion = nn.MSELoss() # nn.L1Loss() nn.MSELoss()
self.mse = nn.MSELoss()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# You can choose TVMaskLoss and test if it is suitable for your case.
self.tvl_loss = TVLoss(weight=1) # TVMaskLoss(weight=1) or TVLoss(weight=1)
# self.tvl_loss = TVMaskLoss(weight=1)
self.canonical_mesh = self.cloth_renderer.canonical_mesh
self.deform_verts = self.cloth_renderer.verts.to(self.device)
self.deform_verts.requires_grad = False
| self.deform_graph = DeformationGraph(vert_number=verts_num) | 5 | 2023-12-01 06:03:38+00:00 | 12k |
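A minimal, self-contained sketch of the anisotropic total-variation term computed by the TVLoss snippet in the Cloth2Tex row above, assuming a channel-first (B, C, H, W) texture batch; the names tv_loss and uv below are illustrative and not part of the repository.

import torch

def tv_loss(x: torch.Tensor, weight: float = 1.0) -> torch.Tensor:
    b, _, h, w = x.shape
    # squared differences between vertically / horizontally adjacent pixels
    h_tv = (x[:, :, 1:, :] - x[:, :, :h - 1, :]).pow(2).sum()
    w_tv = (x[:, :, :, 1:] - x[:, :, :, :w - 1]).pow(2).sum()
    # per-sample element counts of the shifted tensors, mirroring TVLoss._tensor_size
    count_h = x[:, :, 1:, :].numel() // b
    count_w = x[:, :, :, 1:].numel() // b
    return weight * 2 * (h_tv / count_h + w_tv / count_w) / b

# same spatial size as the optimized UV texture in the Trainer above, laid out channel-first
uv = torch.rand(1, 3, 512, 512, requires_grad=True)
print(tv_loss(uv))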
sherwinbahmani/4dfy | threestudio/systems/base.py | [
{
"identifier": "Exporter",
"path": "threestudio/models/exporters/base.py",
"snippet": "class Exporter(BaseObject):\n @dataclass\n class Config(BaseObject.Config):\n save_video: bool = False\n\n cfg: Config\n\n def configure(\n self,\n geometry: BaseImplicitGeometry,\n material: BaseMaterial,\n background: BaseBackground,\n ) -> None:\n @dataclass\n class SubModules:\n geometry: BaseImplicitGeometry\n material: BaseMaterial\n background: BaseBackground\n\n self.sub_modules = SubModules(geometry, material, background)\n\n @property\n def geometry(self) -> BaseImplicitGeometry:\n return self.sub_modules.geometry\n\n @property\n def material(self) -> BaseMaterial:\n return self.sub_modules.material\n\n @property\n def background(self) -> BaseBackground:\n return self.sub_modules.background\n\n def __call__(self, *args, **kwargs) -> List[ExporterOutput]:\n raise NotImplementedError"
},
{
"identifier": "ExporterOutput",
"path": "threestudio/models/exporters/base.py",
"snippet": "class ExporterOutput:\n save_name: str\n save_type: str\n params: Dict[str, Any]"
},
{
"identifier": "parse_optimizer",
"path": "threestudio/systems/utils.py",
"snippet": "def parse_optimizer(config, model):\n if hasattr(config, \"params\"):\n params = [\n {\"params\": get_parameters(model, name), \"name\": name, **args}\n for name, args in config.params.items()\n ]\n threestudio.debug(f\"Specify optimizer params: {config.params}\")\n else:\n params = model.parameters()\n if config.name in [\"FusedAdam\"]:\n import apex\n\n optim = getattr(apex.optimizers, config.name)(params, **config.args)\n elif config.name in [\"Adan\"]:\n from threestudio.systems import optimizers\n\n optim = getattr(optimizers, config.name)(params, **config.args)\n else:\n optim = getattr(torch.optim, config.name)(params, **config.args)\n return optim"
},
{
"identifier": "parse_scheduler",
"path": "threestudio/systems/utils.py",
"snippet": "def parse_scheduler(config, optimizer):\n interval = config.get(\"interval\", \"epoch\")\n assert interval in [\"epoch\", \"step\"]\n if config.name == \"SequentialLR\":\n scheduler = {\n \"scheduler\": lr_scheduler.SequentialLR(\n optimizer,\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ],\n milestones=config.milestones,\n ),\n \"interval\": interval,\n }\n elif config.name == \"ChainedScheduler\":\n scheduler = {\n \"scheduler\": lr_scheduler.ChainedScheduler(\n [\n parse_scheduler(conf, optimizer)[\"scheduler\"]\n for conf in config.schedulers\n ]\n ),\n \"interval\": interval,\n }\n else:\n scheduler = {\n \"scheduler\": get_scheduler(config.name)(optimizer, **config.args),\n \"interval\": interval,\n }\n return scheduler"
},
{
"identifier": "Updateable",
"path": "threestudio/utils/base.py",
"snippet": "class Updateable:\n def do_update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ):\n for attr in self.__dir__():\n if attr.startswith(\"_\"):\n continue\n try:\n module = getattr(self, attr)\n except:\n continue # ignore attributes like property, which can't be retrived using getattr?\n if isinstance(module, Updateable):\n module.do_update_step(\n epoch, global_step, on_load_weights=on_load_weights\n )\n self.update_step(epoch, global_step, on_load_weights=on_load_weights)\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n # override this method to implement custom update logic\n # if on_load_weights is True, you should be careful doing things related to model evaluations,\n # as the models and tensors are not guarenteed to be on the same device\n pass"
},
{
"identifier": "update_if_possible",
"path": "threestudio/utils/base.py",
"snippet": "def update_if_possible(module: Any, epoch: int, global_step: int) -> None:\n if isinstance(module, Updateable):\n module.do_update_step(epoch, global_step)"
},
{
"identifier": "parse_structured",
"path": "threestudio/utils/config.py",
"snippet": "def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]] = None) -> Any:\n scfg = OmegaConf.structured(fields(**cfg))\n return scfg"
},
{
"identifier": "C",
"path": "threestudio/utils/misc.py",
"snippet": "def C(value: Any, epoch: int, global_step: int) -> float:\n if isinstance(value, int) or isinstance(value, float):\n pass\n else:\n value = config_to_primitive(value)\n if not isinstance(value, list):\n raise TypeError(\"Scalar specification only supports list, got\", type(value))\n if len(value) == 3:\n value = [0] + value\n assert len(value) == 4\n start_step, start_value, end_value, end_step = value\n if isinstance(end_step, int):\n current_step = global_step\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n elif isinstance(end_step, float):\n current_step = epoch\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n return value"
},
{
"identifier": "cleanup",
"path": "threestudio/utils/misc.py",
"snippet": "def cleanup():\n gc.collect()\n torch.cuda.empty_cache()\n tcnn.free_temporary_memory()"
},
{
"identifier": "get_device",
"path": "threestudio/utils/misc.py",
"snippet": "def get_device():\n return torch.device(f\"cuda:{get_rank()}\")"
},
{
"identifier": "load_module_weights",
"path": "threestudio/utils/misc.py",
"snippet": "def load_module_weights(\n path, module_name=None, ignore_modules=None, map_location=None\n) -> Tuple[dict, int, int]:\n if module_name is not None and ignore_modules is not None:\n raise ValueError(\"module_name and ignore_modules cannot be both set\")\n if map_location is None:\n map_location = get_device()\n\n ckpt = torch.load(path, map_location=map_location)\n state_dict = ckpt[\"state_dict\"]\n state_dict_to_load = state_dict\n\n if ignore_modules is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n ignore = any(\n [k.startswith(ignore_module + \".\") for ignore_module in ignore_modules]\n )\n if ignore:\n continue\n state_dict_to_load[k] = v\n\n if module_name is not None:\n state_dict_to_load = {}\n for k, v in state_dict.items():\n m = re.match(rf\"^{module_name}\\.(.*)$\", k)\n if m is None:\n continue\n state_dict_to_load[m.group(1)] = v\n\n return state_dict_to_load, ckpt[\"epoch\"], ckpt[\"global_step\"]"
},
{
"identifier": "SaverMixin",
"path": "threestudio/utils/saving.py",
"snippet": "class SaverMixin:\n _save_dir: Optional[str] = None\n _wandb_logger: Optional[WandbLogger] = None\n\n def set_save_dir(self, save_dir: str):\n self._save_dir = save_dir\n\n def get_save_dir(self):\n if self._save_dir is None:\n raise ValueError(\"Save dir is not set\")\n return self._save_dir\n\n def convert_data(self, data):\n if data is None:\n return None\n elif isinstance(data, np.ndarray):\n return data\n elif isinstance(data, torch.Tensor):\n return data.cpu().numpy()\n elif isinstance(data, list):\n return [self.convert_data(d) for d in data]\n elif isinstance(data, dict):\n return {k: self.convert_data(v) for k, v in data.items()}\n else:\n raise TypeError(\n \"Data must be in type numpy.ndarray, torch.Tensor, list or dict, getting\",\n type(data),\n )\n\n def get_save_path(self, filename):\n save_path = os.path.join(self.get_save_dir(), filename)\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n return save_path\n\n def create_loggers(self, cfg_loggers: DictConfig) -> None:\n if \"wandb\" in cfg_loggers.keys() and cfg_loggers.wandb.enable:\n if 'save_dir' in cfg_loggers.wandb:\n save_dir = cfg_loggers.wandb.save_dir\n else:\n save_dir = None\n self._wandb_logger = WandbLogger(project=cfg_loggers.wandb.project, save_dir=save_dir)\n\n def get_loggers(self) -> List:\n if self._wandb_logger:\n return [self._wandb_logger]\n else:\n return []\n\n DEFAULT_RGB_KWARGS = {\"data_format\": \"HWC\", \"data_range\": (0, 1)}\n DEFAULT_UV_KWARGS = {\n \"data_format\": \"HWC\",\n \"data_range\": (0, 1),\n \"cmap\": \"checkerboard\",\n }\n DEFAULT_GRAYSCALE_KWARGS = {\"data_range\": None, \"cmap\": \"jet\"}\n DEFAULT_GRID_KWARGS = {\"align\": \"max\"}\n\n def get_rgb_image_(self, img, data_format, data_range, rgba=False):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n if img.dtype != np.uint8:\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (\n (img - data_range[0]) / (data_range[1] - data_range[0]) * 255.0\n ).astype(np.uint8)\n nc = 4 if rgba else 3\n imgs = [img[..., start : start + nc] for start in range(0, img.shape[-1], nc)]\n imgs = [\n img_\n if img_.shape[-1] == nc\n else np.concatenate(\n [\n img_,\n np.zeros(\n (img_.shape[0], img_.shape[1], nc - img_.shape[2]),\n dtype=img_.dtype,\n ),\n ],\n axis=-1,\n )\n for img_ in imgs\n ]\n img = np.concatenate(imgs, axis=1)\n if rgba:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n else:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def _save_rgb_image(\n self,\n filename,\n img,\n data_format,\n data_range,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n img = self.get_rgb_image_(img, data_format, data_range)\n cv2.imwrite(filename, img)\n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Image(self.get_save_path(filename)),\n \"trainer/global_step\": step,\n }\n )\n\n def save_rgb_image(\n self,\n filename,\n img,\n data_format=DEFAULT_RGB_KWARGS[\"data_format\"],\n data_range=DEFAULT_RGB_KWARGS[\"data_range\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n self._save_rgb_image(\n self.get_save_path(filename), img, data_format, data_range, name, step\n )\n\n def get_uv_image_(self, img, data_format, data_range, cmap):\n img = self.convert_data(img)\n assert data_format in [\"CHW\", \"HWC\"]\n if data_format == \"CHW\":\n img = img.transpose(1, 2, 0)\n img = img.clip(min=data_range[0], max=data_range[1])\n img = (img - data_range[0]) / (data_range[1] - 
data_range[0])\n assert cmap in [\"checkerboard\", \"color\"]\n if cmap == \"checkerboard\":\n n_grid = 64\n mask = (img * n_grid).astype(int)\n mask = (mask[..., 0] + mask[..., 1]) % 2 == 0\n img = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8) * 255\n img[mask] = np.array([255, 0, 255], dtype=np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif cmap == \"color\":\n img_ = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n img_[..., 0] = (img[..., 0] * 255).astype(np.uint8)\n img_[..., 1] = (img[..., 1] * 255).astype(np.uint8)\n img_ = cv2.cvtColor(img_, cv2.COLOR_RGB2BGR)\n img = img_\n return img\n\n def save_uv_image(\n self,\n filename,\n img,\n data_format=DEFAULT_UV_KWARGS[\"data_format\"],\n data_range=DEFAULT_UV_KWARGS[\"data_range\"],\n cmap=DEFAULT_UV_KWARGS[\"cmap\"],\n ):\n img = self.get_uv_image_(img, data_format, data_range, cmap)\n cv2.imwrite(self.get_save_path(filename), img)\n\n def get_grayscale_image_(self, img, data_range, cmap):\n img = self.convert_data(img)\n img = np.nan_to_num(img)\n if data_range is None:\n img = (img - img.min()) / (img.max() - img.min())\n elif data_range == 'nonzero':\n perc_min = self.visu_perc_min_depth\n perc_max = self.visu_perc_max_depth\n img = img.clip(perc_min, perc_max)\n img = (img - perc_min) / (perc_max - perc_min)\n img = 1-img\n else:\n img = img.clip(data_range[0], data_range[1])\n img = (img - data_range[0]) / (data_range[1] - data_range[0])\n assert cmap in [None, \"jet\", \"magma\", \"spectral\", \"inferno\"]\n if cmap == None:\n img = (img * 255.0).astype(np.uint8)\n img = np.repeat(img[..., None], 3, axis=2)\n elif cmap in [\"jet\", \"inferno\"]:\n img = (img * 255.0).astype(np.uint8)\n if cmap == \"jet\":\n color_map = cv2.COLORMAP_JET\n elif cmap == \"inferno\":\n color_map = cv2.COLORMAP_INFERNO\n img = cv2.applyColorMap(img, color_map)\n elif cmap == \"magma\":\n img = 1.0 - img\n base = cm.get_cmap(\"magma\")\n num_bins = 256\n colormap = LinearSegmentedColormap.from_list(\n f\"{base.name}{num_bins}\", base(np.linspace(0, 1, num_bins)), num_bins\n )(np.linspace(0, 1, num_bins))[:, :3]\n a = np.floor(img * 255.0)\n b = (a + 1).clip(max=255.0)\n f = img * 255.0 - a\n a = a.astype(np.uint16).clip(0, 255)\n b = b.astype(np.uint16).clip(0, 255)\n img = colormap[a] + (colormap[b] - colormap[a]) * f[..., None]\n img = (img * 255.0).astype(np.uint8)\n elif cmap == \"spectral\":\n colormap = plt.get_cmap(\"Spectral\")\n\n def blend_rgba(image):\n image = image[..., :3] * image[..., -1:] + (\n 1.0 - image[..., -1:]\n ) # blend A to RGB\n return image\n\n img = colormap(img)\n img = blend_rgba(img)\n img = (img * 255).astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return img\n\n def save_grayscale_image(\n self,\n filename,\n img,\n data_range=DEFAULT_GRAYSCALE_KWARGS[\"data_range\"],\n cmap=DEFAULT_GRAYSCALE_KWARGS[\"cmap\"],\n ):\n img = self.get_grayscale_image_(img, data_range, cmap)\n cv2.imwrite(self.get_save_path(filename), img)\n\n def get_image_grid_(self, imgs, align):\n if isinstance(imgs[0], list):\n return np.concatenate(\n [self.get_image_grid_(row, align) for row in imgs], axis=0\n )\n cols = []\n for col in imgs:\n assert col[\"type\"] in [\"rgb\", \"uv\", \"grayscale\"]\n if col[\"type\"] == \"rgb\":\n rgb_kwargs = self.DEFAULT_RGB_KWARGS.copy()\n rgb_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_rgb_image_(col[\"img\"], **rgb_kwargs))\n elif col[\"type\"] == \"uv\":\n uv_kwargs = self.DEFAULT_UV_KWARGS.copy()\n uv_kwargs.update(col[\"kwargs\"])\n 
cols.append(self.get_uv_image_(col[\"img\"], **uv_kwargs))\n elif col[\"type\"] == \"grayscale\":\n grayscale_kwargs = self.DEFAULT_GRAYSCALE_KWARGS.copy()\n grayscale_kwargs.update(col[\"kwargs\"])\n cols.append(self.get_grayscale_image_(col[\"img\"], **grayscale_kwargs))\n\n if align == \"max\":\n h = max([col.shape[0] for col in cols])\n w = max([col.shape[1] for col in cols])\n elif align == \"min\":\n h = min([col.shape[0] for col in cols])\n w = min([col.shape[1] for col in cols])\n elif isinstance(align, int):\n h = align\n w = align\n elif (\n isinstance(align, tuple)\n and isinstance(align[0], int)\n and isinstance(align[1], int)\n ):\n h, w = align\n else:\n raise ValueError(\n f\"Unsupported image grid align: {align}, should be min, max, int or (int, int)\"\n )\n\n for i in range(len(cols)):\n if cols[i].shape[0] != h or cols[i].shape[1] != w:\n cols[i] = cv2.resize(cols[i], (w, h), interpolation=cv2.INTER_LINEAR)\n if len(cols[i].shape) != 3:\n cols[i] = cols[i].squeeze(3)\n\n return np.concatenate(cols, axis=1)\n\n def save_image_grid(\n self,\n filename,\n imgs,\n align=DEFAULT_GRID_KWARGS[\"align\"],\n name: Optional[str] = None,\n step: Optional[int] = None,\n video: bool = False,\n fps: int = 16,\n ):\n if video:\n B, H, W, C = imgs[0][\"img\"].shape\n for i, img_frames in enumerate(imgs):\n imgs[i]['img'] = torch.cat([img for img in img_frames['img']], 1)\n img = self.get_image_grid_(imgs, align=align)\n filepath = self.get_save_path(filename)\n cv2.imwrite(filepath, img)\n if video:\n img_video = np.stack([img[:, i*W:(i+1)*W] for i in range(B)])\n img_video = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in img_video]\n # imageio.mimsave(self.get_save_path(filename.replace(\".png\", \".mp4\")), img_video, fps=fps)\n torchvision.io.write_video(self.get_save_path(filename.replace(\".png\", \".mp4\")), img_video, fps=fps)\n \n if name and self._wandb_logger:\n wandb.log({name: wandb.Image(filepath), \"trainer/global_step\": step})\n\n def save_image(self, filename, img):\n img = self.convert_data(img)\n assert img.dtype == np.uint8 or img.dtype == np.uint16\n if img.ndim == 3 and img.shape[-1] == 3:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif img.ndim == 3 and img.shape[-1] == 4:\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGRA)\n cv2.imwrite(self.get_save_path(filename), img)\n\n def save_cubemap(self, filename, img, data_range=(0, 1), rgba=False):\n img = self.convert_data(img)\n assert img.ndim == 4 and img.shape[0] == 6 and img.shape[1] == img.shape[2]\n\n imgs_full = []\n for start in range(0, img.shape[-1], 3):\n img_ = img[..., start : start + 3]\n img_ = np.stack(\n [\n self.get_rgb_image_(img_[i], \"HWC\", data_range, rgba=rgba)\n for i in range(img_.shape[0])\n ],\n axis=0,\n )\n size = img_.shape[1]\n placeholder = np.zeros((size, size, 3), dtype=np.float32)\n img_full = np.concatenate(\n [\n np.concatenate(\n [placeholder, img_[2], placeholder, placeholder], axis=1\n ),\n np.concatenate([img_[1], img_[4], img_[0], img_[5]], axis=1),\n np.concatenate(\n [placeholder, img_[3], placeholder, placeholder], axis=1\n ),\n ],\n axis=0,\n )\n imgs_full.append(img_full)\n\n imgs_full = np.concatenate(imgs_full, axis=1)\n cv2.imwrite(self.get_save_path(filename), imgs_full)\n\n def save_data(self, filename, data):\n data = self.convert_data(data)\n if isinstance(data, dict):\n if not filename.endswith(\".npz\"):\n filename += \".npz\"\n np.savez(self.get_save_path(filename), **data)\n else:\n if not filename.endswith(\".npy\"):\n filename += \".npy\"\n 
np.save(self.get_save_path(filename), data)\n\n def save_state_dict(self, filename, data):\n torch.save(data, self.get_save_path(filename))\n\n def save_img_sequence(\n self,\n filename,\n img_dir,\n matcher,\n save_format=\"mp4\",\n fps=30,\n name: Optional[str] = None,\n step: Optional[int] = None,\n ):\n assert save_format in [\"gif\", \"mp4\"]\n if not filename.endswith(save_format):\n filename += f\".{save_format}\"\n matcher = re.compile(matcher)\n img_dir = os.path.join(self.get_save_dir(), img_dir)\n imgs = []\n for f in os.listdir(img_dir):\n if matcher.search(f):\n imgs.append(f)\n imgs = sorted(imgs, key=lambda f: int(matcher.search(f).groups()[0]))\n imgs = [cv2.imread(os.path.join(img_dir, f)) for f in imgs]\n\n if save_format == \"gif\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n imageio.mimsave(\n self.get_save_path(filename), imgs, fps=fps, palettesize=256\n )\n elif save_format == \"mp4\":\n imgs = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in imgs]\n # imageio.mimsave(self.get_save_path(filename), imgs, fps=fps)\n torchvision.io.write_video(self.get_save_path(filename), imgs, fps=fps)\n \n if name and self._wandb_logger:\n wandb.log(\n {\n name: wandb.Video(self.get_save_path(filename), format=\"mp4\"),\n \"trainer/global_step\": step,\n }\n )\n\n def save_mesh(self, filename, v_pos, t_pos_idx, v_tex=None, t_tex_idx=None):\n v_pos = self.convert_data(v_pos)\n t_pos_idx = self.convert_data(t_pos_idx)\n mesh = trimesh.Trimesh(vertices=v_pos, faces=t_pos_idx)\n mesh.export(self.get_save_path(filename))\n\n def save_obj(\n self,\n filename: str,\n mesh: Mesh,\n save_mat: bool = False,\n save_normal: bool = False,\n save_uv: bool = False,\n save_vertex_color: bool = False,\n map_Kd: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Ks: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_Bump: Optional[Float[Tensor, \"H W 3\"]] = None,\n map_format: str = \"jpg\",\n ) -> None:\n if not filename.endswith(\".obj\"):\n filename += \".obj\"\n v_pos, t_pos_idx = self.convert_data(mesh.v_pos), self.convert_data(\n mesh.t_pos_idx\n )\n v_nrm, v_tex, t_tex_idx, v_rgb = None, None, None, None\n if save_normal:\n v_nrm = self.convert_data(mesh.v_nrm)\n if save_uv:\n v_tex, t_tex_idx = self.convert_data(mesh.v_tex), self.convert_data(\n mesh.t_tex_idx\n )\n if save_vertex_color:\n v_rgb = self.convert_data(mesh.v_rgb)\n matname, mtllib = None, None\n if save_mat:\n matname = \"default\"\n mtl_filename = filename.replace(\".obj\", \".mtl\")\n mtllib = os.path.basename(mtl_filename)\n self._save_mtl(\n mtl_filename,\n matname,\n map_Kd=self.convert_data(map_Kd),\n map_Ks=self.convert_data(map_Ks),\n map_Bump=self.convert_data(map_Bump),\n map_format=map_format,\n )\n self._save_obj(\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=v_nrm,\n v_tex=v_tex,\n t_tex_idx=t_tex_idx,\n v_rgb=v_rgb,\n matname=matname,\n mtllib=mtllib,\n )\n\n def _save_obj(\n self,\n filename,\n v_pos,\n t_pos_idx,\n v_nrm=None,\n v_tex=None,\n t_tex_idx=None,\n v_rgb=None,\n matname=None,\n mtllib=None,\n ):\n obj_str = \"\"\n if matname is not None:\n obj_str += f\"mtllib {mtllib}\\n\"\n obj_str += f\"g object\\n\"\n obj_str += f\"usemtl {matname}\\n\"\n for i in range(len(v_pos)):\n obj_str += f\"v {v_pos[i][0]} {v_pos[i][1]} {v_pos[i][2]}\"\n if v_rgb is not None:\n obj_str += f\" {v_rgb[i][0]} {v_rgb[i][1]} {v_rgb[i][2]}\"\n obj_str += \"\\n\"\n if v_nrm is not None:\n for v in v_nrm:\n obj_str += f\"vn {v[0]} {v[1]} {v[2]}\\n\"\n if v_tex is not None:\n for v in v_tex:\n obj_str += f\"vt {v[0]} {1.0 - 
v[1]}\\n\"\n\n for i in range(len(t_pos_idx)):\n obj_str += \"f\"\n for j in range(3):\n obj_str += f\" {t_pos_idx[i][j] + 1}/\"\n if v_tex is not None:\n obj_str += f\"{t_tex_idx[i][j] + 1}\"\n obj_str += \"/\"\n if v_nrm is not None:\n obj_str += f\"{t_pos_idx[i][j] + 1}\"\n obj_str += \"\\n\"\n\n with open(self.get_save_path(filename), \"w\") as f:\n f.write(obj_str)\n\n def _save_mtl(\n self,\n filename,\n matname,\n Ka=(0.0, 0.0, 0.0),\n Kd=(1.0, 1.0, 1.0),\n Ks=(0.0, 0.0, 0.0),\n map_Kd=None,\n map_Ks=None,\n map_Bump=None,\n map_format=\"jpg\",\n step: Optional[int] = None,\n ):\n mtl_str = f\"newmtl {matname}\\n\"\n mtl_str += f\"Ka {Ka[0]} {Ka[1]} {Ka[2]}\\n\"\n mtl_save_path = self.get_save_path(filename)\n if map_Kd is not None:\n mtl_str += f\"map_Kd texture_kd.{map_format}\\n\"\n self._save_rgb_image(\n os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_kd.{map_format}\"\n ),\n map_Kd,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Kd\",\n step=step,\n )\n else:\n mtl_str += f\"Kd {Kd[0]} {Kd[1]} {Kd[2]}\\n\"\n if map_Ks is not None:\n mtl_str += f\"map_Ks texture_ks.{map_format}\\n\"\n self._save_rgb_image(\n os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_ks.{map_format}\"\n ),\n map_Ks,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Ks\",\n step=step,\n )\n else:\n mtl_str += f\"Ks {Ks[0]} {Ks[1]} {Ks[2]}\\n\"\n if map_Bump is not None:\n mtl_str += f\"map_Bump texture_nrm.{map_format}\\n\"\n self._save_rgb_image(\n os.path.join(\n os.path.dirname(mtl_save_path), f\"texture_nrm.{map_format}\"\n ),\n map_Bump,\n data_format=\"HWC\",\n data_range=(0, 1),\n name=f\"{matname}_Bump\",\n step=step,\n )\n with open(self.get_save_path(filename), \"w\") as f:\n f.write(mtl_str)\n\n def save_file(self, filename, src_path):\n shutil.copyfile(src_path, self.get_save_path(filename))\n\n def save_json(self, filename, payload):\n with open(self.get_save_path(filename), \"w\") as f:\n f.write(json.dumps(payload))"
}
] | import os
import pytorch_lightning as pl
import threestudio
from dataclasses import dataclass, field
from threestudio.models.exporters.base import Exporter, ExporterOutput
from threestudio.systems.utils import parse_optimizer, parse_scheduler
from threestudio.utils.base import Updateable, update_if_possible
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import C, cleanup, get_device, load_module_weights
from threestudio.utils.saving import SaverMixin
from threestudio.utils.typing import *
from threestudio.utils.config import load_config, parse_structured | 9,485 |
def on_train_batch_start(self, batch, batch_idx, unused=0):
self.preprocess_data(batch, "train")
self.dataset = self.trainer.train_dataloader.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_validation_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "validation")
self.dataset = self.trainer.val_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_test_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "test")
self.dataset = self.trainer.test_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_predict_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "predict")
self.dataset = self.trainer.predict_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
pass
def on_before_optimizer_step(self, optimizer):
"""
# some gradient-related debugging goes here, example:
from lightning.pytorch.utilities import grad_norm
norms = grad_norm(self.geometry, norm_type=2)
print(norms)
"""
pass
class BaseLift3DSystem(BaseSystem):
@dataclass
class Config(BaseSystem.Config):
geometry_type: str = ""
geometry: dict = field(default_factory=dict)
geometry_convert_from: Optional[str] = None
geometry_convert_inherit_texture: bool = False
# used to override configurations of the previous geometry being converted from,
# for example isosurface_threshold
geometry_convert_override: dict = field(default_factory=dict)
material_type: str = ""
material: dict = field(default_factory=dict)
background_type: str = ""
background: dict = field(default_factory=dict)
renderer_type: str = ""
renderer: dict = field(default_factory=dict)
guidance_type: str = ""
guidance: dict = field(default_factory=dict)
prompt_processor_type: str = ""
prompt_processor: dict = field(default_factory=dict)
simultan: bool = False
guidance_type_image: str = ""
guidance_image: dict = field(default_factory=dict)
prompt_processor_type_image: str = ""
prompt_processor_image: dict = field(default_factory=dict)
guidance_type_multi_view: str = ""
guidance_multi_view: dict = field(default_factory=dict)
prompt_processor_type_multi_view: str = ""
prompt_processor_multi_view: dict = field(default_factory=dict)
guidance_type_video: str = ""
guidance_video: dict = field(default_factory=dict)
prompt_processor_type_video: str = ""
prompt_processor_video: dict = field(default_factory=dict)
# geometry export configurations, no need to specify in training
exporter_type: str = "mesh-exporter"
exporter: dict = field(default_factory=dict)
cfg: Config
def configure(self) -> None:
if (
self.cfg.geometry_convert_from # from_coarse must be specified
and not self.cfg.weights # not initialized from coarse when weights are specified
and not self.resumed # not initialized from coarse when resumed from checkpoints
):
threestudio.info("Initializing geometry from a given checkpoint ...")
prev_cfg = load_config(
os.path.join(
os.path.dirname(self.cfg.geometry_convert_from),
"../configs/parsed.yaml",
)
) # TODO: hard-coded relative path
prev_system_cfg: BaseLift3DSystem.Config = parse_structured(
self.Config, prev_cfg.system
)
prev_geometry_cfg = prev_system_cfg.geometry
prev_geometry_cfg.update(self.cfg.geometry_convert_override)
prev_geometry = threestudio.find(prev_system_cfg.geometry_type)(
prev_geometry_cfg
)
state_dict, epoch, global_step = load_module_weights(
self.cfg.geometry_convert_from,
module_name="geometry",
map_location="cpu",
)
prev_geometry.load_state_dict(state_dict, strict=False)
# restore step-dependent states
prev_geometry.do_update_step(epoch, global_step, on_load_weights=True)
# convert from coarse stage geometry
|
class BaseSystem(pl.LightningModule, Updateable, SaverMixin):
@dataclass
class Config:
loggers: dict = field(default_factory=dict)
loss: dict = field(default_factory=dict)
optimizer: dict = field(default_factory=dict)
scheduler: Optional[dict] = None
weights: Optional[str] = None
weights_ignore_modules: Optional[List[str]] = None
cleanup_after_validation_step: bool = False
cleanup_after_test_step: bool = False
cfg: Config
def __init__(self, cfg, resumed=False) -> None:
super().__init__()
self.cfg = parse_structured(self.Config, cfg)
self._save_dir: Optional[str] = None
self._resumed: bool = resumed
self._resumed_eval: bool = False
self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0}
if "loggers" in cfg:
self.create_loggers(cfg.loggers)
self.configure()
if self.cfg.weights is not None:
self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules)
self.post_configure()
def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None):
state_dict, epoch, global_step = load_module_weights(
weights, ignore_modules=ignore_modules, map_location="cpu"
)
self.load_state_dict(state_dict, strict=False)
# restore step-dependent states
self.do_update_step(epoch, global_step, on_load_weights=True)
def set_resume_status(self, current_epoch: int, global_step: int):
# restore correct epoch and global step in eval
self._resumed_eval = True
self._resumed_eval_status["current_epoch"] = current_epoch
self._resumed_eval_status["global_step"] = global_step
@property
def resumed(self):
# whether from resumed checkpoint
return self._resumed
@property
def true_global_step(self):
if self._resumed_eval:
return self._resumed_eval_status["global_step"]
else:
return self.global_step
@property
def true_current_epoch(self):
if self._resumed_eval:
return self._resumed_eval_status["current_epoch"]
else:
return self.current_epoch
def configure(self) -> None:
pass
def post_configure(self) -> None:
"""
executed after weights are loaded
"""
pass
def C(self, value: Any) -> float:
return C(value, self.true_current_epoch, self.true_global_step)
def configure_optimizers(self):
optim = parse_optimizer(self.cfg.optimizer, self)
ret = {
"optimizer": optim,
}
if self.cfg.scheduler is not None:
ret.update(
{
"lr_scheduler": parse_scheduler(self.cfg.scheduler, optim),
}
)
return ret
def training_step(self, batch, batch_idx):
raise NotImplementedError
def validation_step(self, batch, batch_idx):
raise NotImplementedError
def on_validation_batch_end(self, outputs, batch, batch_idx):
if self.cfg.cleanup_after_validation_step:
# cleanup to save vram
cleanup()
def on_validation_epoch_end(self):
raise NotImplementedError
def test_step(self, batch, batch_idx):
raise NotImplementedError
def on_test_batch_end(self, outputs, batch, batch_idx):
if self.cfg.cleanup_after_test_step:
# cleanup to save vram
cleanup()
def on_test_epoch_end(self):
pass
def predict_step(self, batch, batch_idx):
raise NotImplementedError
def on_predict_batch_end(self, outputs, batch, batch_idx):
if self.cfg.cleanup_after_test_step:
# cleanup to save vram
cleanup()
def on_predict_epoch_end(self):
pass
def preprocess_data(self, batch, stage):
pass
"""
Implementing on_after_batch_transfer of DataModule does the same.
But on_after_batch_transfer does not support DP.
"""
def on_train_batch_start(self, batch, batch_idx, unused=0):
self.preprocess_data(batch, "train")
self.dataset = self.trainer.train_dataloader.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_validation_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "validation")
self.dataset = self.trainer.val_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_test_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "test")
self.dataset = self.trainer.test_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def on_predict_batch_start(self, batch, batch_idx, dataloader_idx=0):
self.preprocess_data(batch, "predict")
self.dataset = self.trainer.predict_dataloaders.dataset
update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
self.do_update_step(self.true_current_epoch, self.true_global_step)
def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
pass
def on_before_optimizer_step(self, optimizer):
"""
# some gradient-related debugging goes here, example:
from lightning.pytorch.utilities import grad_norm
norms = grad_norm(self.geometry, norm_type=2)
print(norms)
"""
pass
class BaseLift3DSystem(BaseSystem):
@dataclass
class Config(BaseSystem.Config):
geometry_type: str = ""
geometry: dict = field(default_factory=dict)
geometry_convert_from: Optional[str] = None
geometry_convert_inherit_texture: bool = False
# used to override configurations of the previous geometry being converted from,
# for example isosurface_threshold
geometry_convert_override: dict = field(default_factory=dict)
material_type: str = ""
material: dict = field(default_factory=dict)
background_type: str = ""
background: dict = field(default_factory=dict)
renderer_type: str = ""
renderer: dict = field(default_factory=dict)
guidance_type: str = ""
guidance: dict = field(default_factory=dict)
prompt_processor_type: str = ""
prompt_processor: dict = field(default_factory=dict)
simultan: bool = False
guidance_type_image: str = ""
guidance_image: dict = field(default_factory=dict)
prompt_processor_type_image: str = ""
prompt_processor_image: dict = field(default_factory=dict)
guidance_type_multi_view: str = ""
guidance_multi_view: dict = field(default_factory=dict)
prompt_processor_type_multi_view: str = ""
prompt_processor_multi_view: dict = field(default_factory=dict)
guidance_type_video: str = ""
guidance_video: dict = field(default_factory=dict)
prompt_processor_type_video: str = ""
prompt_processor_video: dict = field(default_factory=dict)
# geometry export configurations, no need to specify in training
exporter_type: str = "mesh-exporter"
exporter: dict = field(default_factory=dict)
cfg: Config
def configure(self) -> None:
if (
self.cfg.geometry_convert_from # from_coarse must be specified
and not self.cfg.weights # not initialized from coarse when weights are specified
and not self.resumed # not initialized from coarse when resumed from checkpoints
):
threestudio.info("Initializing geometry from a given checkpoint ...")
prev_cfg = load_config(
os.path.join(
os.path.dirname(self.cfg.geometry_convert_from),
"../configs/parsed.yaml",
)
) # TODO: hard-coded relative path
prev_system_cfg: BaseLift3DSystem.Config = parse_structured(
self.Config, prev_cfg.system
)
prev_geometry_cfg = prev_system_cfg.geometry
prev_geometry_cfg.update(self.cfg.geometry_convert_override)
prev_geometry = threestudio.find(prev_system_cfg.geometry_type)(
prev_geometry_cfg
)
state_dict, epoch, global_step = load_module_weights(
self.cfg.geometry_convert_from,
module_name="geometry",
map_location="cpu",
)
prev_geometry.load_state_dict(state_dict, strict=False)
# restore step-dependent states
prev_geometry.do_update_step(epoch, global_step, on_load_weights=True)
# convert from coarse stage geometry | prev_geometry = prev_geometry.to(get_device()) | 9 | 2023-11-29 05:15:56+00:00 | 12k |
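A minimal, self-contained re-statement of the scalar scheduling rule implemented by the C() helper shown in the threestudio row above (threestudio/utils/misc.py): a config value is either a plain number or a list [start_step, start_value, end_value, end_step] interpolated linearly over global steps (integer end_step) or epochs (float end_step). The function name scheduled_value is illustrative, not part of the package.

def scheduled_value(value, epoch: int, global_step: int) -> float:
    if isinstance(value, (int, float)):
        return float(value)
    if len(value) == 3:  # a three-element spec implies start_step = 0
        value = [0] + list(value)
    start_step, start_value, end_value, end_step = value
    # integer end_step schedules against the global step, float against the epoch
    current = global_step if isinstance(end_step, int) else epoch
    t = max(min(1.0, (current - start_step) / (end_step - start_step)), 0.0)
    return start_value + (end_value - start_value) * t

# e.g. a loss or guidance weight ramped from 0.0 to 1.0 between steps 1000 and 5000
print(scheduled_value([1000, 0.0, 1.0, 5000], epoch=0, global_step=3000))  # -> 0.5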
rlawjdghek/StableVITON | ldm/models/diffusion/ddpm.py | [
{
"identifier": "log_txt_as_img",
"path": "ldm/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts"
},
{
"identifier": "exists",
"path": "ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
},
{
"identifier": "default",
"path": "ldm/util.py",
"snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d"
},
{
"identifier": "ismap",
"path": "ldm/util.py",
"snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)"
},
{
"identifier": "isimage",
"path": "ldm/util.py",
"snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)"
},
{
"identifier": "mean_flat",
"path": "ldm/util.py",
"snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))"
},
{
"identifier": "count_params",
"path": "ldm/util.py",
"snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params"
},
{
"identifier": "instantiate_from_config",
"path": "ldm/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
},
{
"identifier": "LitEma",
"path": "ldm/modules/ema.py",
"snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)"
},
{
"identifier": "normal_kl",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )"
},
{
"identifier": "DiagonalGaussianDistribution",
"path": "ldm/modules/distributions/distributions.py",
"snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean"
},
{
"identifier": "IdentityFirstStage",
"path": "ldm/models/autoencoder.py",
"snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x"
},
{
"identifier": "AutoencoderKL",
"path": "ldm/models/autoencoder.py",
"snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.lossconfig = lossconfig\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = torch.nn.Identity()\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. < ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n def init_loss(self):\n self.loss = instantiate_from_config(self.lossconfig)\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx):\n real_img = self.get_input(batch, self.image_key)\n recon, posterior = self(real_img)\n loss = self.loss(real_img, recon, posterior)\n return loss\n \n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, 
reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n def configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.decoder.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x"
},
{
"identifier": "make_beta_schedule",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n print(f\"beta scheduler name : {schedule}\")\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()"
},
{
"identifier": "extract_into_tensor",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))"
},
{
"identifier": "noise_like",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
},
{
"identifier": "zero_module",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module"
},
{
"identifier": "conv_nd",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "DDIMSampler",
"path": "ldm/models/diffusion/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates, cond_output_dict = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates, cond_output_dict\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0, cond_output_dict = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n if cond_output_dict is not None:\n cond_output = cond_output_dict[\"cond_output\"] \n if self.model.use_noisy_cond:\n b = cond_output.shape[0]\n\n alphas = self.model.alphas_cumprod if ddim_use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if ddim_use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if ddim_use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if ddim_use_original_steps else self.ddim_sigmas\n\n device = cond_output.device\n a_t = torch.full((b, 1, 1, 1), alphas[0], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[0], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[0], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[0], device=device)\n\n c = cond_output_dict[\"cond_input\"]\n e_t = cond_output\n pred_c0 = (c - sqrt_one_minus_at * e_t) / a_t.sqrt()\n dir_ct = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(c.shape, device, False) * temperature\n\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n cond_output = a_prev.sqrt() * pred_c0 + dir_ct + noise \n cond_output_dict[f\"cond_sample\"] = cond_output\n return img, intermediates, cond_output_dict\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output, cond_output_dict = self.model.apply_model(x, t, c)\n else:\n # x_in = torch.cat([x] * 2)\n # t_in = torch.cat([t] * 2)\n # if isinstance(c, dict):\n # assert isinstance(unconditional_conditioning, dict)\n # c_in = dict()\n # for k in c:\n # if isinstance(c[k], list):\n # c_in[k] = [torch.cat([\n # unconditional_conditioning[k][i],\n # c[k][i]]) for i in range(len(c[k]))]\n # else:\n # c_in[k] = torch.cat([\n # unconditional_conditioning[k],\n # c[k]])\n # elif isinstance(c, list):\n # c_in = list()\n # assert isinstance(unconditional_conditioning, list)\n # for i in range(len(c)):\n # c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n # else:\n # c_in = torch.cat([unconditional_conditioning, c])\n x_in = x\n t_in = t\n model_t, cond_output_dict_cond = self.model.apply_model(x_in, t_in, c)\n model_uncond, cond_output_dict_uncond = self.model.apply_model(x_in, t_in, unconditional_conditioning)\n if isinstance(model_t, tuple):\n model_t, _ = model_t\n if isinstance(model_uncond, tuple):\n model_uncond, _ = model_uncond\n if cond_output_dict_cond is not None:\n cond_output_dict = dict()\n for k in cond_output_dict_cond.keys():\n cond_output_dict[k] = torch.cat([cond_output_dict_uncond[k], cond_output_dict_cond[k]])\n else:\n cond_output_dict = None\n # model_output, cond_output_dict = self.model.apply_model(x_in, t_in, c_in)\n # model_uncond, model_t = model_output.chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = 
self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n\n return x_prev, pred_x0, cond_output_dict\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)[0]\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)[0]\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} 
timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec"
}
] | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
import torchvision.transforms as T
import random
import torch.nn.functional as F
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from torchvision.transforms.functional import resize
from diffusers.models.autoencoder_kl import AutoencoderKLOutput
from diffusers.models.vae import DecoderOutput
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like, zero_module, conv_nd
from ldm.models.diffusion.ddim import DDIMSampler | 10,659 | """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
l_cond_simple_weight=1.0,
l_cond_recon_weight=1.0,
**kwargs
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.unet_config = unet_config
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.imagenet_norm = T.Normalize((0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711))
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
self.l_cond_simple_weight = l_cond_simple_weight
self.l_cond_recon_weight = l_cond_recon_weight
if monitor is not None:
self.monitor = monitor
self.make_it_fit = make_it_fit
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
l_cond_simple_weight=1.0,
l_cond_recon_weight=1.0,
**kwargs
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.unet_config = unet_config
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.imagenet_norm = T.Normalize((0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711))
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
self.l_cond_simple_weight = l_cond_simple_weight
self.l_cond_recon_weight = l_cond_recon_weight
if monitor is not None:
self.monitor = monitor
self.make_it_fit = make_it_fit | if reset_ema: assert exists(ckpt_path) | 1 | 2023-12-02 05:56:58+00:00 | 12k |
ContextualAI/HALOs | trainers.py | [
{
"identifier": "AutoModelForCausalLMWithValueHead",
"path": "models.py",
"snippet": "class AutoModelForCausalLMWithValueHead(PreTrainedModelWrapper):\n r\"\"\"\n An autoregressive model with a value head in addition to the language model head.\n\n Class attributes:\n - **transformers_parent_class** (`transformers.PreTrainedModel`) -- The parent class of the wrapped model. This\n should be set to `transformers.AutoModelForCausalLM` for this class.\n - **lm_head_namings** (`tuple`) -- A tuple of strings that are used to identify the language model head of the\n wrapped model. This is set to `(\"lm_head\", \"embed_out\")` for this class but can be changed for other models\n in the future\n - **supported_args** (`tuple`) -- A tuple of strings that are used to identify the arguments that are supported\n by the `ValueHead` class. Currently, the supported args are:\n - **summary_dropout_prob** (`float`, `optional`, defaults to `None`) -- The dropout probability for the\n `ValueHead` class.\n - **v_head_initializer_range** (`float`, `optional`, defaults to `0.2`) -- The initializer range for the\n `ValueHead` if a specific initialization strategy is selected.\n - **v_head_init_strategy** (`str`, `optional`, defaults to `None`) -- The initialization strategy for the\n `ValueHead`. Currently, the supported strategies are:\n - **`None`** -- Initializes the weights of the `ValueHead` with a random distribution. This is the default\n strategy.\n - **\"normal\"** -- Initializes the weights of the `ValueHead` with a normal distribution.\n\n \"\"\"\n transformers_parent_class = AutoModelForCausalLM\n lm_head_namings = [\"lm_head\", \"embed_out\"]\n supported_args = (\n \"summary_dropout_prob\",\n \"v_head_initializer_range\",\n \"v_head_init_strategy\",\n )\n\n def __init__(self, pretrained_model, *args, **kwargs):\n r\"\"\"\n Initializes the model.\n\n Args:\n pretrained_model (`transformers.PreTrainedModel`):\n The model to wrap. It should be a causal language model such as GPT2.\n or any model mapped inside the `AutoModelForCausalLM` class.\n kwargs (`dict`, `optional`):\n Additional keyword arguments, that are passed to the `ValueHead` class.\n \"\"\"\n super().__init__(pretrained_model)\n v_head_kwargs, other_kwargs = self._split_kwargs(kwargs)\n \n if not any(hasattr(self.pretrained_model, attribute) for attribute in self.lm_head_namings):\n raise ValueError(\"The model does not have a language model head, please use a model that has one.\")\n\n self.v_head = ValueHead(self.pretrained_model.config, **v_head_kwargs)\n self._init_weights(**v_head_kwargs)\n\n def _init_weights(self, **kwargs):\n r\"\"\"\n Initializes the weights of the value head. The default initialization strategy is random.\n Users can pass a different initialization strategy by passing the `v_head_init_strategy` argument\n when calling `.from_pretrained`. Supported strategies are:\n - `normal`: initializes the weights with a normal distribution.\n\n Args:\n **kwargs (`dict`, `optional`):\n Additional keyword arguments, that are passed to the `ValueHead` class. 
These arguments\n can contain the `v_head_init_strategy` argument as well as the `v_head_initializer_range`\n argument.\n \"\"\"\n initializer_range = kwargs.pop(\"v_head_initializer_range\", 0.2)\n # random init by default\n init_strategy = kwargs.pop(\"v_head_init_strategy\", None)\n if init_strategy is None:\n # do nothing\n pass\n elif init_strategy == \"normal\":\n def weights_init(m):\n if isinstance(m, nn.Linear):\n m.weight.data.normal_(mean=0.0, std=initializer_range)\n m.bias.data.zero_()\n\n self.summary.apply(weights_init)\n\n def forward(\n self,\n input_ids=None,\n past_key_values=None,\n attention_mask=None,\n **kwargs,\n ):\n r\"\"\"\n Applies a forward pass to the wrapped model and returns the logits of the value head.\n\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, `optional`):\n Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model\n (see `past_key_values` input) to speed up sequential decoding.\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n kwargs (`dict`, `optional`):\n Additional keyword arguments, that are passed to the wrapped model.\n \"\"\"\n kwargs[\"output_hidden_states\"] = True # this had already been set in the LORA / PEFT examples\n kwargs[\"past_key_values\"] = past_key_values\n\n base_model_output = self.pretrained_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n **kwargs,\n )\n\n last_hidden_state = base_model_output.hidden_states[-1]\n lm_logits = base_model_output.logits\n loss = base_model_output.loss\n\n # force upcast in fp32 if logits are in half-precision\n if lm_logits.dtype != torch.float32:\n lm_logits = lm_logits.float()\n\n value = self.v_head(last_hidden_state).squeeze(-1)\n\n return (lm_logits, loss, value)\n\n def generate(self, *args, **kwargs):\n r\"\"\"\n A simple wrapper around the `generate` method of the wrapped model.\n Please refer to the [`generate`](https://huggingface.co/docs/transformers/internal/generation_utils)\n method of the wrapped model for more information about the supported arguments.\n\n Args:\n *args (`list`, *optional*):\n Positional arguments passed to the `generate` method of the wrapped model.\n **kwargs (`dict`, *optional*):\n Keyword arguments passed to the `generate` method of the wrapped model.\n \"\"\"\n return self.pretrained_model.generate(*args, **kwargs)\n\n def state_dict(self, *args, **kwargs):\n r\"\"\"\n Returns the state dictionary of the model. 
We add the state dictionary of the value head\n to the state dictionary of the wrapped model by prepending the key with `v_head.`.\n \"\"\"\n pretrained_model_state_dict = self.pretrained_model.state_dict(*args, **kwargs)\n \n v_head_state_dict = self.v_head.state_dict(*args, **kwargs)\n for k, v in v_head_state_dict.items():\n pretrained_model_state_dict[f\"v_head.{k}\"] = v\n return pretrained_model_state_dict\n\n def push_to_hub(self, *args, **kwargs):\n setattr(self.pretrained_model, \"v_head\", self.v_head)\n return self.pretrained_model.push_to_hub(*args, **kwargs)\n\n def post_init(self, state_dict):\n r\"\"\"\n We add the state dictionary of the value head to the state dictionary of the wrapped model\n by prepending the key with `v_head.`. This function removes the `v_head.` prefix from the\n keys of the value head state dictionary.\n \"\"\"\n for k in list(state_dict.keys()):\n if \"v_head.\" in k:\n state_dict[k.replace(\"v_head.\", \"\")] = state_dict.pop(k)\n self.v_head.load_state_dict(state_dict, strict=False)\n del state_dict"
},
{
"identifier": "slice_and_move_batch_for_device",
"path": "utils.py",
"snippet": "def slice_and_move_batch_for_device(batch: Dict, rank: int, world_size: int, device: str) -> Dict:\n \"\"\"Slice a batch into chunks, and move each chunk to the specified device.\"\"\"\n chunk_size = len(list(batch.values())[0]) // world_size\n start = chunk_size * rank\n end = chunk_size * (rank + 1)\n sliced = {k: v[start:end] for k, v in batch.items()}\n on_device = {k: (v.to(device) if isinstance(v, torch.Tensor) else v) for k, v in sliced.items()}\n return on_device"
},
{
"identifier": "formatted_dict",
"path": "utils.py",
"snippet": "def formatted_dict(d: Dict) -> Dict:\n \"\"\"Format a dictionary for printing.\"\"\"\n return {k: (f\"{v:.5g}\" if type(v) == float else v) for k, v in d.items()}"
},
{
"identifier": "all_gather_if_needed",
"path": "utils.py",
"snippet": "def all_gather_if_needed(values: torch.Tensor, rank: int, world_size: int) -> torch.Tensor:\n \"\"\"Gather and stack/cat values from all processes, if there are multiple processes.\"\"\"\n if world_size == 1:\n return values\n\n device = torch.device('cuda', rank)\n all_values = [torch.empty_like(values).to(device) for _ in range(world_size)]\n dist.all_gather(all_values, values)\n cat_function = torch.cat if values.dim() > 0 else torch.stack\n return cat_function(all_values, dim=0)"
},
{
"identifier": "pad_to_length",
"path": "utils.py",
"snippet": "def pad_to_length(tensor: torch.Tensor, length: int, pad_value: Union[int, float], dim: int = -1) -> torch.Tensor:\n if tensor.size(dim) >= length:\n return tensor\n else:\n pad_size = list(tensor.shape)\n pad_size[dim] = length - tensor.size(dim)\n return torch.cat([tensor, pad_value * torch.ones(*pad_size, dtype=tensor.dtype, device=tensor.device)], dim=dim)"
},
{
"identifier": "get_block_class_from_model",
"path": "utils.py",
"snippet": "def get_block_class_from_model(model: torch.nn.Module, block_class_name: str) -> torch.nn.Module:\n \"\"\"Get the class of a block from a model, using the block's class name.\"\"\"\n for module in model.modules():\n if module.__class__.__name__ == block_class_name:\n return module.__class__\n raise ValueError(f\"Could not find block class {block_class_name} in model {model}\")"
},
{
"identifier": "rank0_print",
"path": "utils.py",
"snippet": "def rank0_print(*args, **kwargs):\n \"\"\"Print, but only on rank 0.\"\"\"\n if not dist.is_initialized() or dist.get_rank() == 0:\n print(*args, **kwargs)"
},
{
"identifier": "get_batch_logps",
"path": "utils.py",
"snippet": "def get_batch_logps(logits: torch.FloatTensor, labels: torch.LongTensor, average_log_prob: bool = False, token_level: bool = False):\n \"\"\"Compute the log probabilities of the given labels under the given logits.\n\n Args:\n logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)\n labels: Labels for which to compute the log probabilities. Label tokens with a value of -100 are ignored. Shape: (batch_size, sequence_length)\n average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens.\n token_level: If true, return the token-level log probabilities (do not aggregate across tokens)\n\n Returns:\n The relevant log probabilities. Of shape (batch_size,) by default and shape (batch size, sequence length) if token_level.\n \"\"\"\n assert logits.shape[:-1] == labels.shape\n\n labels = labels[:, 1:].clone()\n logits = logits[:, :-1, :]\n loss_mask = (labels != -100)\n\n # dummy token; we'll ignore the losses on these tokens later\n labels[labels == -100] = 0\n distribution_logps = logits.log_softmax(-1)\n\n per_token_logps = torch.gather(distribution_logps, dim=2, index=labels.unsqueeze(2)).squeeze(2)\n\n if token_level: \n return (per_token_logps * loss_mask)\n elif average_log_prob:\n return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)\n else:\n return (per_token_logps * loss_mask).sum(-1)"
},
{
"identifier": "masked_mean",
"path": "utils.py",
"snippet": "def masked_mean(values, mask, axis=None):\n \"\"\"Compute mean of tensor with a masked values.\"\"\"\n if axis is not None:\n return (values * mask).sum(axis=axis) / mask.sum(axis=axis)\n else:\n return (values * mask).sum() / mask.sum()"
},
{
"identifier": "masked_var",
"path": "utils.py",
"snippet": "def masked_var(values, mask, unbiased=True):\n \"\"\"Compute variance of tensor with masked values.\"\"\"\n mean = masked_mean(values, mask)\n centered_values = values - mean\n variance = masked_mean(centered_values**2, mask)\n return variance"
},
{
"identifier": "entropy_from_logits",
"path": "utils.py",
"snippet": "def entropy_from_logits(logits: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:\n \"\"\"Calculate entropy from logits.\n \n Args:\n logits: tensor of shape (batch_size, sequence length, vocab)\n mask: tensor of shape (batch_size, sequence length)\n \n Returns:\n The average tokenwise entropy across all non-masked tokens (of shape (1,)).\n \"\"\"\n pd = torch.nn.functional.softmax(logits, dim=-1)\n entropy = masked_mean(torch.logsumexp(logits, axis=-1) - torch.sum(pd * logits, axis=-1), mask)\n return entropy"
},
{
"identifier": "delete_dict",
"path": "utils.py",
"snippet": "def delete_dict(d: Dict):\n \"\"\"Delete all items inside the dict.\"\"\"\n for k in list(d.keys()):\n del d[k]"
},
{
"identifier": "rowwise_product",
"path": "utils.py",
"snippet": "def rowwise_product(mat: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculate the row-wise product over all the elements that have not been masked out.\n\n Args:\n mat: tensor of shape (batch_size, sequence length)\n mask: tensor of shape (batch_size, sequence length) \n\n Returns:\n Matrix of batch size. \n \"\"\"\n mat = mat.clone()\n indices = (mask == 0).long().nonzero()\n mat[indices[:,0], indices[:,1]] = 1\n return mat.prod(dim=1)"
}
] | import torch
import torch.nn.functional as F
import torch.nn as nn
import transformers
import gc
import torch.distributed as dist
import tensor_parallel as tp
import contextlib
import dataloader
import numpy as np
import wandb
import tqdm
import random
import os
import time
import json
import functools
from models import AutoModelForCausalLMWithValueHead
from omegaconf import OmegaConf, DictConfig
from transformers import AutoTokenizer
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
MixedPrecision,
StateDictType,
BackwardPrefetch,
ShardingStrategy,
CPUOffload,
)
from torch.distributed.fsdp.api import FullStateDictConfig, FullOptimStateDictConfig
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy, size_based_auto_wrap_policy
from utils import (
slice_and_move_batch_for_device,
formatted_dict,
all_gather_if_needed,
pad_to_length,
get_block_class_from_model,
rank0_print,
get_batch_logps,
masked_mean,
masked_var,
entropy_from_logits,
delete_dict,
rowwise_product,
)
from collections import defaultdict
from typing import Optional, Dict, List, Union, Tuple
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper,
apply_activation_checkpointing,
CheckpointImpl,
) | 7,859 | L(x, y) := 1 - sigmoid(beta * (0 - [log p_policy(y|x) - log p_reference(y|x)]))
"""
chosen_logratios = (policy_chosen_logps - reference_chosen_logps)
rejected_logratios = (policy_rejected_logps - reference_rejected_logps)
losses = torch.cat((1 - F.sigmoid(self.config.loss.beta * (chosen_logratios - 0)), 1 - F.sigmoid(self.config.loss.beta * (0 - rejected_logratios))), 0)
chosen_rewards = self.config.loss.beta * (policy_chosen_logps - reference_chosen_logps).detach()
rejected_rewards = self.config.loss.beta * (policy_rejected_logps - reference_rejected_logps).detach()
return losses, chosen_rewards, rejected_rewards
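    # The loss above applies the docstring's formula symmetrically: with the implicit reward
    # r(x, y) = log p_policy(y|x) - log p_reference(y|x), desirable examples are penalized by
    # 1 - sigmoid(beta * (r - 0)) and undesirable ones by 1 - sigmoid(beta * (0 - r)), so training
    # pushes r up on desirable outputs and down on undesirable ones; chosen_rewards and
    # rejected_rewards are simply beta * r, detached for logging.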
class PPOTrainer(BasicTrainer):
"""One-step, offline variant of PPO."""
def forward(self, model: AutoModelForCausalLMWithValueHead, batch: Dict[str, Union[List, torch.LongTensor]], is_policy: bool=True) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Run the given model on the given batch of inputs.
Args:
model: model to run forward pass on
batch: input batch (forward pass will be run on keys with prefix 'chosen')
            is_policy: whether the model is the policy or reference
Returns:
all_logps: batch log probabilities at the token level of shape (batch size, sequence length)
all_logits: corresponding logits of shape (batch size, sequence length)
all_values: values predicted for each token, of shape (batch size, sequence length)
"""
if is_policy:
# here the prefix 'chosen' is a misnomer, since it can refer to the dispreferred generations
# the 'status' field contains the actual status of the generations
all_logits, _, all_values = model(batch['target_combined_input_ids'], attention_mask=batch['target_combined_attention_mask'])
all_values = all_values[:, :-1].contiguous().to(self.rank)
else:
all_logits = model(batch['target_combined_input_ids'], attention_mask=batch['target_combined_attention_mask'], use_cache=(not self.is_mistral)).logits.to(self.policy_dtype)
all_values = None
all_logps = get_batch_logps(all_logits.to(self.policy_dtype), batch['target_labels'], average_log_prob=False, token_level=True)
# Returned tensors will have sequence length that is one less than the inputs (to account for label shifting).
all_logits = all_logits[:, :-1].contiguous().to(self.rank)
all_logps = all_logps.contiguous().to(self.rank)
return all_logps, all_logits, all_values
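    # Note: the policy pass goes through the value-head wrapper, which returns (logits, loss, value),
    # so per-token value estimates come back alongside the token-level log-probabilities; the
    # reference pass is a plain causal-LM forward and only contributes log-probabilities
    # (all_values stays None).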
def compute_advantages(self, values: torch.FloatTensor, rewards: torch.FloatTensor, masks: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""
Estimate the advantages and rewards for every token taken.
Args:
values: the estimated values of the tokens. Should already be detached from graph.
rewards: signal from the environment as to whether the generation is good or bad.
In the basic implementation, this is only one nonzero reward, on the last unpadded token of each sequence.
torch tensor of shape (batch size, sequence length)
masks: torch tensor of shape (batch size, sequence length); 1 if token should be considered and 0 otherwise
Returns:
advantages: torch tensor of shape (batch size, sequence length)
returns: Also called 'rewards-to-go'.
Only tokens after the current token are used to calculate this: http://rail.eecs.berkeley.edu/deeprlcourse/static/slides/lec-5.pdf
torch tensor of shape (batch size, sequence length)
"""
values = values * masks
rewards = rewards * masks
gae = 0 # generalized advantage estimation
seq_len = rewards.shape[-1]
advantages_reversed = []
discounted_future_reward = torch.zeros_like(rewards[:,0])
discounted_future_rewards_reversed = []
for t in reversed(range(seq_len)):
# see https://towardsdatascience.com/proximal-policy-optimization-tutorial-part-2-2-gae-and-ppo-loss-fe1b3c5549e8
delta = rewards[:, t] + self.config.loss.gamma * (values[:, t + 1] if t < seq_len - 1 else 0.0) - values[:, t]
gae = delta + self.config.loss.gamma * self.config.loss.lam * gae
advantages_reversed.append(gae)
discounted_future_rewards_reversed.append(discounted_future_reward)
discounted_future_reward = rewards[:, t] + self.config.loss.gamma * discounted_future_reward
advantages = (torch.stack(advantages_reversed[::-1]).transpose(0, 1) * masks)
returns = (advantages + values).contiguous().to(self.rank)
discounted_future_rewards = (torch.stack(discounted_future_rewards_reversed[::-1]).transpose(0, 1) * masks).contiguous().to(self.rank)
# normalizing advantages leads to more stable learning
mean_adv, var_adv = masked_mean(advantages, masks), masked_var(advantages, masks)
normalized_advantages = (advantages - mean_adv) * torch.rsqrt(var_adv + 1e-8)
normalized_advantages = (normalized_advantages * masks).detach().contiguous().to(self.rank)
return normalized_advantages, returns, discounted_future_rewards
def loss(self, batch: Dict, episode: Dict) -> Tuple[torch.FloatTensor, Dict]:
"""
Given the batch statistics and the current episode's values, calculate the loss and return some loss statistics.
Args:
batch: dictionary containing batch data (should have keys 'values', 'returns', 'advantages', 'logprobs', 'masks', 'discounted_future_rewards')
episode: dictionary containing the episode data (should have keys 'logits', 'values', 'logprobs')
Returns:
loss: combined policy and critic loss of shape (1,)
loss_stats: dictionary of episode/batch statistics
"""
value_losses = (episode['values'] - batch['discounted_future_rewards'].detach()) ** 2
critic_loss = 0.5 * masked_mean(value_losses, batch['masks'])
ratio = torch.exp(episode['logprobs'] - batch['logprobs'])
policy_losses = -batch['advantages'] * ratio
policy_losses_clipped = -batch['advantages'] * torch.clamp(ratio, self.config.loss.cliprange, 1 / self.config.loss.cliprange)
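# PPO-style clipped surrogate: taking the elementwise max of the two negated objectives gives the
# pessimistic bound on policy improvement. Note that this implementation clamps the ratio to
# [cliprange, 1 / cliprange] rather than the [1 - eps, 1 + eps] interval of the original PPO paper.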
policy_loss = masked_mean(torch.max(policy_losses, policy_losses_clipped), batch['masks'])
KL_penalty = masked_mean(batch['logprobs'] - episode['logprobs'], batch['masks'])
loss = policy_loss + self.config.loss.critic_coef * critic_loss + self.config.loss.KL_coef * KL_penalty
loss_stats = {
'loss/total' : loss.detach(),
'loss/critic' : critic_loss.detach(),
'loss/policy' : policy_loss.detach(),
'clipfrac/policy' : masked_mean(torch.gt(policy_losses_clipped, policy_losses).float(), batch['masks']).detach(),
| # Copyright (c) 2023 Contextual AI, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Extendable Trainer classes for aligning LLMs.
The specific class that should be used should be specified in the loss file under config/loss.
The BasicTrainer contains the core methods (e.g., sharding, basic training loop, etc.).
The SFTTrainer, PairedPreferenceTrainer, and UnpairedPreferenceTrainer all subclass BasicTrainer
and override the get_batch_metrics() and (optionally) forward() methods.
The trainer for each loss should subclass either PairedPreferenceTrainer or UnpairedPreferenceTrainer.
"""
torch.backends.cuda.matmul.allow_tf32 = True
class BasicTrainer(object):
def __init__(self,
tokenizer: AutoTokenizer,
config: DictConfig,
train_iterator: dataloader.DataLoader,
eval_iterator: dataloader.DataLoader,
policy: nn.Module,
reference_model: Optional[nn.Module] = None,
rank: int = 0,
world_size: int = 1,
fsdp: bool = False,
):
"""A trainer for a language model, supporting either SFT, HALO, or offline PPO training.
"""
self.seed = config.seed
torch.manual_seed(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
self.rank = rank
self.device = torch.device('cuda', self.rank)
self.world_size = world_size
self.config = config
self.run_dir = config.local_run_dir
self.fsdp = fsdp
self.tokenizer = tokenizer
self.policy = policy
self.policy_dtype = getattr(torch, config.model.policy_dtype)
self.reference_model = reference_model
self.example_counter = 0
self.batch_counter = 0
self.train_iterator = train_iterator
self.eval_iterator = eval_iterator
self.eval_batches = list(self.eval_iterator)
rank0_print(f'Loaded {len(self.eval_batches)} eval batches of size {config.model.eval_batch_size}')
if self.fsdp:
self.shard()
self.is_mistral = 'mistral' in self.config.model.name_or_path.lower()
def shard(self):
"""
Shard the policy model and reference model (if applicable) using FSDP.
"""
assert self.config.model.block_name is not None, 'must specify model.block_name (e.g., GPT2Block or GPTNeoXLayer) for FSDP'
wrap_class = get_block_class_from_model(self.policy.pretrained_model if self.config.loss.name == 'ppo' else self.policy, self.config.model.block_name)
model_auto_wrap_policy = functools.partial(transformer_auto_wrap_policy, transformer_layer_cls={wrap_class},)
shared_fsdp_kwargs = dict(
auto_wrap_policy=model_auto_wrap_policy,
sharding_strategy=ShardingStrategy.FULL_SHARD,
cpu_offload=CPUOffload(offload_params=False),
backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
device_id=self.rank,
ignored_modules=None,
limit_all_gathers=False,
use_orig_params=False,
sync_module_states=False
)
rank0_print('Sharding models...')
mp_dtype = getattr(torch, self.config.model.fsdp_policy_mp) if self.config.model.fsdp_policy_mp is not None else None
policy_mp_policy = MixedPrecision(param_dtype=mp_dtype, reduce_dtype=mp_dtype, buffer_dtype=mp_dtype)
if self.config.loss.name == 'ppo':
self.policy.pretrained_model = FSDP(self.policy.pretrained_model, **shared_fsdp_kwargs, mixed_precision=policy_mp_policy)
# shard the value head according to size
v_head_shared_fsdp_kwargs = dict(
auto_wrap_policy=functools.partial(size_based_auto_wrap_policy, min_num_params=100),
sharding_strategy=ShardingStrategy.FULL_SHARD,
cpu_offload=CPUOffload(offload_params=False),
backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
device_id=self.rank,
ignored_modules=None,
limit_all_gathers=False,
use_orig_params=False,
sync_module_states=False
)
self.policy.v_head = FSDP(self.policy.v_head, **v_head_shared_fsdp_kwargs)
else:
self.policy = FSDP(self.policy, **shared_fsdp_kwargs, mixed_precision=policy_mp_policy)
if self.reference_model is not None:
self.reference_model = FSDP(self.reference_model, **shared_fsdp_kwargs, mixed_precision=policy_mp_policy)
if self.config.model.activation_checkpointing:
rank0_print('Attempting to enable activation checkpointing...')
try:
# use activation checkpointing, according to:
# https://pytorch.org/blog/scaling-multimodal-foundation-models-in-torchmultimodal-with-pytorch-distributed/
# first, verify we have FSDP activation support ready by importing:
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import checkpoint_wrapper, apply_activation_checkpointing
except Exception as e:
rank0_print('FSDP activation checkpointing not available:', e)
else:
check_fn = lambda submodule: isinstance(submodule, wrap_class)
rank0_print('Applying activation checkpointing wrapper to policy...')
if self.config.loss.name == 'ppo':
apply_activation_checkpointing(self.policy.pretrained_model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=check_fn)
else:
apply_activation_checkpointing(self.policy, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=check_fn)
if self.reference_model is not None:
apply_activation_checkpointing(self.reference_model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=check_fn)
rank0_print('FSDP activation checkpointing enabled!')
print('Loaded model on rank', self.rank)
dist.barrier()
def get_batch_samples(self, batch: Dict[str, torch.LongTensor]) -> List[str]:
"""Generate samples from the policy."""
ctx = lambda: (FSDP.summon_full_params(self.policy, writeback=False, recurse=False) if self.fsdp else contextlib.nullcontext())
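# Under FSDP the parameters are sharded across ranks, so summon the full (unsharded) parameters,
# without writing them back, before calling generate(); otherwise fall back to a no-op context manager.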
with ctx():
policy_output = self.policy.generate(
batch['prompt_input_ids'],
attention_mask=batch['prompt_attention_mask'],
max_length=self.config.model.max_length,
do_sample=True,
pad_token_id=self.tokenizer.pad_token_id,
top_p=self.config.top_p,
)
policy_output = pad_to_length(policy_output, self.config.model.max_length, self.tokenizer.pad_token_id)
policy_output = all_gather_if_needed(policy_output, self.rank, self.world_size)
policy_output_decoded = self.tokenizer.batch_decode(policy_output, skip_special_tokens=True)
return policy_output_decoded
def loss(self,
policy_chosen_logps: torch.FloatTensor,
policy_rejected_logps: torch.FloatTensor,
reference_chosen_logps: torch.FloatTensor,
reference_rejected_logps: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""
Args:
policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,)
policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,)
reference_chosen_logps: Log probabilities of the reference model for the chosen responses. Shape: (batch_size,)
reference_rejected_logps: Log probabilities of the reference model for the rejected responses. Shape: (batch_size,)
Returns:
A tuple of three tensors: (losses, chosen_rewards, rejected_rewards).
The losses tensor contains the losses, one for each example (if chosen_only or rejected_only, only n/2 losses).
The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively, for reporting.
Note that rejected responses do not factor into the loss, only the reward calculation.
"""
raise NotImplementedError
def get_batch_metrics(self, batch: Dict[str, Union[List, torch.LongTensor]], mode: str=None) -> Tuple[torch.FloatTensor, Dict]:
"""Compute the loss and other metrics for the given batch of inputs.
Args:
batch: dictionary of inputs for the batch (what is required will vary depending on the trainer)
mode: one of 'train', 'eval', 'sample'
"""
raise NotImplementedError
def eval(self) -> Dict[str, Dict]:
"""
Run evaluation on all the examples in the test data and return the metrics from get_batch_metrics.
This is close-ended evaluation and measures the performance of a single model on a single dataset.
It does not compare two models to each other.
Returns:
A dict of form:
{
'metadata': the Hydra config
'results': a dict of batch metrics (averaged across all of the test data)
}
"""
rank0_print(f'Running evaluation')
self.policy.eval()
if self.reference_model is not None:
self.reference_model.eval()
all_eval_metrics = defaultdict(list)
for eval_batch in (tqdm.tqdm(self.eval_batches, desc='Computing eval metrics') if self.rank == 0 else self.eval_batches):
local_eval_batch = slice_and_move_batch_for_device(eval_batch, self.rank, self.world_size, self.rank)
with torch.no_grad():
_, eval_metrics = self.get_batch_metrics(local_eval_batch, mode='eval')
for k, v in eval_metrics.items():
all_eval_metrics[k].extend(v)
mean_eval_metrics = {}
for k, v in all_eval_metrics.items():
if len(v) > 0:
mean_eval_metrics[k] = sum(v) / len(v)
results = {
'metadata': OmegaConf.to_object(self.config),
'results': formatted_dict(mean_eval_metrics),
}
return results
def sample(self) -> List[Dict[str, str]]:
"""
Generate samples from the policy model.
Returns:
A list of samples, each of which is of the form:
{
'prompt': the input
'chosen': the generation chosen by the human for the given prompt
'policy': the generation from the policy model
}
"""
all_policy_samples, all_prompts, all_chosen = [], [], []
samples = []
self.policy.eval()
if self.reference_model is not None:
self.reference_model.eval()
for eval_batch in self.eval_batches:
local_eval_batch = slice_and_move_batch_for_device(eval_batch, self.rank, self.world_size, self.rank)
policy_samples = self.get_batch_samples(local_eval_batch)
chosen_samples = []
# for DPO-like losses, chosen_text is the field that will contain the text; target_text for all other losses
# be sure to remove EOS token if present
for x in (eval_batch['target_text'] if 'target_text' in eval_batch else eval_batch['chosen_text']):
if self.tokenizer.eos_token in x:
x = x[:x.rfind(self.tokenizer.eos_token)]
chosen_samples.append(x)
all_prompts.extend(eval_batch['prompt_text'])
all_chosen.extend(chosen_samples)
all_policy_samples.extend(policy_samples)
if self.config.n_samples is not None and len(all_prompts) > self.config.n_samples:
break
else:
rank0_print(f"Generated {len(all_prompts)} samples ...")
for i in range(len(all_prompts)):
samples.append({
'prompt' : all_prompts[i],
'chosen' : all_chosen[i],
'policy' : all_policy_samples[i][len(all_prompts[i]):],
})
return samples
def train(self):
"""Begin either SFT or HALO training, with periodic evaluation. This is subclassed when implementing PPO."""
rank0_print(f'Using {self.config.optimizer} optimizer with learning rate {self.config.lr}')
self.optimizer = getattr(torch.optim, self.config.optimizer)(self.policy.parameters(), lr=self.config.lr)
self.scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lambda step: min(1.0, (step + 1) / (self.config.warmup_steps + 1)))
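# Linear warmup schedule: the LR multiplier ramps from ~0 to 1 over config.warmup_steps scheduler steps,
# then stays at 1 (capped by the min(1.0, ...) term).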
if self.reference_model is not None:
self.reference_model.eval()
last_log = None
gradients_accumulated = 0
batch_metrics = defaultdict(list)
for batch in self.train_iterator:
# EVALUATION
if self.example_counter % self.config.eval_every == 0 and (self.example_counter > 0 or self.config.do_first_eval):
rank0_print(f'Running evaluation after {self.example_counter} train examples')
self.policy.eval()
all_eval_metrics = defaultdict(list)
for eval_batch in (tqdm.tqdm(self.eval_batches, desc='Computing eval metrics') if self.rank == 0 else self.eval_batches):
local_eval_batch = slice_and_move_batch_for_device(eval_batch, self.rank, self.world_size, self.rank)
with torch.no_grad():
_, eval_metrics = self.get_batch_metrics(local_eval_batch, mode='eval')
for k, v in eval_metrics.items():
all_eval_metrics[k].extend(v)
delete_dict(local_eval_batch)
mean_eval_metrics = {}
for k, v in all_eval_metrics.items():
if len(v) > 0:
mean_eval_metrics[k] = sum(v) / len(v)
rank0_print(f'eval after {self.example_counter}: {formatted_dict(mean_eval_metrics)}')
if self.config.wandb.enabled and self.rank == 0:
wandb.log(mean_eval_metrics, step=self.example_counter)
if self.example_counter > 0:
if self.config.debug:
rank0_print('skipping save in debug mode')
elif self.config.intermediate_checkpoints:
output_dir = os.path.join(self.run_dir, f'step-{self.example_counter}')
rank0_print(f'creating checkpoint to write to {output_dir}...')
self.save(output_dir, mean_eval_metrics)
delete_dict(all_eval_metrics)
delete_dict(mean_eval_metrics)
#### TRAINING
self.policy.train()
start_time = time.time()
local_microbatch = slice_and_move_batch_for_device(batch, self.rank, self.world_size, self.rank)
loss, metrics = self.get_batch_metrics(local_microbatch)
(loss / self.config.model.gradient_accumulation_steps).backward()
for k, v in metrics.items():
batch_metrics[k].extend(v)
gradients_accumulated += 1
if gradients_accumulated == self.config.model.gradient_accumulation_steps:
grad_norm = self.clip_gradient()
batch_metrics['grad_norm'].append(grad_norm)
self.optimizer.step()
self.optimizer.zero_grad()
self.scheduler.step()
gradients_accumulated = 0
step_time = time.time() - start_time
examples_per_second = self.config.model.batch_size / step_time
batch_metrics['examples_per_second'].append(examples_per_second)
self.batch_counter += 1
self.example_counter += self.config.model.batch_size
delete_dict(local_microbatch)
delete_dict(metrics)
if gradients_accumulated == 0 and (last_log is None or time.time() - last_log > self.config.minimum_log_interval_secs):
mean_train_metrics = {}
for k, v in batch_metrics.items():
if len(v) > 0:
mean_train_metrics[k] = sum(v) / len(v)
mean_train_metrics['counters/examples'] = self.example_counter
mean_train_metrics['counters/updates'] = self.batch_counter
rank0_print(f'train stats after {self.example_counter} examples: {formatted_dict(mean_train_metrics)}')
if self.config.wandb.enabled and self.rank == 0:
wandb.log(mean_train_metrics, step=self.example_counter)
last_log = time.time()
delete_dict(batch_metrics)
delete_dict(mean_train_metrics)
delete_dict(batch)
batch_metrics = defaultdict(list)
# explicitly empty cache if less than 100KB of reserved memory is still unallocated
r = torch.cuda.memory_reserved(self.rank)
a = torch.cuda.memory_allocated(self.rank)
if (r - a) / 1024 < 100:
gc.collect()
torch.cuda.empty_cache()
else:
rank0_print(f'skipping logging after {self.example_counter} examples to avoid logging too frequently')
def clip_gradient(self):
"""Clip the gradient norm of the parameters of a non-FSDP policy."""
return torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.config.model.max_grad_norm).item()
def write_state_dict(self, step: int, state: Dict[str, torch.Tensor], metrics: Dict, filename: str, dir_name: Optional[str] = None):
"""Write a checkpoint to disk.
Args:
step : current training step
state: current state of training (model or optimizer, if applicable)
metrics: dictionary of metrics to save
filename: name of the checkpoint file to write
dir_name: directory in which to save (defaults to LATEST under the run directory)
"""
if dir_name is None:
dir_name = os.path.join(self.run_dir, f'LATEST')
os.makedirs(dir_name, exist_ok=True)
output_path = os.path.join(dir_name, filename)
rank0_print(f'writing checkpoint to {output_path}...')
torch.save({
'step_idx': step,
'state': state,
'metrics': metrics if metrics is not None else {},
}, output_path)
def save(self, output_dir: Optional[str] = None, metrics: Optional[Dict] = None, save_model_only: bool=True):
"""
Save tokenizer, policy model, optimizer, scheduler state to disk, gathering from all processes
and saving only on the rank 0 process.
"""
if self.fsdp:
save_policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
with FSDP.state_dict_type(self.policy, StateDictType.FULL_STATE_DICT, state_dict_config=save_policy):
policy_state_dict = self.policy.state_dict()
if self.rank == 0:
self.write_state_dict(self.example_counter, policy_state_dict, metrics, 'policy.pt', output_dir)
self.tokenizer.save_pretrained(self.run_dir) # save tokenizer in HF format
del policy_state_dict
dist.barrier()
if not save_model_only:
save_policy = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True)
with FSDP.state_dict_type(self.policy, StateDictType.FULL_STATE_DICT, optim_state_dict_config=save_policy):
optimizer_state_dict = FSDP.optim_state_dict(self.policy, self.optimizer)
if self.rank == 0:
self.write_state_dict(self.example_counter, optimizer_state_dict, metrics, 'optimizer.pt', output_dir)
del optimizer_state_dict
dist.barrier()
if self.rank == 0:
scheduler_state_dict = self.scheduler.state_dict()
self.write_state_dict(self.example_counter, scheduler_state_dict, metrics, 'scheduler.pt', output_dir)
del scheduler_state_dict
dist.barrier()
else:
self.tokenizer.save_pretrained(self.run_dir) # save tokenizer in HF format
policy_state_dict = self.policy.state_dict()
self.write_state_dict(self.example_counter, policy_state_dict, metrics, 'policy.pt', output_dir)
del policy_state_dict
if not save_model_only:
optimizer_state_dict = self.optimizer.state_dict()
self.write_state_dict(self.example_counter, optimizer_state_dict, metrics, 'optimizer.pt', output_dir)
del optimizer_state_dict
scheduler_state_dict = self.scheduler.state_dict()
self.write_state_dict(self.example_counter, scheduler_state_dict, metrics, 'scheduler.pt', output_dir)
del scheduler_state_dict
class SFTTrainer(BasicTrainer):
def get_batch_metrics(self, batch: Dict[str, Union[List, torch.LongTensor]], mode: str=None):
"""Compute the loss and other metrics for the given batch of inputs.
Args:
batch: dictionary of inputs for the batch (should contain 'target_combined_attention_mask', 'target_combined_input_ids',
'target_labels', where 'target' corresponds to the SFT example)
mode: one of 'train', 'eval', 'sample'
"""
metrics = {}
if mode is None: mode = self.config.mode
policy_chosen_logits = self.policy(batch['target_combined_input_ids'], attention_mask=batch['target_combined_attention_mask'], use_cache=(not self.is_mistral)).logits.to(self.policy_dtype)
policy_chosen_logps = get_batch_logps(policy_chosen_logits, batch['target_labels'], average_log_prob=False)
losses = -policy_chosen_logps
policy_chosen_logps = all_gather_if_needed(policy_chosen_logps.detach(), self.rank, self.world_size)
all_devices_losses = all_gather_if_needed(losses.detach(), self.rank, self.world_size)
metrics[f'logps_{mode}/chosen'] = policy_chosen_logps.float().cpu().numpy().tolist()
metrics[f'loss/{mode}'] = all_devices_losses.float().cpu().numpy().tolist()
return losses.mean(), metrics
class UnpairedPreferenceTrainer(BasicTrainer):
"""A trainer for any loss that doesn't use paired preference, like KTO."""
def forward(self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""Run the given model on the given batch of inputs.
Returns:
chosen_logps: log probabilities of chosen examples (should be batch size / 2 if data was read in correctly)
rejected_logps: log probabilities of rejected examples (should be batch size / 2 if data was read in correctly)
"""
all_logits = model(batch['target_combined_input_ids'], attention_mask=batch['target_combined_attention_mask'], use_cache=(not self.is_mistral)).logits.to(self.policy_dtype)
all_logps = get_batch_logps(all_logits, batch['target_labels'], average_log_prob=False)
assert all_logps.shape[0] == len(batch['status'])
chosen_idx = [i for i in range(all_logps.shape[0]) if batch['status'][i] == 'chosen']
rejected_idx = [i for i in range(all_logps.shape[0]) if batch['status'][i] == 'rejected']
chosen_logps = all_logps[chosen_idx, ...]
rejected_logps = all_logps[rejected_idx, ...]
return chosen_logps, rejected_logps
def get_batch_metrics(self, batch: Dict[str, Union[List, torch.LongTensor]], mode: str=None):
"""Compute the loss and other metrics for the given batch of inputs."""
metrics = {}
if mode is None: mode = self.config.mode
if self.reference_model is None:
policy_chosen_logps, policy_rejected_logps = self.forward(self.policy, batch)
losses, chosen_rewards, rejected_rewards = self.loss(policy_chosen_logps, policy_rejected_logps)
else:
policy_chosen_logps, policy_rejected_logps = self.forward(self.policy, batch)
with torch.no_grad():
reference_chosen_logps, reference_rejected_logps = self.forward(self.reference_model, batch)
losses, chosen_rewards, rejected_rewards = self.loss(policy_chosen_logps, policy_rejected_logps, reference_chosen_logps, reference_rejected_logps)
# all_gather treats empty lists/tensors poorly, and empty lists can occur because a batch can contain all chosen or all rejected examples
# therefore, concatenate chosen + rejected rewards before all_gather
combined_rewards = torch.cat((chosen_rewards.detach(), rejected_rewards.detach()), 0)
combined_statuses = torch.Tensor([1] * len(chosen_rewards) + [0] * len(rejected_rewards)).to(self.device)
all_rewards = all_gather_if_needed(combined_rewards, self.rank, self.world_size)
all_statuses = all_gather_if_needed(combined_statuses, self.rank, self.world_size)
chosen_rewards_idx = [ i for i in range(len(all_statuses)) if all_statuses[i].item() == 1 ]
rejected_rewards_idx = [ i for i in range(len(all_statuses)) if all_statuses[i].item() == 0 ]
all_devices_losses = all_gather_if_needed(losses.detach(), self.rank, self.world_size)
metrics[f'rewards_{mode}/chosen'] = all_rewards[chosen_rewards_idx].float().cpu().numpy().tolist()
metrics[f'rewards_{mode}/rejected'] = all_rewards[rejected_rewards_idx].float().cpu().numpy().tolist()
metrics[f'rewards_{mode}/margins'] = [(all_rewards[chosen_rewards_idx].mean().nan_to_num(0) - all_rewards[rejected_rewards_idx].mean().nan_to_num(0)).item()]
metrics[f'loss/{mode}'] = all_devices_losses.float().cpu().numpy().tolist()
del policy_chosen_logps, policy_rejected_logps
del combined_rewards, combined_statuses, all_rewards, all_statuses, chosen_rewards_idx, rejected_rewards_idx, all_devices_losses
if self.reference_model:
del reference_chosen_logps, reference_rejected_logps
return losses.mean(), metrics
class PairedPreferenceTrainer(BasicTrainer):
"""A trainer for any loss that uses paired preference, like DPO."""
def concatenated_inputs(self, batch: Dict[str, Union[List, torch.LongTensor]]) -> Dict[str, torch.LongTensor]:
"""Concatenate the chosen and rejected inputs into a single tensor. The first half is chosen outputs, the second half is rejected.
Args:
batch: A batch of data. Must contain the keys 'chosen_combined_input_ids' and 'rejected_combined_input_ids', which are tensors of shape (batch_size, sequence_length).
Returns:
A dictionary containing the concatenated inputs under keys prefixed with 'concatenated_' (e.g., 'concatenated_combined_input_ids').
"""
max_length = max(batch['chosen_combined_input_ids'].shape[1], batch['rejected_combined_input_ids'].shape[1])
concatenated_batch = {}
for k in batch:
if k.startswith('chosen') and isinstance(batch[k], torch.Tensor):
pad_value = -100 if 'labels' in k else 0
concatenated_key = k.replace('chosen', 'concatenated')
concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value)
for k in batch:
if k.startswith('rejected') and isinstance(batch[k], torch.Tensor):
pad_value = -100 if 'labels' in k else 0
concatenated_key = k.replace('rejected', 'concatenated')
concatenated_batch[concatenated_key] = torch.cat((
concatenated_batch[concatenated_key],
pad_to_length(batch[k], max_length, pad_value=pad_value),
), dim=0)
return concatenated_batch
def forward(self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together.
Return two tensors of shape (batch size), one of the chosen examples, another of the rejected ones.
"""
concatenated_batch = self.concatenated_inputs(batch)
all_logits = model(concatenated_batch['concatenated_combined_input_ids'], attention_mask=concatenated_batch['concatenated_combined_attention_mask'], use_cache=(not self.is_mistral)).logits.to(self.policy_dtype)
all_logps = get_batch_logps(all_logits, concatenated_batch['concatenated_labels'], average_log_prob=False)
chosen_logps = all_logps[:batch['chosen_combined_input_ids'].shape[0]]
rejected_logps = all_logps[batch['chosen_combined_input_ids'].shape[0]:]
return chosen_logps, rejected_logps
def get_batch_metrics(self, batch: Dict[str, Union[List, torch.LongTensor]], mode: str=None):
"""Compute the loss and other metrics for the given batch of inputs."""
metrics = {}
if mode is None: mode = self.config.mode
if self.reference_model is None:
policy_chosen_logps, policy_rejected_logps = self.forward(self.policy, batch)
losses, chosen_rewards, rejected_rewards = self.loss(policy_chosen_logps, policy_rejected_logps)
else:
policy_chosen_logps, policy_rejected_logps = self.forward(self.policy, batch)
with torch.no_grad():
reference_chosen_logps, reference_rejected_logps = self.forward(self.reference_model, batch)
losses, chosen_rewards, rejected_rewards = self.loss(policy_chosen_logps, policy_rejected_logps, reference_chosen_logps, reference_rejected_logps)
# accuracy calculated on unpaired examples (for apples-to-apples comparison with UnpairedPreferenceTrainer)
reward_accuracies = (chosen_rewards > rejected_rewards.flip(dims=[0])).float()
chosen_rewards = all_gather_if_needed(chosen_rewards, self.rank, self.world_size)
rejected_rewards = all_gather_if_needed(rejected_rewards, self.rank, self.world_size)
reward_accuracies = all_gather_if_needed(reward_accuracies, self.rank, self.world_size)
policy_chosen_logps = all_gather_if_needed(policy_chosen_logps.detach(), self.rank, self.world_size)
policy_rejected_logps = all_gather_if_needed(policy_rejected_logps.detach(), self.rank, self.world_size)
all_devices_losses = all_gather_if_needed(losses.detach(), self.rank, self.world_size)
metrics[f'rewards_{mode}/chosen'] = chosen_rewards.float().cpu().numpy().tolist()
metrics[f'rewards_{mode}/rejected'] = rejected_rewards.float().cpu().numpy().tolist()
metrics[f'rewards_{mode}/accuracies'] = reward_accuracies.float().cpu().numpy().tolist()
metrics[f'rewards_{mode}/margins'] = (chosen_rewards - rejected_rewards).float().cpu().numpy().tolist()
metrics[f'logps_{mode}/rejected'] = policy_rejected_logps.float().cpu().numpy().tolist()
metrics[f'logps_{mode}/chosen'] = policy_chosen_logps.float().cpu().numpy().tolist()
metrics[f'loss/{mode}'] = all_devices_losses.float().cpu().numpy().tolist()
del chosen_rewards, rejected_rewards, reward_accuracies, policy_chosen_logps, policy_rejected_logps, all_devices_losses
if self.reference_model:
del reference_chosen_logps, reference_rejected_logps
return losses.mean(), metrics
class DPOTrainer(PairedPreferenceTrainer):
def loss(self,
policy_chosen_logps: torch.FloatTensor,
policy_rejected_logps: torch.FloatTensor,
reference_chosen_logps: torch.FloatTensor,
reference_rejected_logps: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Compute the DPO loss for a batch of policy and reference model log probabilities."""
pi_logratios = policy_chosen_logps - policy_rejected_logps
ref_logratios = reference_chosen_logps - reference_rejected_logps
logits = pi_logratios - ref_logratios
losses = -F.logsigmoid(self.config.loss.beta * logits)
chosen_rewards = self.config.loss.beta * (policy_chosen_logps - reference_chosen_logps).detach()
rejected_rewards = self.config.loss.beta * (policy_rejected_logps - reference_rejected_logps).detach()
return losses, chosen_rewards, rejected_rewards
class CDPOTrainer(PairedPreferenceTrainer):
def loss(self,
policy_chosen_logps: torch.FloatTensor,
policy_rejected_logps: torch.FloatTensor,
reference_chosen_logps: torch.FloatTensor,
reference_rejected_logps: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Compute the CDPO loss for a batch of policy and reference model log probabilities."""
forward_losses = -F.logsigmoid(self.config.loss.beta * ((policy_chosen_logps - reference_chosen_logps) - (policy_rejected_logps - reference_rejected_logps)))
reverse_losses = -F.logsigmoid(self.config.loss.beta * ((policy_rejected_logps - reference_rejected_logps) - (policy_chosen_logps - reference_chosen_logps)))
losses = (1 - self.config.loss.epsilon) * forward_losses + self.config.loss.epsilon * reverse_losses
chosen_rewards = self.config.loss.beta * (policy_chosen_logps - reference_chosen_logps).detach()
rejected_rewards = self.config.loss.beta * (policy_rejected_logps - reference_rejected_logps).detach()
return losses, chosen_rewards, rejected_rewards
class SLiCTrainer(PairedPreferenceTrainer):
def loss(self, policy_chosen_logps: torch.FloatTensor, policy_rejected_logps: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Compute the SLIC loss as defined by Zhao et al. in https://arxiv.org/pdf/2305.10425.pdf
Calibration loss defined as:
L(x, y) := max(0, beta - log p_policy(y_chosen|x) + log p_policy(y_rejected|x))
For the cross-entropy loss, just use the NLL of the chosen sequence (equivalent to SFT).
"""
cal_loss = torch.clamp(self.config.loss.beta - policy_chosen_logps + policy_rejected_logps, min=0)
reg_loss = -policy_chosen_logps
losses = cal_loss + self.config.loss.lambda_coef * reg_loss
chosen_rewards = policy_chosen_logps.detach()
rejected_rewards = policy_rejected_logps.detach()
return losses, chosen_rewards, rejected_rewards
class SimpleKTOTrainer(UnpairedPreferenceTrainer):
def loss(self,
policy_chosen_logps: torch.FloatTensor,
policy_rejected_logps: torch.FloatTensor,
reference_chosen_logps: torch.FloatTensor,
reference_rejected_logps: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Compute the Kahneman-Tversky loss for a batch of policy and reference model log probabilities.
For each batch of n/2 chosen examples and n/2 rejected examples (belonging to n different inputs), calculate the loss as follows.
If generation y ~ p_chosen, where x' are the examples with rejected generations, we have the 'chosen' loss:
L(x, y) := 1 - sigmoid(beta * ([log p_policy(y|x) - log p_reference(y|x)] - KL(p_policy(y_rejected|x') || p_reference(y_rejected|x'))))
If generation y ~ p_rejected, where x' are the examples with chosen generations, we have the 'rejected' loss:
L(x, y) := 1 - sigmoid(beta * (KL(p_policy(y_chosen|x') || p_reference(y_chosen|x')) - [log p_policy(y|x) - log p_reference(y|x)]))
"""
chosen_KL = (policy_chosen_logps - reference_chosen_logps).mean().clamp(min=0)
rejected_KL = (policy_rejected_logps - reference_rejected_logps).mean().clamp(min=0)
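# The KL terms are Monte-Carlo estimates over the microbatch and can be negative, so they are clamped
# at zero to respect the non-negativity of the true KL divergence.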
chosen_logratios = (policy_chosen_logps - reference_chosen_logps)
rejected_logratios = (policy_rejected_logps - reference_rejected_logps)
losses = torch.cat((1 - F.sigmoid(self.config.loss.beta * (chosen_logratios - rejected_KL)), 1 - F.sigmoid(self.config.loss.beta * (chosen_KL - rejected_logratios))), 0)
chosen_rewards = self.config.loss.beta * (policy_chosen_logps - reference_chosen_logps).detach()
rejected_rewards = self.config.loss.beta * (policy_rejected_logps - reference_rejected_logps).detach()
return losses, chosen_rewards, rejected_rewards
class KTOTrainer(UnpairedPreferenceTrainer):
def loss(self,
policy_chosen_logps: torch.FloatTensor,
policy_rejected_logps: torch.FloatTensor,
policy_KL_logps: torch.FloatTensor,
reference_chosen_logps: torch.FloatTensor,
reference_rejected_logps: torch.FloatTensor,
reference_KL_logps: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Compute the Kahneman-Tversky loss for a batch of policy and reference model log probabilities.
If generation y ~ p_desirable, we have the 'desirable' loss:
L(x, y) := 1 - sigmoid(beta * ([log p_policy(y|x) - log p_reference(y|x)] - KL(p_policy || p_reference)))
If generation y ~ p_undesirable, we have the 'undesirable' loss:
L(x, y) := 1 - sigmoid(beta * (KL(p_policy || p_reference) - [log p_policy(y|x) - log p_reference(y|x)]))
The desirable losses are weighted by config.loss.desirable_weight.
The undesirable losses are weighted by config.loss.undesirable_weight.
These weights can be used to address imbalances in the ratio of desirable to undesirable examples.
The KL term is estimated by matching x with unrelated outputs y', then calculating the average log ratio
log p_policy(y'|x) - log p_reference(y'|x). Doing so avoids the requirement that there be equal numbers of
desirable and undesirable examples in the microbatch.
"""
KL = (policy_KL_logps - reference_KL_logps).mean().detach()
# nn.all_reduce sums up the KL estimates across all devices (gradient will also be scaled by world size)
dist.nn.all_reduce(KL, op=dist.ReduceOp.SUM)
# take average (will also scale gradients appropriately)
KL = (KL / self.world_size).clamp(min=0)
if policy_chosen_logps.shape[0] != 0:
chosen_logratios = (policy_chosen_logps - reference_chosen_logps)
chosen_losses = 1 - F.sigmoid(self.config.loss.beta * (chosen_logratios - KL))
chosen_rewards = self.config.loss.beta * chosen_logratios.detach()
else:
# important to cast to policy_dtype; otherwise error will occur during all_gather
chosen_losses = torch.Tensor([]).to(self.policy_dtype).to(self.device)
chosen_rewards = torch.Tensor([]).to(self.policy_dtype).to(self.device)
if policy_rejected_logps.shape[0] != 0:
rejected_logratios = (policy_rejected_logps - reference_rejected_logps)
rejected_losses = 1 - F.sigmoid(self.config.loss.beta * (KL - rejected_logratios))
rejected_rewards = self.config.loss.beta * rejected_logratios.detach()
else:
# important to cast to policy_dtype; otherwise error will occur during all_gather
rejected_losses = torch.Tensor([]).to(self.policy_dtype).to(self.device)
rejected_rewards = torch.Tensor([]).to(self.policy_dtype).to(self.device)
losses = torch.cat((self.config.loss.desirable_weight * chosen_losses, self.config.loss.undesirable_weight * rejected_losses), 0)
return losses, chosen_rewards, rejected_rewards, KL
def forward(self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Run the given model on the given batch of inputs. The examples used to calculate the rewards and the KL term should be
processed in a single forward pass, since the gradient is taken wrt both groups. Doing it in multiple forward passes will give
you a RuntimeError: 'The tensor has a non-zero number of elements, but its data is not allocated yet.'
Args:
- model: the model to use for the forward pass
- batch: the microbatch (should have the input ids, attention mask, and labels)
Returns:
chosen_logps: log probabilities of chosen examples (should be batch size / 2 if data was read in correctly)
rejected_logps: log probabilities of rejected examples (should be batch size / 2 if data was read in correctly)
KL_logps: log probabilities of the unmatched y'|x (used to estimate the KL divergence between policy and reference; should be batch size)
"""
max_length = max(batch['target_combined_input_ids'].shape[1], batch['KL_combined_input_ids'].shape[1])
concatenated_batch = {}
for k in batch:
if k.startswith('target') and isinstance(batch[k], torch.Tensor):
pad_value = -100 if 'labels' in k else 0
concatenated_key = k.replace('target', 'concatenated')
concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value)
for k in batch:
if k.startswith('KL') and isinstance(batch[k], torch.Tensor):
pad_value = -100 if 'labels' in k else 0
concatenated_key = k.replace('KL', 'concatenated')
concatenated_batch[concatenated_key] = torch.cat((
concatenated_batch[concatenated_key],
pad_to_length(batch[k], max_length, pad_value=pad_value),
), dim=0)
all_logits = model(
concatenated_batch[f'concatenated_combined_input_ids'],
attention_mask=concatenated_batch[f'concatenated_combined_attention_mask']
).logits.to(self.policy_dtype)
all_logps = get_batch_logps(all_logits, concatenated_batch[f'concatenated_labels'], average_log_prob=False)
target_logps = all_logps[:batch['target_combined_input_ids'].shape[0]]
KL_logps = all_logps[batch['target_combined_input_ids'].shape[0]:]
assert target_logps.shape[0] == len(batch['status'])
chosen_idx = [i for i in range(target_logps.shape[0]) if batch['status'][i] == 'chosen']
rejected_idx = [i for i in range(target_logps.shape[0]) if batch['status'][i] == 'rejected']
chosen_logps = target_logps[chosen_idx, ...]
rejected_logps = target_logps[rejected_idx, ...]
return chosen_logps, rejected_logps, KL_logps
def get_batch_metrics(self, batch: Dict[str, Union[List, torch.LongTensor]], mode: str=None):
"""Compute the loss and other metrics for the given batch of inputs."""
metrics = {}
if mode is None: mode = self.config.mode
policy_chosen_logps, policy_rejected_logps, policy_KL_logps = self.forward(self.policy, batch)
with torch.no_grad():
reference_chosen_logps, reference_rejected_logps, reference_KL_logps = self.forward(self.reference_model, batch)
losses, chosen_rewards, rejected_rewards, KL = self.loss(
policy_chosen_logps,
policy_rejected_logps,
policy_KL_logps,
reference_chosen_logps,
reference_rejected_logps,
reference_KL_logps
)
combined_rewards = torch.cat((chosen_rewards.detach(), rejected_rewards.detach()), 0)
combined_statuses = torch.Tensor([1] * len(chosen_rewards) + [0] * len(rejected_rewards)).to(self.device)
all_rewards = all_gather_if_needed(combined_rewards, self.rank, self.world_size)
all_statuses = all_gather_if_needed(combined_statuses, self.rank, self.world_size)
all_KL = all_gather_if_needed(KL, self.rank, self.world_size)
chosen_rewards_idx = [ i for i in range(len(all_statuses)) if all_statuses[i].item() == 1 ]
rejected_rewards_idx = [ i for i in range(len(all_statuses)) if all_statuses[i].item() == 0 ]
all_devices_losses = all_gather_if_needed(losses.detach(), self.rank, self.world_size)
metrics[f'rewards_{mode}/chosen'] = all_rewards[chosen_rewards_idx].float().cpu().numpy().tolist()
metrics[f'rewards_{mode}/rejected'] = all_rewards[rejected_rewards_idx].float().cpu().numpy().tolist()
metrics[f'rewards_{mode}/margins'] = [(all_rewards[chosen_rewards_idx].mean().nan_to_num(0) - all_rewards[rejected_rewards_idx].mean().nan_to_num(0)).item()]
metrics[f'rewards_{mode}/KL_estimate'] = all_KL.float().cpu().numpy().tolist()
metrics[f'loss/{mode}'] = all_devices_losses.float().cpu().numpy().tolist()
del policy_chosen_logps, policy_rejected_logps, policy_KL_logps, reference_chosen_logps, reference_rejected_logps, reference_KL_logps
del combined_rewards, combined_statuses, all_rewards, all_statuses, chosen_rewards_idx, rejected_rewards_idx, all_devices_losses, all_KL
return losses.mean(), metrics
class KTOZeroTrainer(UnpairedPreferenceTrainer):
def loss(self,
policy_chosen_logps: torch.FloatTensor,
policy_rejected_logps: torch.FloatTensor,
reference_chosen_logps: torch.FloatTensor,
reference_rejected_logps: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Compute a variant of the Kahneman-Tversky loss where the reference point is 0 instead of the expected reward
(i.e., the human reference point remains what it is at initialization, when policy = reference). This should NOT
be used for purposes other than to understand the importance of the KL term.
One can also think of this as a variant of unlikelihood training (Welleck et al., 2023). The purpose of this is to understand
the importance of the KL term in the standard variant of the KTO loss. We do *not* recommend using this in practice as its
performance is usually inferior. For each batch of n/2 chosen examples and n/2 rejected examples (belonging to n different
inputs), calculate the loss as follows.
If generation y ~ p_chosen, where x' are the examples with rejected generations, we have the 'chosen' loss:
L(x, y) := 1 - sigmoid(beta * ([log p_policy(y|x) - log p_reference(y|x)] - 0))
If generation y ~ p_rejected, where x' are the examples with chosen generations, we have the 'rejected' loss:
L(x, y) := 1 - sigmoid(beta * (0 - [log p_policy(y|x) - log p_reference(y|x)]))
"""
chosen_logratios = (policy_chosen_logps - reference_chosen_logps)
rejected_logratios = (policy_rejected_logps - reference_rejected_logps)
losses = torch.cat((1 - F.sigmoid(self.config.loss.beta * (chosen_logratios - 0)), 1 - F.sigmoid(self.config.loss.beta * (0 - rejected_logratios))), 0)
chosen_rewards = self.config.loss.beta * (policy_chosen_logps - reference_chosen_logps).detach()
rejected_rewards = self.config.loss.beta * (policy_rejected_logps - reference_rejected_logps).detach()
return losses, chosen_rewards, rejected_rewards
class PPOTrainer(BasicTrainer):
"""One-step, offline variant of PPO."""
def forward(self, model: AutoModelForCausalLMWithValueHead, batch: Dict[str, Union[List, torch.LongTensor]], is_policy: bool=True) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Run the given model on the given batch of inputs.
Args:
model: model to run forward pass on
batch: input batch (forward pass will be run on keys with prefix 'target')
is_policy: whether the model is the policy or reference
Returns:
all_logps: batch log probabilities at the token level of shape (batch size, sequence length)
all_logits: corresponding logits of shape (batch size, sequence length)
all_values: values predicted for each token, of shape (batch size, sequence length)
"""
if is_policy:
# the keys here use the prefix 'target', which can refer to either the preferred or the dispreferred generations;
# the 'status' field contains the actual status of each generation
all_logits, _, all_values = model(batch['target_combined_input_ids'], attention_mask=batch['target_combined_attention_mask'])
all_values = all_values[:, :-1].contiguous().to(self.rank)
else:
all_logits = model(batch['target_combined_input_ids'], attention_mask=batch['target_combined_attention_mask'], use_cache=(not self.is_mistral)).logits.to(self.policy_dtype)
all_values = None
all_logps = get_batch_logps(all_logits.to(self.policy_dtype), batch['target_labels'], average_log_prob=False, token_level=True)
# Returned tensors will have sequence length that is one less than the inputs (to account for label shifting).
all_logits = all_logits[:, :-1].contiguous().to(self.rank)
all_logps = all_logps.contiguous().to(self.rank)
return all_logps, all_logits, all_values
def compute_advantages(self, values: torch.FloatTensor, rewards: torch.FloatTensor, masks: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""
Estimate the advantages and rewards for every token taken.
Args:
values: the estimated values of the tokens. Should already be detached from graph.
rewards: signal from the environment as to whether the generation is good or bad.
In the basic implementation, this is only one nonzero reward, on the last unpadded token of each sequence.
torch tensor of shape (batch size, sequence length)
masks: torch tensor of shape (batch size, sequence length); 1 if token should be considered and 0 otherwise
Returns:
advantages: torch tensor of shape (batch size, sequence length)
returns: Also called 'rewards-to-go'.
Only tokens after the current token are used to calculate this: http://rail.eecs.berkeley.edu/deeprlcourse/static/slides/lec-5.pdf
torch tensor of shape (batch size, sequence length)
discounted_future_rewards: the discounted sum of future rewards at each token (used as the critic regression target).
torch tensor of shape (batch size, sequence length)
"""
values = values * masks
rewards = rewards * masks
gae = 0 # generalized advantage estimation
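# GAE (generalized advantage estimation, Schulman et al. 2016) is computed backwards over the sequence:
#   delta_t = r_t + gamma * V_{t+1} - V_t         (TD residual)
#   A_t     = delta_t + gamma * lambda * A_{t+1}  (recursive advantage estimate)
# The same backward pass also accumulates the discounted rewards-to-go used later as the critic target.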
seq_len = rewards.shape[-1]
advantages_reversed = []
discounted_future_reward = torch.zeros_like(rewards[:,0])
discounted_future_rewards_reversed = []
for t in reversed(range(seq_len)):
# see https://towardsdatascience.com/proximal-policy-optimization-tutorial-part-2-2-gae-and-ppo-loss-fe1b3c5549e8
delta = rewards[:, t] + self.config.loss.gamma * (values[:, t + 1] if t < seq_len - 1 else 0.0) - values[:, t]
gae = delta + self.config.loss.gamma * self.config.loss.lam * gae
advantages_reversed.append(gae)
discounted_future_rewards_reversed.append(discounted_future_reward)
discounted_future_reward = rewards[:, t] + self.config.loss.gamma * discounted_future_reward
advantages = (torch.stack(advantages_reversed[::-1]).transpose(0, 1) * masks)
returns = (advantages + values).contiguous().to(self.rank)
discounted_future_rewards = (torch.stack(discounted_future_rewards_reversed[::-1]).transpose(0, 1) * masks).contiguous().to(self.rank)
# normalizing advantages leads to more stable learning
mean_adv, var_adv = masked_mean(advantages, masks), masked_var(advantages, masks)
normalized_advantages = (advantages - mean_adv) * torch.rsqrt(var_adv + 1e-8)
normalized_advantages = (normalized_advantages * masks).detach().contiguous().to(self.rank)
return normalized_advantages, returns, discounted_future_rewards
def loss(self, batch: Dict, episode: Dict) -> Tuple[torch.FloatTensor, Dict]:
"""
Given the batch statistics and the current episode's values, calculate the loss and return some loss statistics.
Args:
batch: dictionary containing batch data (should have keys 'values', 'returns', 'advantages', 'logprobs', 'masks', 'discounted_future_rewards')
episode: dictionary containing the episode data (should have keys 'logits', 'values', 'logprobs')
Returns:
loss: combined policy and critic loss of shape (1,)
loss_stats: dictionary of episode/batch statistics
"""
value_losses = (episode['values'] - batch['discounted_future_rewards'].detach()) ** 2
critic_loss = 0.5 * masked_mean(value_losses, batch['masks'])
ratio = torch.exp(episode['logprobs'] - batch['logprobs'])
policy_losses = -batch['advantages'] * ratio
policy_losses_clipped = -batch['advantages'] * torch.clamp(ratio, self.config.loss.cliprange, 1 / self.config.loss.cliprange)
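# PPO-style clipped surrogate: taking the elementwise max of the two negated objectives gives the
# pessimistic bound on policy improvement. Note that this implementation clamps the ratio to
# [cliprange, 1 / cliprange] rather than the [1 - eps, 1 + eps] interval of the original PPO paper.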
policy_loss = masked_mean(torch.max(policy_losses, policy_losses_clipped), batch['masks'])
KL_penalty = masked_mean(batch['logprobs'] - episode['logprobs'], batch['masks'])
loss = policy_loss + self.config.loss.critic_coef * critic_loss + self.config.loss.KL_coef * KL_penalty
loss_stats = {
'loss/total' : loss.detach(),
'loss/critic' : critic_loss.detach(),
'loss/policy' : policy_loss.detach(),
'clipfrac/policy' : masked_mean(torch.gt(policy_losses_clipped, policy_losses).float(), batch['masks']).detach(), | 'loss/entropy' : entropy_from_logits(episode['logits'], batch['masks']).detach(), | 10 | 2023-12-03 07:53:36+00:00 | 12k |
AIFSH/NativeSpeaker | src/third_part/whisperx/transcribe.py | [
{
"identifier": "align",
"path": "src/third_part/whisperx/alignment.py",
"snippet": "def align(\n transcript: Iterable[SingleSegment],\n model: torch.nn.Module,\n align_model_metadata: dict,\n audio: Union[str, np.ndarray, torch.Tensor],\n device: str,\n interpolate_method: str = \"nearest\",\n return_char_alignments: bool = False,\n print_progress: bool = False,\n combined_progress: bool = False,\n) -> AlignedTranscriptionResult:\n \"\"\"\n Align phoneme recognition predictions to known transcription.\n \"\"\"\n \n if not torch.is_tensor(audio):\n if isinstance(audio, str):\n audio = load_audio(audio)\n audio = torch.from_numpy(audio)\n if len(audio.shape) == 1:\n audio = audio.unsqueeze(0)\n \n MAX_DURATION = audio.shape[1] / SAMPLE_RATE\n\n model_dictionary = align_model_metadata[\"dictionary\"]\n model_lang = align_model_metadata[\"language\"]\n model_type = align_model_metadata[\"type\"]\n\n # 1. Preprocess to keep only characters in dictionary\n total_segments = len(transcript)\n for sdx, segment in enumerate(transcript):\n # strip spaces at beginning / end, but keep track of the amount.\n if print_progress:\n base_progress = ((sdx + 1) / total_segments) * 100\n percent_complete = (50 + base_progress / 2) if combined_progress else base_progress\n print(f\"Progress: {percent_complete:.2f}%...\")\n \n num_leading = len(segment[\"text\"]) - len(segment[\"text\"].lstrip())\n num_trailing = len(segment[\"text\"]) - len(segment[\"text\"].rstrip())\n text = segment[\"text\"]\n\n # split into words\n if model_lang not in LANGUAGES_WITHOUT_SPACES:\n per_word = text.split(\" \")\n else:\n per_word = text\n\n clean_char, clean_cdx = [], []\n for cdx, char in enumerate(text):\n char_ = char.lower()\n # wav2vec2 models use \"|\" character to represent spaces\n if model_lang not in LANGUAGES_WITHOUT_SPACES:\n char_ = char_.replace(\" \", \"|\")\n \n # ignore whitespace at beginning and end of transcript\n if cdx < num_leading:\n pass\n elif cdx > len(text) - num_trailing - 1:\n pass\n elif char_ in model_dictionary.keys():\n clean_char.append(char_)\n clean_cdx.append(cdx)\n\n clean_wdx = []\n for wdx, wrd in enumerate(per_word):\n if any([c in model_dictionary.keys() for c in wrd]):\n clean_wdx.append(wdx)\n\n \n punkt_param = PunktParameters()\n punkt_param.abbrev_types = set(PUNKT_ABBREVIATIONS)\n sentence_splitter = PunktSentenceTokenizer(punkt_param)\n sentence_spans = list(sentence_splitter.span_tokenize(text))\n\n segment[\"clean_char\"] = clean_char\n segment[\"clean_cdx\"] = clean_cdx\n segment[\"clean_wdx\"] = clean_wdx\n segment[\"sentence_spans\"] = sentence_spans\n \n aligned_segments: List[SingleAlignedSegment] = []\n \n # 2. 
Get prediction matrix from alignment model & align\n for sdx, segment in enumerate(transcript):\n \n t1 = segment[\"start\"]\n t2 = segment[\"end\"]\n text = segment[\"text\"]\n\n aligned_seg: SingleAlignedSegment = {\n \"start\": t1,\n \"end\": t2,\n \"text\": text,\n \"words\": [],\n }\n\n if return_char_alignments:\n aligned_seg[\"chars\"] = []\n\n # check we can align\n if len(segment[\"clean_char\"]) == 0:\n print(f'Failed to align segment (\"{segment[\"text\"]}\"): no characters in this segment found in model dictionary, resorting to original...')\n aligned_segments.append(aligned_seg)\n continue\n\n if t1 >= MAX_DURATION:\n print(f'Failed to align segment (\"{segment[\"text\"]}\"): original start time longer than audio duration, skipping...')\n aligned_segments.append(aligned_seg)\n continue\n\n text_clean = \"\".join(segment[\"clean_char\"])\n tokens = [model_dictionary[c] for c in text_clean]\n\n f1 = int(t1 * SAMPLE_RATE)\n f2 = int(t2 * SAMPLE_RATE)\n\n # TODO: Probably can get some speedup gain with batched inference here\n waveform_segment = audio[:, f1:f2]\n # Handle the minimum input length for wav2vec2 models\n if waveform_segment.shape[-1] < 400:\n lengths = torch.as_tensor([waveform_segment.shape[-1]]).to(device)\n waveform_segment = torch.nn.functional.pad(\n waveform_segment, (0, 400 - waveform_segment.shape[-1])\n )\n else:\n lengths = None\n \n with torch.inference_mode():\n if model_type == \"torchaudio\":\n emissions, _ = model(waveform_segment.to(device), lengths=lengths)\n elif model_type == \"huggingface\":\n emissions = model(waveform_segment.to(device)).logits\n else:\n raise NotImplementedError(f\"Align model of type {model_type} not supported.\")\n emissions = torch.log_softmax(emissions, dim=-1)\n\n emission = emissions[0].cpu().detach()\n\n blank_id = 0\n for char, code in model_dictionary.items():\n if char == '[pad]' or char == '<pad>':\n blank_id = code\n\n trellis = get_trellis(emission, tokens, blank_id)\n path = backtrack(trellis, emission, tokens, blank_id)\n\n if path is None:\n print(f'Failed to align segment (\"{segment[\"text\"]}\"): backtrack failed, resorting to original...')\n aligned_segments.append(aligned_seg)\n continue\n\n char_segments = merge_repeats(path, text_clean)\n\n duration = t2 -t1\n ratio = duration * waveform_segment.size(0) / (trellis.size(0) - 1)\n\n # assign timestamps to aligned characters\n char_segments_arr = []\n word_idx = 0\n for cdx, char in enumerate(text):\n start, end, score = None, None, None\n if cdx in segment[\"clean_cdx\"]:\n char_seg = char_segments[segment[\"clean_cdx\"].index(cdx)]\n start = round(char_seg.start * ratio + t1, 3)\n end = round(char_seg.end * ratio + t1, 3)\n score = round(char_seg.score, 3)\n\n char_segments_arr.append(\n {\n \"char\": char,\n \"start\": start,\n \"end\": end,\n \"score\": score,\n \"word-idx\": word_idx,\n }\n )\n\n # increment word_idx, nltk word tokenization would probably be more robust here, but us space for now...\n if model_lang in LANGUAGES_WITHOUT_SPACES:\n word_idx += 1\n elif cdx == len(text) - 1 or text[cdx+1] == \" \":\n word_idx += 1\n \n char_segments_arr = pd.DataFrame(char_segments_arr)\n\n aligned_subsegments = []\n # assign sentence_idx to each character index\n char_segments_arr[\"sentence-idx\"] = None\n for sdx, (sstart, send) in enumerate(segment[\"sentence_spans\"]):\n curr_chars = char_segments_arr.loc[(char_segments_arr.index >= sstart) & (char_segments_arr.index <= send)]\n char_segments_arr.loc[(char_segments_arr.index >= sstart) & 
(char_segments_arr.index <= send), \"sentence-idx\"] = sdx\n \n sentence_text = text[sstart:send]\n sentence_start = curr_chars[\"start\"].min()\n end_chars = curr_chars[curr_chars[\"char\"] != ' ']\n sentence_end = end_chars[\"end\"].max()\n sentence_words = []\n\n for word_idx in curr_chars[\"word-idx\"].unique():\n word_chars = curr_chars.loc[curr_chars[\"word-idx\"] == word_idx]\n word_text = \"\".join(word_chars[\"char\"].tolist()).strip()\n if len(word_text) == 0:\n continue\n\n # dont use space character for alignment\n word_chars = word_chars[word_chars[\"char\"] != \" \"]\n\n word_start = word_chars[\"start\"].min()\n word_end = word_chars[\"end\"].max()\n word_score = round(word_chars[\"score\"].mean(), 3)\n\n # -1 indicates unalignable \n word_segment = {\"word\": word_text}\n\n if not np.isnan(word_start):\n word_segment[\"start\"] = word_start\n if not np.isnan(word_end):\n word_segment[\"end\"] = word_end\n if not np.isnan(word_score):\n word_segment[\"score\"] = word_score\n\n sentence_words.append(word_segment)\n \n aligned_subsegments.append({\n \"text\": sentence_text,\n \"start\": sentence_start,\n \"end\": sentence_end,\n \"words\": sentence_words,\n })\n\n if return_char_alignments:\n curr_chars = curr_chars[[\"char\", \"start\", \"end\", \"score\"]]\n curr_chars.fillna(-1, inplace=True)\n curr_chars = curr_chars.to_dict(\"records\")\n curr_chars = [{key: val for key, val in char.items() if val != -1} for char in curr_chars]\n aligned_subsegments[-1][\"chars\"] = curr_chars\n\n aligned_subsegments = pd.DataFrame(aligned_subsegments)\n aligned_subsegments[\"start\"] = interpolate_nans(aligned_subsegments[\"start\"], method=interpolate_method)\n aligned_subsegments[\"end\"] = interpolate_nans(aligned_subsegments[\"end\"], method=interpolate_method)\n # concatenate sentences with same timestamps\n agg_dict = {\"text\": \" \".join, \"words\": \"sum\"}\n if model_lang in LANGUAGES_WITHOUT_SPACES:\n agg_dict[\"text\"] = \"\".join\n if return_char_alignments:\n agg_dict[\"chars\"] = \"sum\"\n aligned_subsegments= aligned_subsegments.groupby([\"start\", \"end\"], as_index=False).agg(agg_dict)\n aligned_subsegments = aligned_subsegments.to_dict('records')\n aligned_segments += aligned_subsegments\n\n # create word_segments list\n word_segments: List[SingleWordSegment] = []\n for segment in aligned_segments:\n word_segments += segment[\"words\"]\n\n return {\"segments\": aligned_segments, \"word_segments\": word_segments}"
},
{
"identifier": "load_align_model",
"path": "src/third_part/whisperx/alignment.py",
"snippet": "def load_align_model(language_code, device, model_name=None, model_dir=None):\n if model_name is None:\n # use default model\n if language_code in DEFAULT_ALIGN_MODELS_TORCH:\n model_name = DEFAULT_ALIGN_MODELS_TORCH[language_code]\n elif language_code in DEFAULT_ALIGN_MODELS_HF:\n model_name = DEFAULT_ALIGN_MODELS_HF[language_code]\n else:\n print(f\"There is no default alignment model set for this language ({language_code}).\\\n Please find a wav2vec2.0 model finetuned on this language in https://huggingface.co/models, then pass the model name in --align_model [MODEL_NAME]\")\n raise ValueError(f\"No default align-model for language: {language_code}\")\n\n if model_name in torchaudio.pipelines.__all__:\n pipeline_type = \"torchaudio\"\n bundle = torchaudio.pipelines.__dict__[model_name]\n align_model = bundle.get_model(dl_kwargs={\"model_dir\": model_dir}).to(device)\n labels = bundle.get_labels()\n align_dictionary = {c.lower(): i for i, c in enumerate(labels)}\n else:\n try:\n processor = Wav2Vec2Processor.from_pretrained(model_name)\n align_model = Wav2Vec2ForCTC.from_pretrained(model_name)\n except Exception as e:\n print(e)\n print(f\"Error loading model from huggingface, check https://huggingface.co/models for finetuned wav2vec2.0 models\")\n raise ValueError(f'The chosen align_model \"{model_name}\" could not be found in huggingface (https://huggingface.co/models) or torchaudio (https://pytorch.org/audio/stable/pipelines.html#id14)')\n pipeline_type = \"huggingface\"\n align_model = align_model.to(device)\n labels = processor.tokenizer.get_vocab()\n align_dictionary = {char.lower(): code for char,code in processor.tokenizer.get_vocab().items()}\n\n align_metadata = {\"language\": language_code, \"dictionary\": align_dictionary, \"type\": pipeline_type}\n\n return align_model, align_metadata"
},
{
"identifier": "load_model",
"path": "src/third_part/whisperx/asr.py",
"snippet": "def load_model(whisper_arch,\n device,\n device_index=0,\n compute_type=\"float16\",\n asr_options=None,\n language : Optional[str] = None,\n vad_options=None,\n model : Optional[WhisperModel] = None,\n task=\"transcribe\",\n download_root=None,\n threads=4):\n '''Load a Whisper model for inference.\n Args:\n whisper_arch: str - The name of the Whisper model to load.\n device: str - The device to load the model on.\n compute_type: str - The compute type to use for the model.\n options: dict - A dictionary of options to use for the model.\n language: str - The language of the model. (use English for now)\n model: Optional[WhisperModel] - The WhisperModel instance to use.\n download_root: Optional[str] - The root directory to download the model to.\n threads: int - The number of cpu threads to use per worker, e.g. will be multiplied by num workers.\n Returns:\n A Whisper pipeline.\n '''\n\n if whisper_arch.endswith(\".en\"):\n language = \"en\"\n\n model = model or WhisperModel(whisper_arch,\n device=device,\n device_index=device_index,\n compute_type=compute_type,\n download_root=download_root,\n cpu_threads=threads)\n if language is not None:\n tokenizer = faster_whisper.tokenizer.Tokenizer(model.hf_tokenizer, model.model.is_multilingual, task=task, language=language)\n else:\n print(\"No language specified, language will be first be detected for each audio file (increases inference time).\")\n tokenizer = None\n\n default_asr_options = {\n \"beam_size\": 5,\n \"best_of\": 5,\n \"patience\": 1,\n \"length_penalty\": 1,\n \"repetition_penalty\": 1,\n \"no_repeat_ngram_size\": 0,\n \"temperatures\": [0.0, 0.2, 0.4, 0.6, 0.8, 1.0],\n \"compression_ratio_threshold\": 2.4,\n \"log_prob_threshold\": -1.0,\n \"no_speech_threshold\": 0.6,\n \"condition_on_previous_text\": False,\n \"prompt_reset_on_temperature\": 0.5,\n \"initial_prompt\": None,\n \"prefix\": None,\n \"suppress_blank\": True,\n \"suppress_tokens\": [-1],\n \"without_timestamps\": True,\n \"max_initial_timestamp\": 0.0,\n \"word_timestamps\": False,\n \"prepend_punctuations\": \"\\\"'“¿([{-\",\n \"append_punctuations\": \"\\\"'.。,,!!??::”)]}、\",\n \"suppress_numerals\": False,\n }\n\n if asr_options is not None:\n default_asr_options.update(asr_options)\n\n suppress_numerals = default_asr_options[\"suppress_numerals\"]\n del default_asr_options[\"suppress_numerals\"]\n\n default_asr_options = faster_whisper.transcribe.TranscriptionOptions(**default_asr_options)\n\n default_vad_options = {\n \"vad_onset\": 0.500,\n \"vad_offset\": 0.363\n }\n\n if vad_options is not None:\n default_vad_options.update(vad_options)\n\n vad_model = load_vad_model(torch.device(device), use_auth_token=None, **default_vad_options)\n\n return FasterWhisperPipeline(\n model=model,\n vad=vad_model,\n options=default_asr_options,\n tokenizer=tokenizer,\n language=language,\n suppress_numerals=suppress_numerals,\n vad_params=default_vad_options,\n )"
},
{
"identifier": "load_audio",
"path": "src/third_part/whisperx/audio.py",
"snippet": "def load_audio(file: str, sr: int = SAMPLE_RATE):\n \"\"\"\n Open an audio file and read as mono waveform, resampling as necessary\n\n Parameters\n ----------\n file: str\n The audio file to open\n\n sr: int\n The sample rate to resample the audio if necessary\n\n Returns\n -------\n A NumPy array containing the audio waveform, in float32 dtype.\n \"\"\"\n try:\n # Launches a subprocess to decode audio while down-mixing and resampling as necessary.\n # Requires the ffmpeg CLI to be installed.\n cmd = [\n \"ffmpeg\",\n \"-nostdin\",\n \"-threads\",\n \"0\",\n \"-i\",\n file,\n \"-f\",\n \"s16le\",\n \"-ac\",\n \"1\",\n \"-acodec\",\n \"pcm_s16le\",\n \"-ar\",\n str(sr),\n \"-\",\n ]\n out = subprocess.run(cmd, capture_output=True, check=True).stdout\n except subprocess.CalledProcessError as e:\n raise RuntimeError(f\"Failed to load audio: {e.stderr.decode()}\") from e\n\n return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0"
},
{
"identifier": "DiarizationPipeline",
"path": "src/third_part/whisperx/diarize.py",
"snippet": "class DiarizationPipeline:\n def __init__(\n self,\n model_name=\"pyannote/speaker-diarization-3.0\",\n use_auth_token=None,\n device: Optional[Union[str, torch.device]] = \"cpu\",\n ):\n if isinstance(device, str):\n device = torch.device(device)\n self.model = Pipeline.from_pretrained(model_name, use_auth_token=use_auth_token).to(device)\n\n def __call__(self, audio: Union[str, np.ndarray], min_speakers=None, max_speakers=None):\n if isinstance(audio, str):\n audio = load_audio(audio)\n audio_data = {\n 'waveform': torch.from_numpy(audio[None, :]),\n 'sample_rate': SAMPLE_RATE\n }\n segments = self.model(audio_data, min_speakers=min_speakers, max_speakers=max_speakers)\n diarize_df = pd.DataFrame(segments.itertracks(yield_label=True), columns=['segment', 'label', 'speaker'])\n diarize_df['start'] = diarize_df['segment'].apply(lambda x: x.start)\n diarize_df['end'] = diarize_df['segment'].apply(lambda x: x.end)\n return diarize_df"
},
{
"identifier": "assign_word_speakers",
"path": "src/third_part/whisperx/diarize.py",
"snippet": "def assign_word_speakers(diarize_df, transcript_result, fill_nearest=False):\n transcript_segments = transcript_result[\"segments\"]\n for seg in transcript_segments:\n # assign speaker to segment (if any)\n diarize_df['intersection'] = np.minimum(diarize_df['end'], seg['end']) - np.maximum(diarize_df['start'], seg['start'])\n diarize_df['union'] = np.maximum(diarize_df['end'], seg['end']) - np.minimum(diarize_df['start'], seg['start'])\n # remove no hit, otherwise we look for closest (even negative intersection...)\n if not fill_nearest:\n dia_tmp = diarize_df[diarize_df['intersection'] > 0]\n else:\n dia_tmp = diarize_df\n if len(dia_tmp) > 0:\n # sum over speakers\n speaker = dia_tmp.groupby(\"speaker\")[\"intersection\"].sum().sort_values(ascending=False).index[0]\n seg[\"speaker\"] = speaker\n \n # assign speaker to words\n if 'words' in seg:\n for word in seg['words']:\n if 'start' in word:\n diarize_df['intersection'] = np.minimum(diarize_df['end'], word['end']) - np.maximum(diarize_df['start'], word['start'])\n diarize_df['union'] = np.maximum(diarize_df['end'], word['end']) - np.minimum(diarize_df['start'], word['start'])\n # remove no hit\n if not fill_nearest:\n dia_tmp = diarize_df[diarize_df['intersection'] > 0]\n else:\n dia_tmp = diarize_df\n if len(dia_tmp) > 0:\n # sum over speakers\n speaker = dia_tmp.groupby(\"speaker\")[\"intersection\"].sum().sort_values(ascending=False).index[0]\n word[\"speaker\"] = speaker\n \n return transcript_result "
},
{
"identifier": "LANGUAGES",
"path": "src/third_part/whisperx/utils.py",
"snippet": "LANGUAGES = {\n \"en\": \"english\",\n \"zh\": \"chinese\",\n \"de\": \"german\",\n \"es\": \"spanish\",\n \"ru\": \"russian\",\n \"ko\": \"korean\",\n \"fr\": \"french\",\n \"ja\": \"japanese\",\n \"pt\": \"portuguese\",\n \"tr\": \"turkish\",\n \"pl\": \"polish\",\n \"ca\": \"catalan\",\n \"nl\": \"dutch\",\n \"ar\": \"arabic\",\n \"sv\": \"swedish\",\n \"it\": \"italian\",\n \"id\": \"indonesian\",\n \"hi\": \"hindi\",\n \"fi\": \"finnish\",\n \"vi\": \"vietnamese\",\n \"he\": \"hebrew\",\n \"uk\": \"ukrainian\",\n \"el\": \"greek\",\n \"ms\": \"malay\",\n \"cs\": \"czech\",\n \"ro\": \"romanian\",\n \"da\": \"danish\",\n \"hu\": \"hungarian\",\n \"ta\": \"tamil\",\n \"no\": \"norwegian\",\n \"th\": \"thai\",\n \"ur\": \"urdu\",\n \"hr\": \"croatian\",\n \"bg\": \"bulgarian\",\n \"lt\": \"lithuanian\",\n \"la\": \"latin\",\n \"mi\": \"maori\",\n \"ml\": \"malayalam\",\n \"cy\": \"welsh\",\n \"sk\": \"slovak\",\n \"te\": \"telugu\",\n \"fa\": \"persian\",\n \"lv\": \"latvian\",\n \"bn\": \"bengali\",\n \"sr\": \"serbian\",\n \"az\": \"azerbaijani\",\n \"sl\": \"slovenian\",\n \"kn\": \"kannada\",\n \"et\": \"estonian\",\n \"mk\": \"macedonian\",\n \"br\": \"breton\",\n \"eu\": \"basque\",\n \"is\": \"icelandic\",\n \"hy\": \"armenian\",\n \"ne\": \"nepali\",\n \"mn\": \"mongolian\",\n \"bs\": \"bosnian\",\n \"kk\": \"kazakh\",\n \"sq\": \"albanian\",\n \"sw\": \"swahili\",\n \"gl\": \"galician\",\n \"mr\": \"marathi\",\n \"pa\": \"punjabi\",\n \"si\": \"sinhala\",\n \"km\": \"khmer\",\n \"sn\": \"shona\",\n \"yo\": \"yoruba\",\n \"so\": \"somali\",\n \"af\": \"afrikaans\",\n \"oc\": \"occitan\",\n \"ka\": \"georgian\",\n \"be\": \"belarusian\",\n \"tg\": \"tajik\",\n \"sd\": \"sindhi\",\n \"gu\": \"gujarati\",\n \"am\": \"amharic\",\n \"yi\": \"yiddish\",\n \"lo\": \"lao\",\n \"uz\": \"uzbek\",\n \"fo\": \"faroese\",\n \"ht\": \"haitian creole\",\n \"ps\": \"pashto\",\n \"tk\": \"turkmen\",\n \"nn\": \"nynorsk\",\n \"mt\": \"maltese\",\n \"sa\": \"sanskrit\",\n \"lb\": \"luxembourgish\",\n \"my\": \"myanmar\",\n \"bo\": \"tibetan\",\n \"tl\": \"tagalog\",\n \"mg\": \"malagasy\",\n \"as\": \"assamese\",\n \"tt\": \"tatar\",\n \"haw\": \"hawaiian\",\n \"ln\": \"lingala\",\n \"ha\": \"hausa\",\n \"ba\": \"bashkir\",\n \"jw\": \"javanese\",\n \"su\": \"sundanese\",\n}"
},
{
"identifier": "TO_LANGUAGE_CODE",
"path": "src/third_part/whisperx/utils.py",
"snippet": "TO_LANGUAGE_CODE = {\n **{language: code for code, language in LANGUAGES.items()},\n \"burmese\": \"my\",\n \"valencian\": \"ca\",\n \"flemish\": \"nl\",\n \"haitian\": \"ht\",\n \"letzeburgesch\": \"lb\",\n \"pushto\": \"ps\",\n \"panjabi\": \"pa\",\n \"moldavian\": \"ro\",\n \"moldovan\": \"ro\",\n \"sinhalese\": \"si\",\n \"castilian\": \"es\",\n}"
},
{
"identifier": "get_writer",
"path": "src/third_part/whisperx/utils.py",
"snippet": "def get_writer(\n output_format: str, output_dir: str\n) -> Callable[[dict, TextIO, dict], None]:\n writers = {\n \"txt\": WriteTXT,\n \"vtt\": WriteVTT,\n \"srt\": WriteSRT,\n \"tsv\": WriteTSV,\n \"json\": WriteJSON,\n }\n optional_writers = {\n \"aud\": WriteAudacity,\n }\n\n if output_format == \"all\":\n all_writers = [writer(output_dir) for writer in writers.values()]\n\n def write_all(result: dict, file: TextIO, options: dict):\n for writer in all_writers:\n writer(result, file, options)\n\n return write_all\n\n if output_format in optional_writers:\n return optional_writers[output_format](output_dir)\n return writers[output_format](output_dir)"
},
{
"identifier": "optional_float",
"path": "src/third_part/whisperx/utils.py",
"snippet": "def optional_float(string):\n return None if string == \"None\" else float(string)"
},
{
"identifier": "optional_int",
"path": "src/third_part/whisperx/utils.py",
"snippet": "def optional_int(string):\n return None if string == \"None\" else int(string)"
},
{
"identifier": "str2bool",
"path": "src/third_part/whisperx/utils.py",
"snippet": "def str2bool(string):\n str2val = {\"True\": True, \"False\": False}\n if string in str2val:\n return str2val[string]\n else:\n raise ValueError(f\"Expected one of {set(str2val.keys())}, got {string}\")"
}
] | import argparse
import gc
import os
import warnings
import numpy as np
import torch
from .alignment import align, load_align_model
from .asr import load_model
from .audio import load_audio
from .diarize import DiarizationPipeline, assign_word_speakers
from .utils import (LANGUAGES, TO_LANGUAGE_CODE, get_writer, optional_float,
                    optional_int, str2bool) | 9,119 | parser.add_argument("--max_speakers", default=None, type=int, help="Maximum number of speakers in the audio file")
parser.add_argument("--temperature", type=float, default=0, help="temperature to use for sampling")
parser.add_argument("--best_of", type=optional_int, default=5, help="number of candidates when sampling with non-zero temperature")
parser.add_argument("--beam_size", type=optional_int, default=5, help="number of beams in beam search, only applicable when temperature is zero")
parser.add_argument("--patience", type=float, default=1.0, help="optional patience value to use in beam decoding, as in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search")
parser.add_argument("--length_penalty", type=float, default=1.0, help="optional token length penalty coefficient (alpha) as in https://arxiv.org/abs/1609.08144, uses simple length normalization by default")
parser.add_argument("--suppress_tokens", type=str, default="-1", help="comma-separated list of token ids to suppress during sampling; '-1' will suppress most special characters except common punctuations")
parser.add_argument("--suppress_numerals", action="store_true", help="whether to suppress numeric symbols and currency symbols during sampling, since wav2vec2 cannot align them correctly")
parser.add_argument("--initial_prompt", type=str, default=None, help="optional text to provide as a prompt for the first window.")
parser.add_argument("--condition_on_previous_text", type=str2bool, default=False, help="if True, provide the previous output of the model as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop")
parser.add_argument("--fp16", type=str2bool, default=True, help="whether to perform inference in fp16; True by default")
parser.add_argument("--temperature_increment_on_fallback", type=optional_float, default=0.2, help="temperature to increase when falling back when the decoding fails to meet either of the thresholds below")
parser.add_argument("--compression_ratio_threshold", type=optional_float, default=2.4, help="if the gzip compression ratio is higher than this value, treat the decoding as failed")
parser.add_argument("--logprob_threshold", type=optional_float, default=-1.0, help="if the average log probability is lower than this value, treat the decoding as failed")
parser.add_argument("--no_speech_threshold", type=optional_float, default=0.6, help="if the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence")
parser.add_argument("--max_line_width", type=optional_int, default=None, help="(not possible with --no_align) the maximum number of characters in a line before breaking the line")
parser.add_argument("--max_line_count", type=optional_int, default=None, help="(not possible with --no_align) the maximum number of lines in a segment")
parser.add_argument("--highlight_words", type=str2bool, default=False, help="(not possible with --no_align) underline each word as it is spoken in srt and vtt")
parser.add_argument("--segment_resolution", type=str, default="sentence", choices=["sentence", "chunk"], help="(not possible with --no_align) the maximum number of characters in a line before breaking the line")
parser.add_argument("--threads", type=optional_int, default=0, help="number of threads used by torch for CPU inference; supercedes MKL_NUM_THREADS/OMP_NUM_THREADS")
parser.add_argument("--hf_token", type=str, default=None, help="Hugging Face Access Token to access PyAnnote gated models")
parser.add_argument("--print_progress", type=str2bool, default = False, help = "if True, progress will be printed in transcribe() and align() methods.")
# fmt: on
args = parser.parse_args().__dict__
model_name: str = args.pop("model")
batch_size: int = args.pop("batch_size")
output_dir: str = args.pop("output_dir")
output_format: str = args.pop("output_format")
device: str = args.pop("device")
device_index: int = args.pop("device_index")
compute_type: str = args.pop("compute_type")
# model_flush: bool = args.pop("model_flush")
os.makedirs(output_dir, exist_ok=True)
align_model: str = args.pop("align_model")
interpolate_method: str = args.pop("interpolate_method")
no_align: bool = args.pop("no_align")
task : str = args.pop("task")
if task == "translate":
# translation cannot be aligned
no_align = True
return_char_alignments: bool = args.pop("return_char_alignments")
hf_token: str = args.pop("hf_token")
vad_onset: float = args.pop("vad_onset")
vad_offset: float = args.pop("vad_offset")
chunk_size: int = args.pop("chunk_size")
diarize: bool = args.pop("diarize")
min_speakers: int = args.pop("min_speakers")
max_speakers: int = args.pop("max_speakers")
print_progress: bool = args.pop("print_progress")
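    # Normalize the language argument: lower-case it and map full language names (e.g. "french") to ISO codes via TO_LANGUAGE_CODE.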
if args["language"] is not None:
args["language"] = args["language"].lower()
if args["language"] not in LANGUAGES:
if args["language"] in TO_LANGUAGE_CODE:
args["language"] = TO_LANGUAGE_CODE[args["language"]]
else:
raise ValueError(f"Unsupported language: {args['language']}")
if model_name.endswith(".en") and args["language"] != "en":
if args["language"] is not None:
warnings.warn(
f"{model_name} is an English-only model but receipted '{args['language']}'; using English instead."
)
args["language"] = "en"
align_language = args["language"] if args["language"] is not None else "en" # default to loading english if not specified
temperature = args.pop("temperature")
if (increment := args.pop("temperature_increment_on_fallback")) is not None:
temperature = tuple(np.arange(temperature, 1.0 + 1e-6, increment))
else:
temperature = [temperature]
faster_whisper_threads = 4
if (threads := args.pop("threads")) > 0:
torch.set_num_threads(threads)
faster_whisper_threads = threads
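    # Collect the decoding options that are later passed to load_model() as asr_options.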
asr_options = {
"beam_size": args.pop("beam_size"),
"patience": args.pop("patience"),
"length_penalty": args.pop("length_penalty"),
"temperatures": temperature,
"compression_ratio_threshold": args.pop("compression_ratio_threshold"),
"log_prob_threshold": args.pop("logprob_threshold"),
"no_speech_threshold": args.pop("no_speech_threshold"),
"condition_on_previous_text": False,
"initial_prompt": args.pop("initial_prompt"),
"suppress_tokens": [int(x) for x in args.pop("suppress_tokens").split(",")],
"suppress_numerals": args.pop("suppress_numerals"),
}
writer = get_writer(output_format, output_dir)
word_options = ["highlight_words", "max_line_count", "max_line_width"]
if no_align:
for option in word_options:
if args[option]:
parser.error(f"--{option} not possible with --no_align")
if args["max_line_count"] and not args["max_line_width"]:
warnings.warn("--max_line_count has no effect without --max_line_width")
writer_args = {arg: args.pop(arg) for arg in word_options}
# Part 1: VAD & ASR Loop
results = []
tmp_results = []
# model = load_model(model_name, device=device, download_root=model_dir)
|
def cli():
# fmt: off
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("audio", nargs="+", type=str, help="audio file(s) to transcribe")
parser.add_argument("--model", default="small", help="name of the Whisper model to use")
parser.add_argument("--model_dir", type=str, default=None, help="the path to save model files; uses ~/.cache/whisper by default")
parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu", help="device to use for PyTorch inference")
parser.add_argument("--device_index", default=0, type=int, help="device index to use for FasterWhisper inference")
parser.add_argument("--batch_size", default=8, type=int, help="the preferred batch size for inference")
parser.add_argument("--compute_type", default="float16", type=str, choices=["float16", "float32", "int8"], help="compute type for computation")
parser.add_argument("--output_dir", "-o", type=str, default=".", help="directory to save the outputs")
parser.add_argument("--output_format", "-f", type=str, default="all", choices=["all", "srt", "vtt", "txt", "tsv", "json", "aud"], help="format of the output file; if not specified, all available formats will be produced")
parser.add_argument("--verbose", type=str2bool, default=True, help="whether to print out the progress and debug messages")
parser.add_argument("--task", type=str, default="transcribe", choices=["transcribe", "translate"], help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')")
parser.add_argument("--language", type=str, default=None, choices=sorted(LANGUAGES.keys()) + sorted([k.title() for k in TO_LANGUAGE_CODE.keys()]), help="language spoken in the audio, specify None to perform language detection")
# alignment params
parser.add_argument("--align_model", default=None, help="Name of phoneme-level ASR model to do alignment")
parser.add_argument("--interpolate_method", default="nearest", choices=["nearest", "linear", "ignore"], help="For word .srt, method to assign timestamps to non-aligned words, or merge them into neighbouring.")
parser.add_argument("--no_align", action='store_true', help="Do not perform phoneme alignment")
parser.add_argument("--return_char_alignments", action='store_true', help="Return character-level alignments in the output json file")
# vad params
parser.add_argument("--vad_onset", type=float, default=0.500, help="Onset threshold for VAD (see pyannote.audio), reduce this if speech is not being detected")
parser.add_argument("--vad_offset", type=float, default=0.363, help="Offset threshold for VAD (see pyannote.audio), reduce this if speech is not being detected.")
parser.add_argument("--chunk_size", type=int, default=30, help="Chunk size for merging VAD segments. Default is 30, reduce this if the chunk is too long.")
# diarization params
parser.add_argument("--diarize", action="store_true", help="Apply diarization to assign speaker labels to each segment/word")
parser.add_argument("--min_speakers", default=None, type=int, help="Minimum number of speakers to in audio file")
parser.add_argument("--max_speakers", default=None, type=int, help="Maximum number of speakers to in audio file")
parser.add_argument("--temperature", type=float, default=0, help="temperature to use for sampling")
parser.add_argument("--best_of", type=optional_int, default=5, help="number of candidates when sampling with non-zero temperature")
parser.add_argument("--beam_size", type=optional_int, default=5, help="number of beams in beam search, only applicable when temperature is zero")
parser.add_argument("--patience", type=float, default=1.0, help="optional patience value to use in beam decoding, as in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search")
parser.add_argument("--length_penalty", type=float, default=1.0, help="optional token length penalty coefficient (alpha) as in https://arxiv.org/abs/1609.08144, uses simple length normalization by default")
parser.add_argument("--suppress_tokens", type=str, default="-1", help="comma-separated list of token ids to suppress during sampling; '-1' will suppress most special characters except common punctuations")
parser.add_argument("--suppress_numerals", action="store_true", help="whether to suppress numeric symbols and currency symbols during sampling, since wav2vec2 cannot align them correctly")
parser.add_argument("--initial_prompt", type=str, default=None, help="optional text to provide as a prompt for the first window.")
parser.add_argument("--condition_on_previous_text", type=str2bool, default=False, help="if True, provide the previous output of the model as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop")
parser.add_argument("--fp16", type=str2bool, default=True, help="whether to perform inference in fp16; True by default")
parser.add_argument("--temperature_increment_on_fallback", type=optional_float, default=0.2, help="temperature to increase when falling back when the decoding fails to meet either of the thresholds below")
parser.add_argument("--compression_ratio_threshold", type=optional_float, default=2.4, help="if the gzip compression ratio is higher than this value, treat the decoding as failed")
parser.add_argument("--logprob_threshold", type=optional_float, default=-1.0, help="if the average log probability is lower than this value, treat the decoding as failed")
parser.add_argument("--no_speech_threshold", type=optional_float, default=0.6, help="if the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence")
parser.add_argument("--max_line_width", type=optional_int, default=None, help="(not possible with --no_align) the maximum number of characters in a line before breaking the line")
parser.add_argument("--max_line_count", type=optional_int, default=None, help="(not possible with --no_align) the maximum number of lines in a segment")
parser.add_argument("--highlight_words", type=str2bool, default=False, help="(not possible with --no_align) underline each word as it is spoken in srt and vtt")
parser.add_argument("--segment_resolution", type=str, default="sentence", choices=["sentence", "chunk"], help="(not possible with --no_align) the maximum number of characters in a line before breaking the line")
parser.add_argument("--threads", type=optional_int, default=0, help="number of threads used by torch for CPU inference; supercedes MKL_NUM_THREADS/OMP_NUM_THREADS")
parser.add_argument("--hf_token", type=str, default=None, help="Hugging Face Access Token to access PyAnnote gated models")
parser.add_argument("--print_progress", type=str2bool, default = False, help = "if True, progress will be printed in transcribe() and align() methods.")
# fmt: on
args = parser.parse_args().__dict__
model_name: str = args.pop("model")
batch_size: int = args.pop("batch_size")
output_dir: str = args.pop("output_dir")
output_format: str = args.pop("output_format")
device: str = args.pop("device")
device_index: int = args.pop("device_index")
compute_type: str = args.pop("compute_type")
# model_flush: bool = args.pop("model_flush")
os.makedirs(output_dir, exist_ok=True)
align_model: str = args.pop("align_model")
interpolate_method: str = args.pop("interpolate_method")
no_align: bool = args.pop("no_align")
task : str = args.pop("task")
if task == "translate":
# translation cannot be aligned
no_align = True
return_char_alignments: bool = args.pop("return_char_alignments")
hf_token: str = args.pop("hf_token")
vad_onset: float = args.pop("vad_onset")
vad_offset: float = args.pop("vad_offset")
chunk_size: int = args.pop("chunk_size")
diarize: bool = args.pop("diarize")
min_speakers: int = args.pop("min_speakers")
max_speakers: int = args.pop("max_speakers")
print_progress: bool = args.pop("print_progress")
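    # Normalize the language argument: lower-case it and map full language names (e.g. "french") to ISO codes via TO_LANGUAGE_CODE.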
if args["language"] is not None:
args["language"] = args["language"].lower()
if args["language"] not in LANGUAGES:
if args["language"] in TO_LANGUAGE_CODE:
args["language"] = TO_LANGUAGE_CODE[args["language"]]
else:
raise ValueError(f"Unsupported language: {args['language']}")
if model_name.endswith(".en") and args["language"] != "en":
if args["language"] is not None:
warnings.warn(
f"{model_name} is an English-only model but receipted '{args['language']}'; using English instead."
)
args["language"] = "en"
align_language = args["language"] if args["language"] is not None else "en" # default to loading english if not specified
temperature = args.pop("temperature")
if (increment := args.pop("temperature_increment_on_fallback")) is not None:
temperature = tuple(np.arange(temperature, 1.0 + 1e-6, increment))
else:
temperature = [temperature]
faster_whisper_threads = 4
if (threads := args.pop("threads")) > 0:
torch.set_num_threads(threads)
faster_whisper_threads = threads
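    # Collect the decoding options that are later passed to load_model() as asr_options.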
asr_options = {
"beam_size": args.pop("beam_size"),
"patience": args.pop("patience"),
"length_penalty": args.pop("length_penalty"),
"temperatures": temperature,
"compression_ratio_threshold": args.pop("compression_ratio_threshold"),
"log_prob_threshold": args.pop("logprob_threshold"),
"no_speech_threshold": args.pop("no_speech_threshold"),
"condition_on_previous_text": False,
"initial_prompt": args.pop("initial_prompt"),
"suppress_tokens": [int(x) for x in args.pop("suppress_tokens").split(",")],
"suppress_numerals": args.pop("suppress_numerals"),
}
writer = get_writer(output_format, output_dir)
word_options = ["highlight_words", "max_line_count", "max_line_width"]
if no_align:
for option in word_options:
if args[option]:
parser.error(f"--{option} not possible with --no_align")
if args["max_line_count"] and not args["max_line_width"]:
warnings.warn("--max_line_count has no effect without --max_line_width")
writer_args = {arg: args.pop(arg) for arg in word_options}
# Part 1: VAD & ASR Loop
results = []
tmp_results = []
# model = load_model(model_name, device=device, download_root=model_dir) | model = load_model(model_name, device=device, device_index=device_index, compute_type=compute_type, language=args['language'], asr_options=asr_options, vad_options={"vad_onset": vad_onset, "vad_offset": vad_offset}, task=task, threads=faster_whisper_threads) | 2 | 2023-12-01 12:23:19+00:00 | 12k |
JiahuiLei/GART | test_utils/test_func.py | [
{
"identifier": "render_cam_pcl",
"path": "lib_render/gauspl_renderer.py",
"snippet": "def render_cam_pcl(\n xyz,\n frame,\n scale,\n opacity,\n color_feat,\n H,\n W,\n CAM_K,\n verbose=False,\n active_sph_order=0,\n bg_color=[1.0, 1.0, 1.0],\n):\n # ! Camera is at origin, every input is in camera coordinate space\n\n S = torch.zeros_like(frame)\n S[:, 0, 0] = scale[:, 0]\n S[:, 1, 1] = scale[:, 1]\n S[:, 2, 2] = scale[:, 2]\n actual_covariance = frame @ (S**2) @ frame.permute(0, 2, 1)\n\n # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means\n device = xyz.device\n screenspace_points = (\n torch.zeros_like(xyz, dtype=xyz.dtype, requires_grad=True, device=xyz.device) + 0\n )\n # screenspace_points.retain_grad()\n try:\n screenspace_points.retain_grad()\n except:\n pass\n\n # * Specially handle the non-centered camera, using first padding and finally crop\n if abs(H // 2 - CAM_K[1, 2]) > 1.0 or abs(W // 2 - CAM_K[0, 2]) > 1.0:\n center_handling_flag = True\n left_w, right_w = CAM_K[0, 2], W - CAM_K[0, 2]\n top_h, bottom_h = CAM_K[1, 2], H - CAM_K[1, 2]\n new_W = int(2 * max(left_w, right_w))\n new_H = int(2 * max(top_h, bottom_h))\n else:\n center_handling_flag = False\n new_W, new_H = W, H\n\n # Set up rasterization configuration\n FoVx = focal2fov(CAM_K[0, 0], new_W)\n FoVy = focal2fov(CAM_K[1, 1], new_H)\n tanfovx = math.tan(FoVx * 0.5)\n tanfovy = math.tan(FoVy * 0.5)\n\n # TODO: Check dynamic gaussian repos and original gaussian repo, they use projection matrix to handle non-centered K, not using this stupid padding like me\n viewmatrix = torch.from_numpy(getWorld2View2(np.eye(3), np.zeros(3)).transpose(0, 1)).to(device)\n projection_matrix = (\n getProjectionMatrix(znear=0.01, zfar=1.0, fovX=FoVx, fovY=FoVy).transpose(0, 1).to(device)\n )\n full_proj_transform = (viewmatrix.unsqueeze(0).bmm(projection_matrix.unsqueeze(0))).squeeze(0)\n camera_center = viewmatrix.inverse()[3, :3]\n\n raster_settings = GaussianRasterizationSettings(\n image_height=new_H,\n image_width=new_W,\n tanfovx=tanfovx,\n tanfovy=tanfovy,\n bg=torch.tensor(bg_color, dtype=torch.float32, device=device),\n scale_modifier=1.0,\n viewmatrix=viewmatrix,\n projmatrix=full_proj_transform,\n sh_degree=0, # ! use pre-compute color!\n campos=camera_center,\n prefiltered=False,\n debug=False,\n )\n rasterizer = GaussianRasterizer(raster_settings=raster_settings)\n\n means3D = xyz\n means2D = screenspace_points\n # opacity = torch.ones_like(means3D[:, 0]) * sigma\n\n # If precomputed 3d covariance is provided, use it. If not, then it will be computed from\n # scaling / rotation by the rasterizer.\n scales = None\n rotations = None\n # JH\n cov3D_precomp = strip_lowerdiag(actual_covariance)\n\n # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors\n # from SHs in Python, do it. 
If not, then SH -> RGB conversion will be done by rasterizer.\n # xyz are in camera frame, so the dir in camera frame is just their normalized direction\n dir_cam = F.normalize(xyz, dim=-1)\n # P_w = Frame @ P_local\n dir_local = torch.einsum(\"nji,nj->ni\", frame, dir_cam) # note the transpose\n dir_local = F.normalize(\n dir_local, dim=-1\n ) # If frame is not SO(3) but Affinity, have to normalize\n N = len(color_feat)\n shs_view = color_feat.reshape(N, -1, 3) # N, Deg, Channels\n sh2rgb = eval_sh(active_sph_order, shs_view.permute(0, 2, 1), dir_local)\n colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)\n # colors_precomp = color_feat\n\n # Rasterize visible Gaussians to image, obtain their radii (on screen).\n\n start_time = time.time()\n ret = rasterizer(\n means3D=means3D.float(),\n means2D=means2D.float(),\n shs=None,\n colors_precomp=colors_precomp.float(),\n opacities=opacity.float(),\n scales=scales,\n rotations=rotations,\n cov3D_precomp=cov3D_precomp.float(),\n )\n if len(ret) == 2:\n rendered_image, radii = ret\n depth, alpha = None, None\n elif len(ret) == 4:\n rendered_image, radii, depth, alpha = ret\n else:\n raise ValueError(f\"Unexpected return value from rasterizer with len={len(ret)}\")\n if verbose:\n print(\n f\"render time: {(time.time() - start_time)*1000:.3f}ms\",\n )\n ret = {\n \"rgb\": rendered_image,\n \"dep\": depth,\n \"alpha\": alpha,\n \"viewspace_points\": screenspace_points,\n \"visibility_filter\": radii > 0,\n \"radii\": radii,\n }\n if center_handling_flag:\n for k in [\"rgb\", \"dep\", \"alpha\"]:\n if ret[k] is None:\n continue\n if left_w > right_w:\n ret[k] = ret[k][:, :, :W]\n else:\n ret[k] = ret[k][:, :, -W:]\n if top_h > bottom_h:\n ret[k] = ret[k][:, :H, :]\n else:\n ret[k] = ret[k][:, -H:, :]\n return ret"
},
{
"identifier": "Dataset",
"path": "lib_data/instant_avatar_people_snapshot.py",
"snippet": "class Dataset(Dataset):\n # from instant avatar\n def __init__(\n self,\n noisy_flag,\n data_root=\"data/people_snapshot_public_instant_avatar_processed\",\n video_name=\"male-3-casual\",\n split=\"train\",\n image_zoom_ratio=0.5,\n use_refined_pose=True, # ! Use Instant Avatar refined pose!!\n ) -> None:\n super().__init__()\n self.noisy_flag = noisy_flag\n self.data_root = data_root\n self.video_name = video_name\n\n self.image_zoom_ratio = image_zoom_ratio\n\n root = osp.join(data_root, video_name)\n\n camera = np.load(osp.join(root, \"cameras.npz\"))\n K = camera[\"intrinsic\"]\n T_wc = np.linalg.inv(camera[\"extrinsic\"])\n assert np.allclose(T_wc, np.eye(4))\n\n height = camera[\"height\"]\n width = camera[\"width\"]\n\n self.downscale = 1.0 / self.image_zoom_ratio\n if self.downscale > 1:\n height = int(height / self.downscale)\n width = int(width / self.downscale)\n K[:2] /= self.downscale\n self.K = K\n\n start = START_END[video_name][split][0]\n end = START_END[video_name][split][1] + 1\n skip = START_END[video_name][split][2]\n self.img_lists = sorted(glob.glob(f\"{root}/images/*.png\"))[start:end:skip]\n self.msk_lists = sorted(glob.glob(f\"{root}/masks/*.npy\"))[start:end:skip]\n\n # ! we take refine false\n if use_refined_pose:\n if noisy_flag:\n pose_fn = osp.join(root, \"poses_noisier\", f\"anim_nerf_{split}.npz\")\n else:\n pose_fn = osp.join(root, \"poses\", f\"anim_nerf_{split}.npz\")\n self.smpl_params = load_smpl_param(pose_fn)\n else:\n self.smpl_params = load_smpl_param(osp.join(root, \"poses.npz\"))\n for k, v in self.smpl_params.items():\n if k != \"betas\":\n self.smpl_params[k] = v[start:end:skip]\n\n # cache the images\n self.img_buffer, self.msk_buffer = [], []\n for idx in tqdm(range(len(self.img_lists))):\n img = cv2.imread(self.img_lists[idx])[..., ::-1]\n msk = np.load(self.msk_lists[idx])\n if self.downscale > 1:\n img = cv2.resize(img, dsize=None, fx=1 / self.downscale, fy=1 / self.downscale)\n msk = cv2.resize(msk, dsize=None, fx=1 / self.downscale, fy=1 / self.downscale)\n\n img = (img[..., :3] / 255).astype(np.float32)\n msk = msk.astype(np.float32)\n # apply mask\n # always white\n bg_color = np.ones_like(img).astype(np.float32)\n img = img * msk[..., None] + (1 - msk[..., None])\n self.img_buffer.append(img)\n self.msk_buffer.append(msk)\n return\n\n def __len__(self):\n return len(self.img_lists)\n\n def __getitem__(self, idx):\n img = self.img_buffer[idx]\n msk = self.msk_buffer[idx]\n\n pose = self.smpl_params[\"body_pose\"][idx].reshape((23, 3))\n pose = np.concatenate([self.smpl_params[\"global_orient\"][idx][None], pose], 0)\n\n ret = {\n \"rgb\": img.astype(np.float32),\n \"mask\": msk,\n \"K\": self.K.copy(),\n \"smpl_beta\": self.smpl_params[\"betas\"][0], # ! use the first beta!\n \"smpl_pose\": pose,\n \"smpl_trans\": self.smpl_params[\"transl\"][idx],\n \"idx\": idx,\n }\n\n meta_info = {\n \"video\": self.video_name,\n }\n viz_id = f\"video{self.video_name}_dataidx{idx}\"\n meta_info[\"viz_id\"] = viz_id\n return ret, meta_info"
},
{
"identifier": "Dataset",
"path": "lib_data/zju_mocap.py",
"snippet": "class Dataset(Dataset):\n # from instant avatar\n def __init__(\n self,\n data_root=\"data/zju-mocap\",\n video_name=\"my_377\",\n split=\"train\",\n image_zoom_ratio=0.5, # 0.5, # instant-nvr use 0.5 for both train and test\n # for cfg input from instant-nvr\n # for zju mocap instant-nvr use test_view: []; training_view: [4]\n training_view=[4], #[0,4,8,12,16,20], #[4], # [4], # 4\n num_eval_frame=-1,\n test_novel_pose=False,\n # my cfg\n bg_color=0.0,\n ) -> None:\n super().__init__()\n self.data_root = data_root\n self.video_name = video_name\n self.image_zoom_ratio = image_zoom_ratio\n\n self.bg_color = bg_color\n\n root = osp.join(data_root, video_name)\n\n # anno_fn = osp.join(root, \"annots_old.npy\") # ! debug\n anno_fn = osp.join(root, \"annots.npy\")\n annots = np.load(anno_fn, allow_pickle=True).item()\n\n # old_anno_fn = osp.join(root, \"annots_old.npy\") # ! debug\n # old_annots = np.load(old_anno_fn, allow_pickle=True).item()\n\n self.cams = annots[\"cams\"]\n\n # ! Check the run.py in instant-nvr evaluation\n\n num_cams = len(self.cams[\"K\"])\n test_view = [i for i in range(num_cams) if i not in training_view]\n if len(test_view) == 0:\n test_view = [0]\n\n if split == \"train\" or split == \"prune\":\n self.view = training_view\n elif split == \"test\":\n self.view = test_view\n elif split == \"val\":\n self.view = test_view[::4]\n # self.view = test_view\n\n i = META[self.video_name][\"begin_ith_frame\"]\n i_intv = META[self.video_name][\"frame_interval\"]\n self.f_intv = i_intv\n ni = META[self.video_name][\"num_train_frame\"]\n if split == \"val\":\n # * Seems the\n self.view = [5]\n self.tick = 0\n ni = 500\n i_intv = 1\n if test_novel_pose:\n i = (\n META[self.video_name][\"begin_ith_frame\"]\n + META[self.video_name][\"num_train_frame\"] * i_intv\n )\n ni = num_eval_frame\n\n self.ims = np.array(\n [\n np.array(ims_data[\"ims\"])[self.view]\n for ims_data in annots[\"ims\"][i : i + ni * i_intv][::i_intv]\n ]\n ).ravel()\n self.cam_inds = np.array(\n [\n np.arange(len(ims_data[\"ims\"]))[self.view]\n for ims_data in annots[\"ims\"][i : i + ni * i_intv][::i_intv]\n ]\n ).ravel()\n self.num_cams = len(self.view)\n\n # Use camera extrinsic to rotate the simple to each camera coordinate frame!\n\n # the R,t is used like this, stored in cam\n # i.e. the T stored in cam is actually p_c = T_cw @ p_w\n # def get_rays(H, W, K, R, T):\n # # calculate the camera origin\n # rays_o = -np.dot(R.T, T).ravel()\n # # calculate the world coodinates of pixels\n # i, j = np.meshgrid(\n # np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing=\"xy\"\n # )\n # xy1 = np.stack([i, j, np.ones_like(i)], axis=2)\n # pixel_camera = np.dot(xy1, np.linalg.inv(K).T)\n # pixel_world = np.dot(pixel_camera - T.ravel(), R)\n # # calculate the ray direction\n # rays_d = pixel_world - rays_o[None, None]\n # rays_d = rays_d / np.linalg.norm(rays_d, axis=2, keepdims=True)\n # rays_o = np.broadcast_to(rays_o, rays_d.shape)\n # return rays_o, rays_d\n\n # ! 
the cams R is in a very low precision, have use SVD to project back to SO(3)\n for cid in range(num_cams):\n _R = self.cams[\"R\"][cid]\n u, s, vh = np.linalg.svd(_R)\n new_R = u @ vh\n self.cams[\"R\"][cid] = new_R\n\n # this is copied\n smpl_layer = SMPLLayer(osp.join(osp.dirname(__file__), \"../data/smpl-meta/SMPL_NEUTRAL.pkl\"))\n\n # * Load smpl to camera frame\n self.smpl_theta_list, self.smpl_trans_list, smpl_beta_list = [], [], []\n self.meta = []\n for img_fn in self.ims:\n cam_ind = int(img_fn.split(\"/\")[-2])\n frame_idx = int(img_fn.split(\"/\")[-1].split(\".\")[0])\n self.meta.append({\"cam_ind\": cam_ind, \"frame_idx\": frame_idx})\n smpl_fn = osp.join(root, \"smpl_params\", f\"{frame_idx}.npy\")\n smpl_data = np.load(smpl_fn, allow_pickle=True).item()\n T_cw = np.eye(4)\n T_cw[:3, :3], T_cw[:3, 3] = (\n np.array(self.cams[\"R\"][cam_ind]),\n np.array(self.cams[\"T\"][cam_ind]).squeeze(-1) / 1000.0,\n )\n\n smpl_theta = smpl_data[\"poses\"].reshape((24, 3))\n assert np.allclose(smpl_theta[0], 0)\n smpl_rot, smpl_trans = smpl_data[\"Rh\"][0], smpl_data[\"Th\"]\n smpl_R = axangle2mat(\n smpl_rot / (np.linalg.norm(smpl_rot) + 1e-6), np.linalg.norm(smpl_rot)\n )\n\n T_wh = np.eye(4)\n T_wh[:3, :3], T_wh[:3, 3] = smpl_R.copy(), smpl_trans.squeeze(0).copy()\n\n T_ch = T_cw.astype(np.float64) @ T_wh.astype(np.float64)\n\n smpl_global_rot_d, smpl_global_rot_a = mat2axangle(T_ch[:3, :3])\n smpl_global_rot = smpl_global_rot_d * smpl_global_rot_a\n smpl_trans = T_ch[:3, 3] # 3\n smpl_theta[0] = smpl_global_rot\n beta = smpl_data[\"shapes\"][0][:10]\n\n # ! Because SMPL global rot is rot around joint-0, have to correct this in the global translation!!\n _pose = axis_angle_to_matrix(torch.from_numpy(smpl_theta)[None])\n so = smpl_layer(\n torch.from_numpy(beta)[None],\n body_pose=_pose[:, 1:],\n )\n j0 = (so.joints[0, 0]).numpy()\n t_correction = (_pose[0, 0].numpy() - np.eye(3)) @ j0\n smpl_trans = smpl_trans + t_correction\n\n self.smpl_theta_list.append(smpl_theta)\n smpl_beta_list.append(beta)\n self.smpl_trans_list.append(smpl_trans)\n\n # ! 
debug\n if DEBUG:\n vtx_fn = osp.join(root, \"vertices\", f\"{frame_idx}.npy\")\n nb_vtx_world = np.load(vtx_fn)\n np.savetxt(\"../debug/nb_vtx_world.xyz\", nb_vtx_world, fmt=\"%.6f\")\n nb_vtx_cam = np.dot(nb_vtx_world.copy(), T_cw[:3, :3].T) + T_cw[:3, 3]\n np.savetxt(\"../debug/nb_vtx_cam.xyz\", nb_vtx_cam, fmt=\"%.6f\")\n T_hw = np.linalg.inv(T_wh)\n nb_vtx_human = np.dot(nb_vtx_world.copy(), T_hw[:3, :3].T) + T_hw[:3, 3]\n Rh = smpl_data[\"Rh\"][0]\n R = cv2.Rodrigues(Rh)[0].astype(np.float32)\n Th = smpl_data[\"Th\"][0]\n nb_vtx_human2 = np.dot(nb_vtx_world.copy() - Th, R)\n np.savetxt(\"../debug/nb_vtx_human2.xyz\", nb_vtx_human2, fmt=\"%.6f\")\n np.savetxt(\"../debug/nb_vtx_human.xyz\", nb_vtx_human, fmt=\"%.6f\")\n\n smpl_vtx_human2 = (\n smpl_layer(\n torch.from_numpy(beta)[None],\n body_pose=_pose[:, 1:],\n # !!wired!!\n global_orient=_pose[:, 0],\n transl=torch.from_numpy(smpl_trans)[None],\n )\n .vertices[0]\n .numpy()\n )\n np.savetxt(\"../debug/smpl_vtx_cam2.xyz\", smpl_vtx_human2, fmt=\"%.6f\")\n\n smpl_vtx_human = smpl_layer(torch.from_numpy(beta)[None], body_pose=_pose[:, 1:])\n smpl_vtx_human = smpl_vtx_human.vertices[0].numpy()\n np.savetxt(\"../debug/smpl_vtx_human.xyz\", smpl_vtx_human, fmt=\"%.6f\")\n smpl_vtx_world = np.dot(smpl_vtx_human, T_wh[:3, :3].T) + T_wh[:3, 3]\n np.savetxt(\"../debug/smpl_vtx_world.xyz\", smpl_vtx_world, fmt=\"%.6f\")\n smpl_vtx_cam = np.dot(smpl_vtx_human, T_ch[:3, :3].T) + T_ch[:3, 3]\n np.savetxt(\"../debug/smpl_vtx_cam.xyz\", smpl_vtx_cam, fmt=\"%.6f\")\n\n # the smpl and nb are aligned\n\n img = imageio.imread(osp.join(root, img_fn)).astype(np.float32) / 255.0\n K = np.array(self.cams[\"K\"][cam_ind])\n screen_smpl_vtx = np.dot(smpl_vtx_cam.copy(), K.T)\n screen_smpl_vtx = screen_smpl_vtx[:, :2] / screen_smpl_vtx[:, 2:]\n screen_smpl_vtx = screen_smpl_vtx.astype(np.int32)\n dbg = img.copy()\n for uv in screen_smpl_vtx:\n dbg[uv[1], uv[0], :] = 1\n imageio.imsave(\"../debug/dbg.png\", dbg)\n imageio.imsave(\"../debug/img.png\", img)\n\n K = np.array(self.cams[\"K\"][cam_ind])\n screen_smpl_vtx = np.dot(smpl_vtx_human2.copy(), K.T)\n screen_smpl_vtx = screen_smpl_vtx[:, :2] / screen_smpl_vtx[:, 2:]\n screen_smpl_vtx = screen_smpl_vtx.astype(np.int32)\n dbg = img.copy()\n for uv in screen_smpl_vtx:\n dbg[uv[1], uv[0]] = 1\n imageio.imsave(\"../debug/dbg2.png\", dbg)\n print()\n self.beta = np.array(smpl_beta_list).mean(0)\n\n return\n\n def __len__(self):\n return len(self.ims)\n\n def __getitem__(self, index):\n img_path = os.path.join(self.data_root, self.video_name, self.ims[index])\n img = imageio.imread(img_path).astype(np.float32) / 255.0\n mask_path = os.path.join(\n self.data_root,\n self.video_name,\n self.ims[index].replace(\"images\", \"mask\").replace(\".jpg\", \".png\"),\n )\n msk = imageio.imread(mask_path)\n\n H, W = img.shape[:2]\n msk = cv2.resize(msk, (W, H), interpolation=cv2.INTER_NEAREST)\n cam_ind = self.cam_inds[index]\n K = np.array(self.cams[\"K\"][cam_ind])\n D = np.array(self.cams[\"D\"][cam_ind])\n img = cv2.undistort(img, K, D)\n msk = cv2.undistort(msk, K, D)\n\n H, W = int(img.shape[0] * self.image_zoom_ratio), int(img.shape[1] * self.image_zoom_ratio)\n img = cv2.resize(img, (W, H), interpolation=cv2.INTER_AREA)\n msk = cv2.resize(msk, (W, H), interpolation=cv2.INTER_NEAREST)\n K[:2] = K[:2] * self.image_zoom_ratio\n\n img[msk == 0] = self.bg_color\n\n ret = {\n \"rgb\": img.astype(np.float32),\n \"mask\": msk.astype(np.bool).astype(np.float32),\n \"K\": K.copy().astype(np.float32),\n \"smpl_beta\": 
self.beta.astype(np.float32),\n \"smpl_pose\": self.smpl_theta_list[index].astype(np.float32),\n \"smpl_trans\": self.smpl_trans_list[index].astype(np.float32),\n \"idx\": index,\n }\n\n assert cam_ind == self.meta[index][\"cam_ind\"]\n\n meta_info = {\n \"video\": self.video_name,\n \"cam_ind\": cam_ind,\n \"frame_idx\": self.meta[index][\"frame_idx\"],\n }\n viz_id = f\"video{self.video_name}_dataidx{index}\"\n meta_info[\"viz_id\"] = viz_id\n return ret, meta_info"
},
{
"identifier": "get_batch_sampler",
"path": "lib_data/zju_mocap.py",
"snippet": "def get_batch_sampler(dataset, frame_sampler_interval=6):\n # instant-nvr use 6\n sampler = FrameSampler(dataset, frame_sampler_interval=frame_sampler_interval)\n batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, False)\n return batch_sampler"
},
{
"identifier": "Dataset",
"path": "lib_data/instant_avatar_wild.py",
"snippet": "class Dataset(Dataset):\n # from instant avatar\n def __init__(\n self,\n data_root=\"data/people_snapshot_public_instant_avatar_processed\",\n video_name=\"male-3-casual\",\n split=\"train\",\n image_zoom_ratio=1.0,\n start_end_skip=None,\n ) -> None:\n super().__init__()\n self.data_root = data_root\n self.video_name = video_name\n\n if start_end_skip is not None:\n start, end, skip = start_end_skip\n else:\n # raise NotImplementedError(\"Must specify, check the end+1\")\n if split == \"train\":\n start, end, skip = 0, 41+1, 1\n elif split == \"val\":\n start, end, skip = 41, 42+1, 1\n elif split == \"test\":\n start, end, skip = 42, 51+1, 1\n\n self.image_zoom_ratio = image_zoom_ratio\n\n root = osp.join(data_root, video_name)\n\n camera = np.load(osp.join(root, \"cameras.npz\"))\n K = camera[\"intrinsic\"]\n T_wc = np.linalg.inv(camera[\"extrinsic\"])\n assert np.allclose(T_wc, np.eye(4))\n\n height = camera[\"height\"]\n width = camera[\"width\"]\n\n self.downscale = 1.0 / self.image_zoom_ratio\n if self.downscale > 1:\n height = int(height / self.downscale)\n width = int(width / self.downscale)\n K[:2] /= self.downscale\n self.K = K\n\n self.img_lists = sorted(glob.glob(f\"{root}/images/*.png\"))[start:end:skip]\n self.msk_lists = sorted(glob.glob(f\"{root}/masks/*.png\"))[start:end:skip]\n\n pose_fn = osp.join(root, \"poses_optimized.npz\")\n smpl_params = load_smpl_param(pose_fn)\n smpl_params[\"body_pose\"] = smpl_params[\"body_pose\"][start:end:skip]\n smpl_params[\"global_orient\"] = smpl_params[\"global_orient\"][start:end:skip]\n smpl_params[\"transl\"] = smpl_params[\"transl\"][start:end:skip]\n self.smpl_params = smpl_params\n\n # # ! debug\n # pose_fn = osp.join(root, \"poses\",\"train.npz\")\n # smpl_params = load_smpl_param(pose_fn)\n # smpl_params[\"body_pose\"] = smpl_params[\"body_pose\"][start:end:skip]\n # smpl_params[\"global_orient\"] = smpl_params[\"global_orient\"][start:end:skip]\n # smpl_params[\"transl\"] = smpl_params[\"transl\"][start:end:skip]\n # self.smpl_params = smpl_params\n\n # cache the images\n self.img_buffer, self.msk_buffer = [], []\n for idx in tqdm(range(len(self.img_lists))):\n img = cv2.imread(self.img_lists[idx])[..., ::-1]\n # msk = np.load(self.msk_lists[idx])\n msk = cv2.imread(self.msk_lists[idx], cv2.IMREAD_GRAYSCALE)\n if self.downscale > 1:\n img = cv2.resize(\n img, dsize=None, fx=1 / self.downscale, fy=1 / self.downscale\n )\n msk = cv2.resize(\n msk, dsize=None, fx=1 / self.downscale, fy=1 / self.downscale\n )\n\n img = (img[..., :3] / 255).astype(np.float32)\n msk = msk.astype(np.float32) / 255.0\n # apply mask\n # always white\n bg_color = np.ones_like(img).astype(np.float32)\n img = img * msk[..., None] + (1 - msk[..., None])\n self.img_buffer.append(img)\n self.msk_buffer.append(msk)\n return\n\n def __len__(self):\n return len(self.img_lists)\n\n def __getitem__(self, idx):\n img = self.img_buffer[idx]\n msk = self.msk_buffer[idx]\n\n pose = self.smpl_params[\"body_pose\"][idx].reshape((23, 3))\n pose = np.concatenate([self.smpl_params[\"global_orient\"][idx][None], pose], 0)\n\n ret = {\n \"rgb\": img.astype(np.float32),\n \"mask\": msk,\n \"K\": self.K.copy(),\n \"smpl_beta\": self.smpl_params[\"betas\"][0], # ! use the first beta!\n \"smpl_pose\": pose,\n \"smpl_trans\": self.smpl_params[\"transl\"][idx],\n \"idx\": idx,\n }\n\n meta_info = {\n \"video\": self.video_name,\n }\n viz_id = f\"video{self.video_name}_dataidx{idx}\"\n meta_info[\"viz_id\"] = viz_id\n return ret, meta_info"
},
{
"identifier": "Dataset",
"path": "lib_data/dog_demo.py",
"snippet": "class Dataset(Dataset):\n def __init__(\n self, data_root=\"data/dog_data\", video_name=\"hound\", test=False\n ) -> None:\n super().__init__()\n self.data_root = data_root\n self.video_name = video_name\n root = osp.join(data_root, video_name)\n\n image_dir = osp.join(root, \"images\")\n pose_dir = osp.join(root, \"pred\")\n\n if test:\n id_list = get_test_frame_id_list(video_name)\n else:\n id_list = get_frame_id_list(video_name)\n\n self.rgb_list, self.mask_list = [], []\n betas_list = []\n self.pose_list, self.trans_list = [], []\n self.K_list = []\n\n for i in tqdm(id_list):\n img_path = osp.join(image_dir, f\"{i:04d}.png\")\n msk_path = osp.join(image_dir, f\"{i:04d}.npy\")\n pose_path = osp.join(pose_dir, f\"{i:04d}.npz\")\n if not osp.exists(msk_path):\n continue\n\n rgb = imageio.imread(img_path)\n assert rgb.shape[0] == 512 and rgb.shape[1] == 512\n mask = np.load(msk_path, allow_pickle=True).item()\n mask = masktool.decode(mask)\n\n pred = dict(np.load(pose_path, allow_pickle=True))\n betas = pred[\"pred_betas\"]\n betas_limbs = pred[\"pred_betas_limbs\"]\n\n pose = pred[\"pred_pose\"]\n pose = matrix_to_axis_angle(torch.from_numpy(pose)).numpy()[0].reshape(-1)\n trans = pred[\"pred_trans\"][0]\n focal = pred[\"pred_focal\"] * 2 # for 512 size image\n\n K = np.eye(3)\n K[0, 0], K[1, 1] = focal, focal\n K[0, 2], K[1, 2] = 256, 256\n\n rgb = (rgb[..., :3] / 255).astype(np.float32)\n mask = mask.astype(np.float32)\n # apply mask\n rgb = rgb * mask[..., None] + (1 - mask[..., None])\n\n self.rgb_list.append(rgb)\n self.mask_list.append(mask)\n betas_list.append(betas)\n self.pose_list.append(np.concatenate([pose, betas_limbs[0]], 0))\n self.trans_list.append(trans)\n self.K_list.append(K)\n # average the beta\n self.betas = np.concatenate(betas_list, 0).mean(0)\n print(f\"Loaded {len(self.rgb_list)} frames from {video_name}\")\n return\n\n def __len__(self):\n return len(self.rgb_list)\n\n def __getitem__(self, idx):\n img = self.rgb_list[idx]\n msk = self.mask_list[idx]\n pose = self.pose_list[idx]\n\n ret = {\n \"rgb\": img.astype(np.float32),\n \"mask\": msk,\n \"K\": self.K_list[idx].copy(),\n \"smpl_beta\": self.betas,\n \"smpl_pose\": pose,\n \"smpl_trans\": self.trans_list[idx],\n \"idx\": idx,\n }\n\n meta_info = {\n \"video\": self.video_name,\n }\n viz_id = f\"video{self.video_name}_dataidx{idx}\"\n meta_info[\"viz_id\"] = viz_id\n return ret, meta_info"
}
] | import sys, os, os.path as osp
import torch
import numpy as np
import cv2, glob
import pandas as pd
import imageio
import logging
from eval_utils_instant_avatar import Evaluator as EvalAvatar
from eval_utils_instant_nvr import Evaluator as EvalNVR
from eval_utils_instant_avatar_brightness import Evaluator as EvalAvatarBrightness
from typing import Union
from lib_render.gauspl_renderer import render_cam_pcl
from tqdm import tqdm
from lib_data.instant_avatar_people_snapshot import Dataset as InstantAvatarDataset
from lib_data.zju_mocap import Dataset as ZJUDataset, get_batch_sampler
from lib_data.instant_avatar_wild import Dataset as InstantAvatarWildDataset
from lib_data.dog_demo import Dataset as DogDemoDataset
from matplotlib import pyplot as plt | 10,517 | tto_decay_factor=0.5,
tto_evaluator=None,
pose_base_lr=3e-3,
pose_rest_lr=3e-3,
trans_lr=3e-3,
device=torch.device("cuda:0"),
):
model.eval()
if tto_flag:
test_save_dir_tto = osp.join(log_dir, f"{save_name}_tto")
os.makedirs(test_save_dir_tto, exist_ok=True)
else:
test_save_dir = osp.join(log_dir, save_name)
os.makedirs(test_save_dir, exist_ok=True)
if dataset_mode == "zju":
# ! follow instant-nvr evaluation
iter_test_dataset = torch.utils.data.DataLoader(
test_dataset,
batch_sampler=get_batch_sampler(test_dataset, frame_sampler_interval=6),
num_workers=0,
)
else:
iter_test_dataset = test_dataset
logging.info(
f"Saving images [TTO={tto_flag}] [N={len(iter_test_dataset)}]..."
)
for batch_idx, batch in tqdm(enumerate(iter_test_dataset)):
# get data
data, meta = batch
if dataset_mode == "zju":
for k in data.keys():
data[k] = data[k].squeeze(0)
rgb_gt = torch.as_tensor(data["rgb"])[None].float().to(device)
mask_gt = torch.as_tensor(data["mask"])[None].float().to(device)
H, W = rgb_gt.shape[1:3]
K = torch.as_tensor(data["K"]).float().to(device)
pose = torch.as_tensor(data["smpl_pose"]).float().to(device)[None]
trans = torch.as_tensor(data["smpl_trans"]).float().to(device)[None]
if dataset_mode == "zju":
fn = f"frame{int(meta['frame_idx']):04d}_view{int(meta['cam_ind']):04d}.png"
else:
fn = f"{batch_idx}.png"
if tto_flag:
# change the pose from the dataset to fit the test view
pose_b, pose_r = pose[:, :1], pose[:, 1:]
model.eval()
# * for delta list
try:
list_flag = model.add_bones.mode in ["delta-list"]
except:
list_flag = False
if list_flag:
As = model.add_bones(t=batch_idx) # B,K,4,4, the nearest pose
else:
As = None # place holder
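            # Test-time pose optimization (TTO): refine global orientation, body pose, and translation against the GT image/mask before rendering.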
new_pose_b, new_pose_r, new_trans, As = solver.testtime_pose_optimization(
data_pack=[
rgb_gt,
mask_gt,
K[None],
pose_b,
pose_r,
trans,
None,
],
model=model,
evaluator=tto_evaluator,
pose_base_lr=pose_base_lr,
pose_rest_lr=pose_rest_lr,
trans_lr=trans_lr,
steps=tto_step,
decay_steps=tto_decay,
decay_factor=tto_decay_factor,
As=As,
)
pose = torch.cat([new_pose_b, new_pose_r], dim=1).detach()
trans = new_trans.detach()
save_fn = osp.join(test_save_dir_tto, fn)
_save_render_image_from_pose(
model,
pose,
trans,
H,
W,
K,
bg,
rgb_gt,
save_fn,
time_index=batch_idx,
As=As,
)
else:
save_fn = osp.join(test_save_dir, fn)
_save_render_image_from_pose(
model, pose, trans, H, W, K, bg, rgb_gt, save_fn, time_index=batch_idx
)
return
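# Render the avatar for a single (pose, trans) under camera intrinsics K and save the result to save_fn.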
@torch.no_grad()
def _save_render_image_from_pose(
model, pose, trans, H, W, K, bg, rgb_gt, save_fn, time_index=None, As=None
):
act_sph_order = model.max_sph_order
device = pose.device
    # TODO: handle novel time! Not passing t in means t=None; one can always use TTO to directly find As.
additional_dict = {"t": time_index}
if As is not None:
additional_dict["As"] = As
mu, fr, sc, op, sph, _ = model(
pose, trans, additional_dict=additional_dict, active_sph_order=act_sph_order
) # TODO: directly input optimized As!
|
sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
sys.path.append(osp.dirname(osp.abspath(__file__)))
def get_evaluator(mode, device):
if mode == "avatar":
evaluator = EvalAvatar()
elif mode == "nvr":
evaluator = EvalNVR()
elif mode == "avatar_brightness":
evaluator = EvalAvatarBrightness()
else:
raise NotImplementedError()
evaluator = evaluator.to(device)
evaluator.eval()
return evaluator
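# Wraps the training-time optimized sequence so it exposes the same (data, meta) interface as the test datasets.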
class TrainingSeqWrapper:
def __init__(self, seq) -> None:
self.seq = seq
def __len__(self):
return self.seq.total_t
def __getitem__(self, idx):
data = {}
data["rgb"] = self.seq.rgb_list[idx]
data["mask"] = self.seq.mask_list[idx]
data["K"] = self.seq.K_list[idx]
data["smpl_pose"] = torch.cat(
[self.seq.pose_base_list[idx], self.seq.pose_rest_list[idx]], dim=0
)
data["smpl_trans"] = self.seq.global_trans_list[idx]
return data, {}
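# Evaluation entry point: load the saved model, build the benchmark-specific test dataset and evaluator, render (optionally with test-time pose optimization), and compute metrics.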
def test(
solver,
seq_name: str,
tto_flag=True,
tto_step=300,
tto_decay=60,
tto_decay_factor=0.5,
pose_base_lr=3e-3,
pose_rest_lr=3e-3,
trans_lr=3e-3,
dataset_mode="people_snapshot",
training_optimized_seq=None,
):
device = solver.device
model = solver.load_saved_model()
assert dataset_mode in [
"people_snapshot",
"zju",
"instant_avatar_wild",
"dog_demo",
], f"Unknown dataset mode {dataset_mode}"
if dataset_mode == "people_snapshot":
eval_mode = "avatar"
bg = [1.0, 1.0, 1.0]
test_dataset = InstantAvatarDataset(
noisy_flag=False,
data_root="./data/people_snapshot/",
video_name=seq_name,
split="test",
image_zoom_ratio=0.5,
)
elif dataset_mode == "zju":
eval_mode = "nvr"
test_dataset = ZJUDataset(
data_root="./data/zju_mocap",
video_name=seq_name,
split="test",
image_zoom_ratio=0.5,
)
bg = [0.0, 0.0, 0.0]  # zju uses a black background
elif dataset_mode == "instant_avatar_wild":
eval_mode = "avatar"
test_dataset = InstantAvatarWildDataset(
data_root="./data/insav_wild",
video_name=seq_name,
split="test",
image_zoom_ratio=1.0,
# ! warning: here we follow the `ubc_hard.yaml` InstAVT setting and use slicing
start_end_skip=[2, 1000000000, 4],
)
bg = [1.0, 1.0, 1.0]
test_len = len(test_dataset)
assert (training_optimized_seq.total_t == test_len) or (
training_optimized_seq.total_t == 1 + test_len
), "Now UBC can only support the same length of training and testing or + 1"
test_dataset.smpl_params["body_pose"] = (
training_optimized_seq.pose_rest_list.reshape(-1, 69)[:test_len]
.detach()
.cpu()
.numpy()
)
test_dataset.smpl_params["global_orient"] = (
training_optimized_seq.pose_base_list.reshape(-1, 3)[:test_len]
.detach()
.cpu()
.numpy()
)
test_dataset.smpl_params["transl"] = (
training_optimized_seq.global_trans_list.reshape(-1, 3)[:test_len]
.detach()
.cpu()
.numpy()
)
elif dataset_mode == "dog_demo":
eval_mode = "avatar_brightness"
bg = [1.0, 1.0, 1.0]
test_dataset = DogDemoDataset(
data_root="./data/dog_data_official/", video_name=seq_name, test=True
)
else:
raise NotImplementedError()
evaluator = get_evaluator(eval_mode, device)
_save_eval_maps(
solver.log_dir,
"test",
model,
solver,
test_dataset,
dataset_mode=dataset_mode,
device=device,
bg=bg,
tto_flag=tto_flag,
tto_step=tto_step,
tto_decay=tto_decay,
tto_decay_factor=tto_decay_factor,
tto_evaluator=evaluator,
pose_base_lr=pose_base_lr,
pose_rest_lr=pose_rest_lr,
trans_lr=trans_lr,
)
if tto_flag:
_evaluate_dir(evaluator, solver.log_dir, "test_tto")
else:
_evaluate_dir(evaluator, solver.log_dir, "test")
return
def _save_eval_maps(
log_dir,
save_name,
model,
solver,
test_dataset,
dataset_mode="people_snapshot",
bg=[1.0, 1.0, 1.0],
# tto
tto_flag=False,
tto_step=300,
tto_decay=60,
tto_decay_factor=0.5,
tto_evaluator=None,
pose_base_lr=3e-3,
pose_rest_lr=3e-3,
trans_lr=3e-3,
device=torch.device("cuda:0"),
):
model.eval()
if tto_flag:
test_save_dir_tto = osp.join(log_dir, f"{save_name}_tto")
os.makedirs(test_save_dir_tto, exist_ok=True)
else:
test_save_dir = osp.join(log_dir, save_name)
os.makedirs(test_save_dir, exist_ok=True)
if dataset_mode == "zju":
# ! follow instant-nvr evaluation
iter_test_dataset = torch.utils.data.DataLoader(
test_dataset,
batch_sampler=get_batch_sampler(test_dataset, frame_sampler_interval=6),
num_workers=0,
)
else:
iter_test_dataset = test_dataset
logging.info(
f"Saving images [TTO={tto_flag}] [N={len(iter_test_dataset)}]..."
)
for batch_idx, batch in tqdm(enumerate(iter_test_dataset)):
# get data
data, meta = batch
if dataset_mode == "zju":
for k in data.keys():
data[k] = data[k].squeeze(0)
rgb_gt = torch.as_tensor(data["rgb"])[None].float().to(device)
mask_gt = torch.as_tensor(data["mask"])[None].float().to(device)
H, W = rgb_gt.shape[1:3]
K = torch.as_tensor(data["K"]).float().to(device)
pose = torch.as_tensor(data["smpl_pose"]).float().to(device)[None]
trans = torch.as_tensor(data["smpl_trans"]).float().to(device)[None]
if dataset_mode == "zju":
fn = f"frame{int(meta['frame_idx']):04d}_view{int(meta['cam_ind']):04d}.png"
else:
fn = f"{batch_idx}.png"
if tto_flag:
# change the pose from the dataset to fit the test view
pose_b, pose_r = pose[:, :1], pose[:, 1:]
model.eval()
# * for delta list
try:
list_flag = model.add_bones.mode in ["delta-list"]
except AttributeError:
list_flag = False
if list_flag:
As = model.add_bones(t=batch_idx) # B,K,4,4, the nearest pose
else:
As = None # placeholder
new_pose_b, new_pose_r, new_trans, As = solver.testtime_pose_optimization(
data_pack=[
rgb_gt,
mask_gt,
K[None],
pose_b,
pose_r,
trans,
None,
],
model=model,
evaluator=tto_evaluator,
pose_base_lr=pose_base_lr,
pose_rest_lr=pose_rest_lr,
trans_lr=trans_lr,
steps=tto_step,
decay_steps=tto_decay,
decay_factor=tto_decay_factor,
As=As,
)
pose = torch.cat([new_pose_b, new_pose_r], dim=1).detach()
trans = new_trans.detach()
save_fn = osp.join(test_save_dir_tto, fn)
_save_render_image_from_pose(
model,
pose,
trans,
H,
W,
K,
bg,
rgb_gt,
save_fn,
time_index=batch_idx,
As=As,
)
else:
save_fn = osp.join(test_save_dir, fn)
_save_render_image_from_pose(
model, pose, trans, H, W, K, bg, rgb_gt, save_fn, time_index=batch_idx
)
return
@torch.no_grad()
def _save_render_image_from_pose(
model, pose, trans, H, W, K, bg, rgb_gt, save_fn, time_index=None, As=None
):
act_sph_order = model.max_sph_order
device = pose.device
# TODO: handle novel time; currently, not passing a time index means t=None. TTO can always be used to directly find As.
additional_dict = {"t": time_index}
if As is not None:
additional_dict["As"] = As
mu, fr, sc, op, sph, _ = model(
pose, trans, additional_dict=additional_dict, active_sph_order=act_sph_order
) # TODO: directly input optimized As! | render_pkg = render_cam_pcl( | 0 | 2023-11-27 17:30:04+00:00 | 12k |
GongyeLiu/StyleCrafter | scripts/evaluation/style_inference.py | [
{
"identifier": "DDIMSampler",
"path": "lvdm/models/samplers/ddim.py",
"snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.counter = 0\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n self.use_scale = self.model.use_scale\n\n if self.use_scale:\n self.register_buffer('scale_arr', to_torch(self.model.scale_arr))\n ddim_scale_arr = self.scale_arr.cpu()[self.ddim_timesteps]\n self.register_buffer('ddim_scale_arr', ddim_scale_arr)\n ddim_scale_arr = np.asarray([self.scale_arr.cpu()[0]] + self.scale_arr.cpu()[self.ddim_timesteps[:-1]].tolist())\n self.register_buffer('ddim_scale_arr_prev', ddim_scale_arr)\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n schedule_verbose=False,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n \n # check condition bs\n if conditioning is not None:\n if isinstance(conditioning, dict):\n try:\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n except:\n cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]\n\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=schedule_verbose)\n \n # make shape\n if len(shape) == 3:\n C, H, W = shape\n size = (batch_size, C, H, W)\n elif len(shape) == 4:\n C, T, H, W = shape\n size = (batch_size, C, T, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n \n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n verbose=verbose,\n **kwargs)\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, verbose=True,\n cond_tau=1., target_size=None, start_timesteps=None,\n **kwargs):\n device = self.model.betas.device \n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n \n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n \n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n if verbose:\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n else:\n iterator = time_range\n\n init_x0 = False\n clean_cond = kwargs.pop(\"clean_cond\", False)\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n if start_timesteps is not None:\n assert x0 is not None\n if step > start_timesteps*time_range[0]:\n continue\n elif not init_x0:\n img = self.model.q_sample(x0, ts) \n init_x0 = True\n\n # use mask to blend noised original latent (img_orig) & new sampled latent (img)\n if mask is not None:\n assert x0 is not None\n if clean_cond:\n img_orig = x0\n else:\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? <ddim inversion>\n img = img_orig * mask + (1. 
- mask) * img # keep original & modify use img\n \n index_clip = int((1 - cond_tau) * total_steps)\n if index <= index_clip and target_size is not None:\n target_size_ = [target_size[0], target_size[1]//8, target_size[2]//8]\n img = torch.nn.functional.interpolate(\n img,\n size=target_size_,\n mode=\"nearest\",\n )\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n x0=x0,\n **kwargs)\n \n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n uc_type=None, conditional_guidance_scale_temporal=None, **kwargs):\n b, *_, device = *x.shape, x.device\n if x.dim() == 5:\n is_video = True\n else:\n is_video = False\n\n uncond_kwargs = kwargs.copy()\n uncond_kwargs['append_to_context'] = None\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, **kwargs) # unet denoiser\n else:\n # with unconditional condition\n if isinstance(c, torch.Tensor):\n e_t = self.model.apply_model(x, t, c, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **uncond_kwargs)\n elif isinstance(c, dict):\n e_t = self.model.apply_model(x, t, c, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **uncond_kwargs)\n else:\n raise NotImplementedError\n # text cfg\n if uc_type is None:\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n else:\n if uc_type == 'cfg_original':\n e_t = e_t + unconditional_guidance_scale * (e_t - e_t_uncond)\n elif uc_type == 'cfg_ours':\n e_t = e_t + unconditional_guidance_scale * (e_t_uncond - e_t)\n else:\n raise NotImplementedError\n # temporal guidance\n if conditional_guidance_scale_temporal is not None:\n e_t_temporal = self.model.apply_model(x, t, c, **kwargs)\n e_t_image = self.model.apply_model(x, t, c, no_temporal_attn=True, **kwargs)\n e_t = e_t + conditional_guidance_scale_temporal * (e_t_temporal - e_t_image)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n \n if is_video:\n size = (b, 1, 1, 1, 1)\n else:\n size = (b, 1, 1, 1)\n a_t = torch.full(size, alphas[index], device=device)\n a_prev = torch.full(size, alphas_prev[index], 
device=device)\n sigma_t = torch.full(size, sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(size, sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n \n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n if self.use_scale:\n scale_arr = self.model.scale_arr if use_original_steps else self.ddim_scale_arr\n scale_t = torch.full(size, scale_arr[index], device=device)\n scale_arr_prev = self.model.scale_arr_prev if use_original_steps else self.ddim_scale_arr_prev\n scale_t_prev = torch.full(size, scale_arr_prev[index], device=device)\n pred_x0 /= scale_t \n x_prev = a_prev.sqrt() * scale_t_prev * pred_x0 + dir_xt + noise\n else:\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n\n return x_prev, pred_x0\n\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n\n def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))\n\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n return x_dec"
},
{
"identifier": "DDIMStyleSampler",
"path": "lvdm/models/samplers/ddim.py",
"snippet": "class DDIMStyleSampler(DDIMSampler):\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_guidance_scale_style=None, unconditional_conditioning=None,\n uc_type=None, conditional_guidance_scale_temporal=None, **kwargs):\n b, *_, device = *x.shape, x.device\n if x.dim() == 5:\n is_video = True\n else:\n is_video = False\n uncond_kwargs = kwargs.copy()\n uncond_kwargs['append_to_context'] = None\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, **kwargs) # unet denoiser\n else:\n # with unconditional condition\n if isinstance(c, torch.Tensor):\n e_t = self.model.apply_model(x, t, c, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **uncond_kwargs)\n if unconditional_guidance_scale_style is not None:\n e_t_uncond_style = self.model.apply_model(x, t, c, **uncond_kwargs)\n elif isinstance(c, dict):\n e_t = self.model.apply_model(x, t, c, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **uncond_kwargs)\n if unconditional_guidance_scale_style is not None:\n e_t_uncond_style = self.model.apply_model(x, t, c, **uncond_kwargs)\n else:\n raise NotImplementedError\n \n if unconditional_guidance_scale_style is None:\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n else:\n e_t = e_t + unconditional_guidance_scale_style * (e_t - e_t_uncond_style) + \\\n unconditional_guidance_scale * (e_t_uncond_style - e_t_uncond)\n \n # temporal guidance\n if conditional_guidance_scale_temporal is not None:\n e_t_temporal = self.model.apply_model(x, t, c, **kwargs)\n e_t_image = self.model.apply_model(x, t, c, no_temporal_attn=True, **kwargs)\n e_t = e_t + conditional_guidance_scale_temporal * (e_t_temporal - e_t_image)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n \n if is_video:\n size = (b, 1, 1, 1, 1)\n else:\n size = (b, 1, 1, 1)\n a_t = torch.full(size, alphas[index], device=device)\n a_prev = torch.full(size, alphas_prev[index], device=device)\n sigma_t = torch.full(size, sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(size, sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n # print(f't={t}, pred_x0, min={torch.min(pred_x0)}, max={torch.max(pred_x0)}',file=f)\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n # # norm pred_x0\n # p=2\n # s=()\n # pred_x0 = pred_x0 - torch.max(torch.abs(pred_x0))\n\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n \n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n\n return x_prev, pred_x0"
},
{
"identifier": "instantiate_from_config",
"path": "utils/utils.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))"
},
{
"identifier": "tensor_to_mp4",
"path": "utils/save_video.py",
"snippet": "def tensor_to_mp4(video, savepath, fps, rescale=True, nrow=None):\n \"\"\"\n video: torch.Tensor, b,c,t,h,w, 0-1\n if -1~1, enable rescale=True\n \"\"\"\n n = video.shape[0]\n video = video.permute(2, 0, 1, 3, 4) # t,n,c,h,w\n nrow = int(np.sqrt(n)) if nrow is None else nrow\n frame_grids = [torchvision.utils.make_grid(framesheet, nrow=nrow) for framesheet in video] # [3, grid_h, grid_w]\n grid = torch.stack(frame_grids, dim=0) # stack in temporal dim [T, 3, grid_h, grid_w]\n grid = torch.clamp(grid.float(), -1., 1.)\n if rescale:\n grid = (grid + 1.0) / 2.0\n grid = (grid * 255).to(torch.uint8).permute(0, 2, 3, 1) # [T, 3, grid_h, grid_w] -> [T, grid_h, grid_w, 3]\n #print(f'Save video to {savepath}')\n torchvision.io.write_video(savepath, grid, fps=fps, video_codec='h264', options={'crf': '10'})"
}
] | import argparse, os, sys, glob
import datetime, time
import numpy as np
import torch
import torchvision
import json
from omegaconf import OmegaConf
from tqdm import tqdm
from einops import rearrange, repeat
from collections import OrderedDict
from torch.utils.data import DataLoader
from pytorch_lightning import seed_everything
from decord import VideoReader, cpu
from PIL import Image
from torchvision.transforms import transforms
from torchvision.utils import make_grid
from lvdm.models.samplers.ddim import DDIMSampler, DDIMStyleSampler
from utils.utils import instantiate_from_config
from utils.save_video import tensor_to_mp4 | 7,613 | if isinstance(style_path, list) and not DISABLE_MULTI_REF:
style_imgs = []
for path in style_path:
style_img = Image.open(os.path.join(data_dir, path)).convert('RGB')
style_img_tensor = style_transforms(style_img)
style_imgs.append(style_img_tensor)
style_img_tensor = torch.stack(style_imgs, dim=0)
elif isinstance(style_path, list) and DISABLE_MULTI_REF:
rand_idx = np.random.randint(0, len(style_path))
style_img = Image.open(os.path.join(data_dir, style_path[rand_idx])).convert('RGB')
style_img_tensor = style_transforms(style_img)
print(f"Warning: multiple style images exist. The one {style_path[rand_idx]} is used.")
else:
style_img = Image.open(os.path.join(data_dir, style_path)).convert('RGB')
style_img_tensor = style_transforms(style_img)
else:
raise ValueError("Error: style image path is None!")
data_list.append({
'prompt': prompt,
'style': style_img_tensor
})
return data_list
def save_results(prompt, samples, filename, sample_dir, prompt_dir, fps=10, out_type='video'):
## save prompt
prompt = prompt[0] if isinstance(prompt, list) else prompt
path = os.path.join(prompt_dir, "%s.txt"%filename)
with open(path, 'w') as f:
f.write(f'{prompt}')
f.close()
## save video
if out_type == 'image':
n = samples.shape[0]
output = make_grid(samples, nrow=n, normalize=True, range=(-1, 1))
output_img = Image.fromarray(output.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy())
output_img.save(os.path.join(sample_dir, "%s.jpg"%filename))
elif out_type == 'video':
## save video
# b,c,t,h,w
video = samples.detach().cpu()
video = torch.clamp(video.float(), -1., 1.)
n = video.shape[0]
video = video.permute(2, 0, 1, 3, 4) # t,n,c,h,w
frame_grids = [torchvision.utils.make_grid(framesheet, nrow=int(n)) for framesheet in video] #[3, 1*h, n*w]
grid = torch.stack(frame_grids, dim=0) # stack in temporal dim [t, 3, n*h, w]
grid = (grid + 1.0) / 2.0
grid = (grid * 255).to(torch.uint8).permute(0, 2, 3, 1)
path = os.path.join(sample_dir, "%s.mp4"%filename)
torchvision.io.write_video(path, grid, fps=fps, video_codec='h264', options={'crf': '10'})
else:
raise ValueError("Error: output type should be image or video!")
def style_guided_synthesis(model, prompts, style, noise_shape, n_samples=1, ddim_steps=50, ddim_eta=1., \
unconditional_guidance_scale=1.0, unconditional_guidance_scale_style=None, **kwargs):
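# the style-aware DDIM sampler adds a separate guidance term for the style tokens and is only needed when a style guidance scale is given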
ddim_sampler = DDIMSampler(model) if unconditional_guidance_scale_style is None else DDIMStyleSampler(model)
batch_size = noise_shape[0]
## get condition embeddings (support single prompt only)
if isinstance(prompts, str):
prompts = [prompts]
cond = model.get_learned_conditioning(prompts)
# cond = repeat(cond, 'b n c -> (b f) n c', f=16)
if unconditional_guidance_scale != 1.0:
prompts = batch_size * [""]
uc = model.get_learned_conditioning(prompts)
# uc = repeat(uc, 'b n c -> (b f) n c', f=16)
else:
uc = None
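# encode the style reference(s) and map them to extra context tokens via the adapter; a 5D input means multiple references per sample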
if len(style.shape) == 4:
style_cond = model.get_batch_style(style)
append_to_context = model.adapter(style_cond)
else:
bs, n, c, h, w = style.shape
style = rearrange(style, "b n c h w -> (b n) c h w")
style_cond = model.get_batch_style(style)
style_cond = rearrange(style_cond, "(b n) l c -> b (n l ) c", b=bs)
append_to_context = model.adapter(style_cond)
# append_to_context = repeat(append_to_context, 'b n c -> (b f) n c', f=16)
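# some adapter variants predict a fusion scale from the concatenated style and text context; otherwise scale_scalar stays None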
if hasattr(model.adapter, "scale_predictor"):
scale_scalar = model.adapter.scale_predictor(torch.concat([append_to_context, cond], dim=1))
else:
scale_scalar = None
batch_variants = []
for _ in range(n_samples):
if ddim_sampler is not None:
samples, _ = ddim_sampler.sample(S=ddim_steps,
conditioning=cond,
batch_size=noise_shape[0],
shape=noise_shape[1:],
verbose=False,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_guidance_scale_style=unconditional_guidance_scale_style,
unconditional_conditioning=uc,
eta=ddim_eta,
temporal_length=noise_shape[2],
append_to_context=append_to_context,
scale_scalar=scale_scalar,
**kwargs
)
## reconstruct from latent to pixel space
batch_images = model.decode_first_stage(samples)
batch_variants.append(batch_images)
## variants, batch, c, t, h, w
batch_variants = torch.stack(batch_variants)
return batch_variants.permute(1, 0, 2, 3, 4, 5)
def run_inference(args, gpu_num, gpu_no):
## model config
config = OmegaConf.load(args.base)
model_config = config.pop("model", OmegaConf.create())
model_config['params']['adapter_config']['params']['scale'] = args.style_weight
print(f"Set adapter scale to {args.style_weight:.2f}")
|
## note: decord should be imported after torch
sys.path.insert(1, os.path.join(sys.path[0], '..', '..'))
def save_img(img, path, is_tensor=True):
if is_tensor:
img = img.permute(1, 2, 0).cpu().numpy()
img = (img * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
img = Image.fromarray(img)
img.save(path)
def get_filelist(data_dir, ext='*'):
file_list = glob.glob(os.path.join(data_dir, '*.%s'%ext))
file_list.sort()
return file_list
def load_model_checkpoint(model, ckpt):
state_dict = torch.load(ckpt, map_location="cpu")
if "state_dict" in list(state_dict.keys()):
state_dict = state_dict["state_dict"]
else:
# deepspeed
new_state_dict = OrderedDict()
for key in state_dict['module'].keys():
    new_state_dict[key[16:]] = state_dict['module'][key]
state_dict = new_state_dict
model.load_state_dict(state_dict, strict=False)
print('>>> model checkpoint loaded.')
return model
def load_data_from_json(data_dir, filename=None, DISABLE_MULTI_REF=False):
# load data from json file
if filename is not None:
json_file = os.path.join(data_dir, filename)
with open(json_file, 'r') as f:
data = json.load(f)
else:
json_file = get_filelist(data_dir, 'json')
assert len(json_file) > 0, "Error: found NO prompt file!"
default_idx = 0
default_idx = min(default_idx, len(json_file)-1)
if len(json_file) > 1:
print(f"Warning: multiple prompt files exist. The one {os.path.split(json_file[default_idx])[1]} is used.")
## only use the first one (sorted by name) if multiple exist
with open(json_file[default_idx], 'r') as f:
data = json.load(f)
n_samples = len(data)
data_list = []
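# style references are resized, center-cropped to 512x512, and rescaled to [-1, 1]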
style_transforms = torchvision.transforms.Compose([
torchvision.transforms.Resize(512),
torchvision.transforms.CenterCrop(512),
torchvision.transforms.ToTensor(),
torchvision.transforms.Lambda(lambda x: x * 2. - 1.),
])
for idx in range(n_samples):
prompt = data[idx]['prompt']
# load style image
if data[idx]['style_path'] is not None:
style_path = data[idx]['style_path']
if isinstance(style_path, list) and not DISABLE_MULTI_REF:
style_imgs = []
for path in style_path:
style_img = Image.open(os.path.join(data_dir, path)).convert('RGB')
style_img_tensor = style_transforms(style_img)
style_imgs.append(style_img_tensor)
style_img_tensor = torch.stack(style_imgs, dim=0)
elif isinstance(style_path, list) and DISABLE_MULTI_REF:
rand_idx = np.random.randint(0, len(style_path))
style_img = Image.open(os.path.join(data_dir, style_path[rand_idx])).convert('RGB')
style_img_tensor = style_transforms(style_img)
print(f"Warning: multiple style images exist. The one {style_path[rand_idx]} is used.")
else:
style_img = Image.open(os.path.join(data_dir, style_path)).convert('RGB')
style_img_tensor = style_transforms(style_img)
else:
raise ValueError("Error: style image path is None!")
data_list.append({
'prompt': prompt,
'style': style_img_tensor
})
return data_list
def save_results(prompt, samples, filename, sample_dir, prompt_dir, fps=10, out_type='video'):
## save prompt
prompt = prompt[0] if isinstance(prompt, list) else prompt
path = os.path.join(prompt_dir, "%s.txt"%filename)
with open(path, 'w') as f:
f.write(f'{prompt}')
f.close()
## save video
if out_type == 'image':
n = samples.shape[0]
output = make_grid(samples, nrow=n, normalize=True, range=(-1, 1))
output_img = Image.fromarray(output.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy())
output_img.save(os.path.join(sample_dir, "%s.jpg"%filename))
elif out_type == 'video':
## save video
# b,c,t,h,w
video = samples.detach().cpu()
video = torch.clamp(video.float(), -1., 1.)
n = video.shape[0]
video = video.permute(2, 0, 1, 3, 4) # t,n,c,h,w
frame_grids = [torchvision.utils.make_grid(framesheet, nrow=int(n)) for framesheet in video] #[3, 1*h, n*w]
grid = torch.stack(frame_grids, dim=0) # stack in temporal dim [t, 3, n*h, w]
grid = (grid + 1.0) / 2.0
grid = (grid * 255).to(torch.uint8).permute(0, 2, 3, 1)
path = os.path.join(sample_dir, "%s.mp4"%filename)
torchvision.io.write_video(path, grid, fps=fps, video_codec='h264', options={'crf': '10'})
else:
raise ValueError("Error: output type should be image or video!")
def style_guided_synthesis(model, prompts, style, noise_shape, n_samples=1, ddim_steps=50, ddim_eta=1., \
unconditional_guidance_scale=1.0, unconditional_guidance_scale_style=None, **kwargs):
ddim_sampler = DDIMSampler(model) if unconditional_guidance_scale_style is None else DDIMStyleSampler(model)
batch_size = noise_shape[0]
## get condition embeddings (support single prompt only)
if isinstance(prompts, str):
prompts = [prompts]
cond = model.get_learned_conditioning(prompts)
# cond = repeat(cond, 'b n c -> (b f) n c', f=16)
if unconditional_guidance_scale != 1.0:
prompts = batch_size * [""]
uc = model.get_learned_conditioning(prompts)
# uc = repeat(uc, 'b n c -> (b f) n c', f=16)
else:
uc = None
if len(style.shape) == 4:
style_cond = model.get_batch_style(style)
append_to_context = model.adapter(style_cond)
else:
bs, n, c, h, w = style.shape
style = rearrange(style, "b n c h w -> (b n) c h w")
style_cond = model.get_batch_style(style)
style_cond = rearrange(style_cond, "(b n) l c -> b (n l ) c", b=bs)
append_to_context = model.adapter(style_cond)
# append_to_context = repeat(append_to_context, 'b n c -> (b f) n c', f=16)
if hasattr(model.adapter, "scale_predictor"):
scale_scalar = model.adapter.scale_predictor(torch.concat([append_to_context, cond], dim=1))
else:
scale_scalar = None
batch_variants = []
for _ in range(n_samples):
if ddim_sampler is not None:
samples, _ = ddim_sampler.sample(S=ddim_steps,
conditioning=cond,
batch_size=noise_shape[0],
shape=noise_shape[1:],
verbose=False,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_guidance_scale_style=unconditional_guidance_scale_style,
unconditional_conditioning=uc,
eta=ddim_eta,
temporal_length=noise_shape[2],
append_to_context=append_to_context,
scale_scalar=scale_scalar,
**kwargs
)
## reconstruct from latent to pixel space
batch_images = model.decode_first_stage(samples)
batch_variants.append(batch_images)
## variants, batch, c, t, h, w
batch_variants = torch.stack(batch_variants)
return batch_variants.permute(1, 0, 2, 3, 4, 5)
def run_inference(args, gpu_num, gpu_no):
## model config
config = OmegaConf.load(args.base)
model_config = config.pop("model", OmegaConf.create())
model_config['params']['adapter_config']['params']['scale'] = args.style_weight
print(f"Set adapter scale to {args.style_weight:.2f}") | model = instantiate_from_config(model_config) | 2 | 2023-11-30 15:01:49+00:00 | 12k |
emdgroup/baybe | tests/test_iterations.py | [
{
"identifier": "NonPredictiveRecommender",
"path": "baybe/recommenders/base.py",
"snippet": "class NonPredictiveRecommender(Recommender, ABC):\n \"\"\"Abstract base class for recommenders that are non-predictive.\"\"\"\n\n def recommend( # noqa: D102\n self,\n searchspace: SearchSpace,\n batch_quantity: int = 1,\n train_x: Optional[pd.DataFrame] = None,\n train_y: Optional[pd.DataFrame] = None,\n allow_repeated_recommendations: bool = False,\n allow_recommending_already_measured: bool = True,\n ) -> pd.DataFrame:\n # See base class.\n\n if searchspace.type == SearchSpaceType.DISCRETE:\n return _select_candidates_and_recommend(\n searchspace,\n self._recommend_discrete,\n batch_quantity,\n allow_repeated_recommendations,\n allow_recommending_already_measured,\n )\n if searchspace.type == SearchSpaceType.CONTINUOUS:\n return self._recommend_continuous(\n searchspace=searchspace, batch_quantity=batch_quantity\n )\n return self._recommend_hybrid(\n searchspace=searchspace, batch_quantity=batch_quantity\n )\n\n def _recommend_discrete(\n self,\n searchspace: SearchSpace,\n candidates_comp: pd.DataFrame,\n batch_quantity: int,\n ) -> pd.Index:\n \"\"\"Calculate recommendations in a discrete search space.\n\n Args:\n searchspace: The discrete search space in which the recommendations should\n be made.\n candidates_comp: The computational representation of all possible candidates\n batch_quantity: The size of the calculated batch.\n\n Raises:\n NotImplementedError: If the function is not implemented by the child class.\n\n Returns:\n The indices of the recommended points with respect to the\n computational representation.\n \"\"\"\n try:\n return self._recommend_hybrid(\n searchspace=searchspace,\n batch_quantity=batch_quantity,\n candidates_comp=candidates_comp,\n ).index\n except NotImplementedError as exc:\n raise NotImplementedError(\n \"\"\"Hybrid recommender could not be used as fallback when trying to\n optimize a discrete space. This is probably due to your search space and\n recommender not being compatible. Please verify that your search space\n is purely discrete and that you are either using a discrete or hybrid\n recommender.\"\"\"\n ) from exc\n\n def _recommend_continuous(\n self, searchspace: SearchSpace, batch_quantity: int\n ) -> pd.DataFrame:\n \"\"\"Calculate recommendations in a continuous search space.\n\n Args:\n searchspace: The continuous search space in which the recommendations should\n be made.\n batch_quantity: The size of the calculated batch.\n\n Raises:\n NotImplementedError: If the function is not implemented by the child class.\n\n Returns:\n The recommended points.\n \"\"\"\n # If this method is not implemented by a children class, try to call\n # _recommend_hybrid instead.\n try:\n return self._recommend_hybrid(\n searchspace=searchspace, batch_quantity=batch_quantity\n )\n except NotImplementedError as exc:\n raise NotImplementedError(\n \"\"\"Hybrid recommender could not be used as fallback when trying to\n optimize a continuous space. This is probably due to your search space\n and recommender not being compatible. 
Please verify that your\n search space is purely continuous and that you are either using a\n continuous or hybrid recommender.\"\"\"\n ) from exc\n\n def _recommend_hybrid(\n self,\n searchspace: SearchSpace,\n batch_quantity: int,\n candidates_comp: Optional[pd.DataFrame] = None,\n ) -> pd.DataFrame:\n \"\"\"Calculate recommendations in a hybrid search space.\n\n If the recommender does not implement additional functions for discrete and\n continuous search spaces, this method is used as a fallback for those spaces\n as well.\n\n Args:\n searchspace: The hybrid search space in which the recommendations should\n be made.\n batch_quantity: The size of the calculated batch.\n candidates_comp: The computational representation of the candidates. This\n is necessary for using this function as a fallback mechanism for\n recommendations in discrete search spaces.\n\n Raises:\n NotImplementedError: If the function is not implemented by the child class.\n\n Returns:\n The recommended points.\n \"\"\"\n raise NotImplementedError(\"Hybrid recommender is not implemented.\")"
},
{
"identifier": "Recommender",
"path": "baybe/recommenders/base.py",
"snippet": "class Recommender(ABC):\n \"\"\"Abstract base class for all recommenders.\"\"\"\n\n # Class variables\n compatibility: ClassVar[SearchSpaceType]\n \"\"\"Class variable describing the search space compatibility.\"\"\"\n\n @abstractmethod\n def recommend(\n self,\n searchspace: SearchSpace,\n batch_quantity: int = 1,\n train_x: Optional[pd.DataFrame] = None,\n train_y: Optional[pd.DataFrame] = None,\n allow_repeated_recommendations: bool = False,\n allow_recommending_already_measured: bool = True,\n ) -> pd.DataFrame:\n \"\"\"Recommend (a batch of) points in the search space.\n\n Args:\n searchspace: The search space in which experiments are being conducted.\n batch_quantity: The number of points that should be recommended.\n train_x: The training data used to train the model.\n train_y: The training labels used to train the model.\n allow_repeated_recommendations: Allow to make recommendations that were\n already recommended earlier. This only has an influence in discrete\n search spaces.\n allow_recommending_already_measured: Allow to output recommendations that\n were measured previously. This only has an influence in discrete\n search spaces.\n\n Returns:\n A DataFrame containing the recommendations as individual rows.\n \"\"\""
},
{
"identifier": "BayesianRecommender",
"path": "baybe/recommenders/bayesian.py",
"snippet": "class BayesianRecommender(Recommender, ABC):\n \"\"\"An abstract class for Bayesian Recommenders.\"\"\"\n\n surrogate_model: Surrogate = field(factory=GaussianProcessSurrogate)\n \"\"\"The used surrogate model.\"\"\"\n\n acquisition_function_cls: Literal[\n \"PM\", \"PI\", \"EI\", \"UCB\", \"qPI\", \"qEI\", \"qUCB\", \"VarUCB\", \"qVarUCB\"\n ] = field(default=\"qEI\")\n \"\"\"The used acquisition function class.\"\"\"\n\n def _get_acquisition_function_cls(\n self,\n ) -> Callable:\n \"\"\"Get the actual acquisition function class.\n\n Returns:\n The debotorchized acquisition function class.\n \"\"\"\n mapping = {\n \"PM\": PosteriorMean,\n \"PI\": ProbabilityOfImprovement,\n \"EI\": ExpectedImprovement,\n \"UCB\": partial(UpperConfidenceBound, beta=1.0),\n \"qEI\": qExpectedImprovement,\n \"qPI\": qProbabilityOfImprovement,\n \"qUCB\": partial(qUpperConfidenceBound, beta=1.0),\n \"VarUCB\": partial(UpperConfidenceBound, beta=100.0),\n \"qVarUCB\": partial(qUpperConfidenceBound, beta=100.0),\n }\n fun = debotorchize(mapping[self.acquisition_function_cls])\n return fun\n\n def setup_acquisition_function(\n self, searchspace: SearchSpace, train_x: pd.DataFrame, train_y: pd.DataFrame\n ) -> AcquisitionFunction:\n \"\"\"Create the current acquisition function from provided training data.\n\n Args:\n searchspace: The search space in which the experiments are to be conducted.\n train_x: The features of the conducted experiments.\n train_y: The corresponding response values.\n\n Returns:\n An acquisition function obtained by fitting the surrogate model of self to\n the provided training data.\n\n \"\"\"\n best_f = train_y.max()\n surrogate_model = self._fit(searchspace, train_x, train_y)\n acquisition_function_cls = self._get_acquisition_function_cls()\n return acquisition_function_cls(surrogate_model, best_f)\n\n def _fit(\n self,\n searchspace: SearchSpace,\n train_x: pd.DataFrame,\n train_y: pd.DataFrame,\n ) -> Surrogate:\n \"\"\"Train a fresh surrogate model instance for the DOE strategy.\n\n Args:\n searchspace: The search space.\n train_x: The features of the conducted experiments.\n train_y: The corresponding response values.\n\n Returns:\n A surrogate model fitted to the provided data.\n\n Raises:\n ValueError: If the training inputs and targets do not have the same index.\n \"\"\"\n # validate input\n if not train_x.index.equals(train_y.index):\n raise ValueError(\"Training inputs and targets must have the same index.\")\n\n self.surrogate_model.fit(searchspace, *to_tensor(train_x, train_y))\n\n return self.surrogate_model\n\n def recommend( # noqa: D102\n self,\n searchspace: SearchSpace,\n batch_quantity: int = 1,\n train_x: Optional[pd.DataFrame] = None,\n train_y: Optional[pd.DataFrame] = None,\n allow_repeated_recommendations: bool = False,\n allow_recommending_already_measured: bool = True,\n ) -> pd.DataFrame:\n # See base class.\n\n if _ONNX_INSTALLED and isinstance(self.surrogate_model, CustomONNXSurrogate):\n CustomONNXSurrogate.validate_compatibility(searchspace)\n\n acqf = self.setup_acquisition_function(searchspace, train_x, train_y)\n\n if searchspace.type == SearchSpaceType.DISCRETE:\n return _select_candidates_and_recommend(\n searchspace,\n partial(self._recommend_discrete, acqf),\n batch_quantity,\n allow_repeated_recommendations,\n allow_recommending_already_measured,\n )\n if searchspace.type == SearchSpaceType.CONTINUOUS:\n return self._recommend_continuous(acqf, searchspace, batch_quantity)\n return self._recommend_hybrid(acqf, searchspace, 
batch_quantity)\n\n def _recommend_discrete(\n self,\n acquisition_function: Callable,\n searchspace: SearchSpace,\n candidates_comp: pd.DataFrame,\n batch_quantity: int,\n ) -> pd.Index:\n \"\"\"Calculate recommendations in a discrete search space.\n\n Args:\n acquisition_function: The acquisition function used for choosing the\n recommendation.\n searchspace: The discrete search space in which the recommendations should\n be made.\n candidates_comp: The computational representation of all possible\n candidates.\n batch_quantity: The size of the calculated batch.\n\n Raises:\n NotImplementedError: If the function is not implemented by the child class.\n\n Returns:\n The indices of the recommended points with respect to the\n computational representation.\n \"\"\"\n raise NotImplementedError()\n\n def _recommend_continuous(\n self,\n acquisition_function: Callable,\n searchspace: SearchSpace,\n batch_quantity: int,\n ) -> pd.DataFrame:\n \"\"\"Calculate recommendations in a continuous search space.\n\n Args:\n acquisition_function: The acquisition function used for choosing the\n recommendation.\n searchspace: The continuous search space in which the recommendations should\n be made.\n batch_quantity: The size of the calculated batch.\n\n Raises:\n NotImplementedError: If the function is not implemented by the child class.\n\n Returns:\n The recommended points.\n \"\"\"\n raise NotImplementedError()\n\n def _recommend_hybrid(\n self,\n acquisition_function: Callable,\n searchspace: SearchSpace,\n batch_quantity: int,\n ) -> pd.DataFrame:\n \"\"\"Calculate recommendations in a hybrid search space.\n\n Args:\n acquisition_function: The acquisition function used for choosing the\n recommendation.\n searchspace: The hybrid search space in which the recommendations should\n be made.\n batch_quantity: The size of the calculated batch.\n\n Raises:\n NotImplementedError: If the function is not implemented by the child class.\n\n Returns:\n The recommended points.\n \"\"\"\n raise NotImplementedError()"
},
{
"identifier": "NaiveHybridRecommender",
"path": "baybe/recommenders/bayesian.py",
"snippet": "class NaiveHybridRecommender(Recommender):\n \"\"\"Recommend points by independent optimization of subspaces.\n\n This recommender splits the hybrid search space in the discrete and continuous\n subspace. Each of the subspaces is optimized on its own, and the recommenders for\n those subspaces can be chosen upon initilaization. If this recommender is used on\n a non-hybrid space, it uses the corresponding recommender.\n \"\"\"\n\n # TODO: This class (and potentially the recommender function signatures) need to\n # be refactored such that there is no more coupling to BayesianRecommender and it\n # can be moved to recommender.py\n\n # Class variables\n compatibility: ClassVar[SearchSpaceType] = SearchSpaceType.HYBRID\n # See base class.\n\n # Object variables\n # TODO This used to be a Union of BayesianRecommender and NonPredictiveRecommender.\n # Due to serialization issues, this was changed to Recommender in general.\n # As we currently do not have other subclasses of Recommender, this solution works\n # for now. Still, we manually check whether the disc_recommender belogns to one of\n # these two subclasses such that we might be able to easily spot a potential problem\n # that might come up when implementing new subclasses of Recommender\n disc_recommender: Recommender = field(factory=SequentialGreedyRecommender)\n \"\"\"The recommender used for the discrete subspace. Default:\n :class:`baybe.recommenders.bayesian.SequentialGreedyRecommender`\"\"\"\n\n cont_recommender: BayesianRecommender = field(factory=SequentialGreedyRecommender)\n \"\"\"The recommender used for the continuous subspace. Default:\n :class:`baybe.recommenders.bayesian.SequentialGreedyRecommender`\"\"\"\n\n def recommend( # noqa: D102\n self,\n searchspace: SearchSpace,\n batch_quantity: int = 1,\n train_x: Optional[pd.DataFrame] = None,\n train_y: Optional[pd.DataFrame] = None,\n allow_repeated_recommendations: bool = False,\n allow_recommending_already_measured: bool = True,\n ) -> pd.DataFrame:\n # See base class.\n\n # First check whether the disc_recommender is either bayesian or non predictive\n is_bayesian_recommender = isinstance(self.disc_recommender, BayesianRecommender)\n is_np_recommender = isinstance(self.disc_recommender, NonPredictiveRecommender)\n\n if (not is_bayesian_recommender) and (not is_np_recommender):\n raise NotImplementedError(\n \"\"\"The discrete recommender should be either a Bayesian or a\n NonPredictiveRecommender.\"\"\"\n )\n\n # Check if the space is a pure continuous or discrete space first and just use\n # the corresponding recommendation function in that case\n degenerate_recommender = None\n if searchspace.type == SearchSpaceType.DISCRETE:\n degenerate_recommender = self.disc_recommender\n elif searchspace.type == SearchSpaceType.CONTINUOUS:\n degenerate_recommender = self.cont_recommender\n if degenerate_recommender is not None:\n return degenerate_recommender.recommend(\n searchspace=searchspace,\n batch_quantity=batch_quantity,\n train_x=train_x,\n train_y=train_y,\n allow_repeated_recommendations=allow_repeated_recommendations,\n allow_recommending_already_measured=allow_recommending_already_measured,\n )\n\n # We are in a hybrid setting now\n\n # We will attach continuous parts to discrete parts and the other way round.\n # To make things simple, we sample a single point in the continuous space which\n # will then be attached to every discrete point when the acquisition function\n # is evaluated.\n cont_part = searchspace.continuous.samples_random(1)\n cont_part = 
to_tensor(cont_part).unsqueeze(-2)\n\n # Get discrete candidates. The metadata flags are ignored since the search space\n # is hybrid\n # TODO Slight BOILERPLATE CODE, see recommender.py, ll. 47+\n _, candidates_comp = searchspace.discrete.get_candidates(\n allow_repeated_recommendations=True,\n allow_recommending_already_measured=True,\n )\n\n # Due to different signatures depending on whether the discrete recommender is\n # bayesian or non-predictive, we need to check what kind of recommender we have\n # This is then used to potentially fill the dictionary containing the\n # corresponding keyword and acquisition function.\n acqf_func_dict = {}\n # We now check whether the discrete recommender is bayesian.\n if is_bayesian_recommender:\n # Get access to the recommenders acquisition function\n disc_acqf = self.disc_recommender.setup_acquisition_function(\n searchspace, train_x, train_y\n )\n\n # Construct the partial acquisition function that attaches cont_part\n # whenever evaluating the acquisition function\n disc_acqf_part = PartialAcquisitionFunction(\n acqf=disc_acqf, pinned_part=cont_part, pin_discrete=False\n )\n acqf_func_dict = {\"acquisition_function\": disc_acqf_part}\n\n # Call the private function of the discrete recommender and get the indices\n disc_rec_idx = self.disc_recommender._recommend_discrete(\n **(acqf_func_dict),\n searchspace=searchspace,\n candidates_comp=candidates_comp,\n batch_quantity=batch_quantity,\n )\n\n # Get one random discrete point that will be attached when evaluating the\n # acquisition function in the discrete space.\n disc_part = searchspace.discrete.comp_rep.loc[disc_rec_idx].sample(1)\n disc_part = to_tensor(disc_part).unsqueeze(-2)\n\n # Setup a fresh acquisition function for the continuous recommender\n cont_acqf = self.cont_recommender.setup_acquisition_function(\n searchspace, train_x, train_y\n )\n\n # Construct the continuous space as a standalone space\n cont_acqf_part = PartialAcquisitionFunction(\n acqf=cont_acqf, pinned_part=disc_part, pin_discrete=True\n )\n # Call the private function of the continuous recommender\n rec_cont = self.cont_recommender._recommend_continuous(\n cont_acqf_part, searchspace, batch_quantity\n )\n\n # Glue the solutions together and return them\n rec_disc_exp = searchspace.discrete.exp_rep.loc[disc_rec_idx]\n rec_cont.index = rec_disc_exp.index\n rec_exp = pd.concat([rec_disc_exp, rec_cont], axis=1)\n return rec_exp"
},
{
"identifier": "SequentialGreedyRecommender",
"path": "baybe/recommenders/bayesian.py",
"snippet": "class SequentialGreedyRecommender(BayesianRecommender):\n \"\"\"Recommender using sequential Greedy optimization.\n\n This recommender implements the BoTorch functions ``optimize_acqf_discrete``,\n ``optimize_acqf`` and ``optimize_acqf_mixed`` for the optimization of discrete,\n continuous and hybrid search spaces. In particular, it can be applied in all\n kinds of search spaces.\n It is important to note that this algorithm performs a brute-force optimization in\n hybrid search spaces which can be computationally expensive. Thus, the behavior of\n the algorithm in hybrid search spaces can be controlled by two additional\n parameters.\n \"\"\"\n\n # Class variables\n compatibility: ClassVar[SearchSpaceType] = SearchSpaceType.HYBRID\n # See base class.\n\n # Object variables\n hybrid_sampler: str = field(\n validator=validators.in_([\"None\", \"Farthest\", \"Random\"]), default=\"None\"\n )\n \"\"\"Strategy used for sampling the discrete subspace when performing hybrid search\n space optimization.\"\"\"\n\n sampling_percentage: float = field(default=1.0)\n \"\"\"Percentage of discrete search space that is sampled when performing hybrid search\n space optimization. Ignored when ``hybrid_sampler=\"None\"``.\"\"\"\n\n @sampling_percentage.validator\n def _validate_percentage( # noqa: DOC101, DOC103\n self, _: Any, value: float\n ) -> None:\n \"\"\"Validate that the given value is in fact a percentage.\n\n Raises:\n ValueError: If ``value`` is not between 0 and 1.\n \"\"\"\n if not 0 <= value <= 1:\n raise ValueError(\n f\"Hybrid sampling percentage needs to be between 0 and 1 but is {value}\"\n )\n\n def _recommend_discrete(\n self,\n acquisition_function: Callable,\n searchspace: SearchSpace,\n candidates_comp: pd.DataFrame,\n batch_quantity: int,\n ) -> pd.Index:\n # See base class.\n\n # determine the next set of points to be tested\n candidates_tensor = to_tensor(candidates_comp)\n try:\n points, _ = optimize_acqf_discrete(\n acquisition_function, batch_quantity, candidates_tensor\n )\n except AttributeError as ex:\n raise NoMCAcquisitionFunctionError(\n f\"The '{self.__class__.__name__}' only works with Monte Carlo \"\n f\"acquisition functions.\"\n ) from ex\n\n # retrieve the index of the points from the input dataframe\n # IMPROVE: The merging procedure is conceptually similar to what\n # `SearchSpace._match_measurement_with_searchspace_indices` does, though using\n # a simpler matching logic. 
When refactoring the SearchSpace class to\n # handle continuous parameters, a corresponding utility could be extracted.\n idxs = pd.Index(\n pd.merge(\n candidates_comp.reset_index(),\n pd.DataFrame(points, columns=candidates_comp.columns),\n on=list(candidates_comp),\n )[\"index\"]\n )\n assert len(points) == len(idxs)\n\n return idxs\n\n def _recommend_continuous(\n self,\n acquisition_function: Callable,\n searchspace: SearchSpace,\n batch_quantity: int,\n ) -> pd.DataFrame:\n # See base class.\n\n try:\n points, _ = optimize_acqf(\n acq_function=acquisition_function,\n bounds=searchspace.continuous.param_bounds_comp,\n q=batch_quantity,\n num_restarts=5, # TODO make choice for num_restarts\n raw_samples=10, # TODO make choice for raw_samples\n equality_constraints=[\n c.to_botorch(searchspace.continuous.parameters)\n for c in searchspace.continuous.constraints_lin_eq\n ]\n or None, # TODO: https://github.com/pytorch/botorch/issues/2042\n inequality_constraints=[\n c.to_botorch(searchspace.continuous.parameters)\n for c in searchspace.continuous.constraints_lin_ineq\n ]\n or None, # TODO: https://github.com/pytorch/botorch/issues/2042\n )\n except AttributeError as ex:\n raise NoMCAcquisitionFunctionError(\n f\"The '{self.__class__.__name__}' only works with Monte Carlo \"\n f\"acquisition functions.\"\n ) from ex\n\n # Return optimized points as dataframe\n rec = pd.DataFrame(points, columns=searchspace.continuous.param_names)\n return rec\n\n def _recommend_hybrid(\n self,\n acquisition_function: Callable,\n searchspace: SearchSpace,\n batch_quantity: int,\n ) -> pd.DataFrame:\n \"\"\"Recommend points using the ``optimize_acqf_mixed`` function of BoTorch.\n\n This functions samples points from the discrete subspace, performs optimization\n in the continuous subspace with these points being fixed and returns the best\n found solution.\n **Important**: This performs a brute-force calculation by fixing every possible\n assignment of discrete variables and optimizing the continuous subspace for\n each of them. It is thus computationally expensive.\n\n Args:\n acquisition_function: The acquisition function to be optimized.\n searchspace: The search space in which the recommendations should be made.\n batch_quantity: The size of the calculated batch.\n\n Returns:\n The recommended points.\n\n Raises:\n NoMCAcquisitionFunctionError: If a non Monte Carlo acquisition function\n is chosen.\n \"\"\"\n # Get discrete candidates.\n _, candidates_comp = searchspace.discrete.get_candidates(\n allow_repeated_recommendations=True,\n allow_recommending_already_measured=True,\n )\n\n # Calculate the number of samples from the given percentage\n n_candidates = int(self.sampling_percentage * len(candidates_comp.index))\n\n # Potential sampling of discrete candidates\n if self.hybrid_sampler == \"Farthest\":\n ilocs = farthest_point_sampling(candidates_comp.values, n_candidates)\n candidates_comp = candidates_comp.iloc[ilocs]\n elif self.hybrid_sampler == \"Random\":\n candidates_comp = candidates_comp.sample(n_candidates)\n\n # Prepare all considered discrete configurations in the List[Dict[int, float]]\n # format expected by BoTorch\n # TODO: Currently assumes that discrete parameters are first and continuous\n # second. 
Once parameter redesign [11611] is completed, we might adjust this.\n candidates_comp.columns = list(range(len(candidates_comp.columns)))\n fixed_features_list = candidates_comp.to_dict(\"records\")\n\n # Actual call of the BoTorch optimization routine\n try:\n points, _ = optimize_acqf_mixed(\n acq_function=acquisition_function,\n bounds=searchspace.param_bounds_comp,\n q=batch_quantity,\n num_restarts=5, # TODO make choice for num_restarts\n raw_samples=10, # TODO make choice for raw_samples\n fixed_features_list=fixed_features_list,\n equality_constraints=[\n c.to_botorch(\n searchspace.continuous.parameters,\n idx_offset=len(candidates_comp.columns),\n )\n for c in searchspace.continuous.constraints_lin_eq\n ]\n or None, # TODO: https://github.com/pytorch/botorch/issues/2042\n inequality_constraints=[\n c.to_botorch(\n searchspace.continuous.parameters,\n idx_offset=len(candidates_comp.columns),\n )\n for c in searchspace.continuous.constraints_lin_ineq\n ]\n or None, # TODO: https://github.com/pytorch/botorch/issues/2042\n )\n except AttributeError as ex:\n raise NoMCAcquisitionFunctionError(\n f\"The '{self.__class__.__name__}' only works with Monte Carlo \"\n f\"acquisition functions.\"\n ) from ex\n\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # TODO [14819]: The following code is necessary due to floating point\n # inaccuracies introduced by BoTorch (potentially due to some float32\n # conversion?). The current workaround is the match the recommendations back\n # to the closest candidate points.\n\n # Split discrete and continuous parts\n disc_points = points[:, : len(candidates_comp.columns)]\n cont_points = points[:, len(candidates_comp.columns) :]\n\n # Find the closest match with the discrete candidates\n candidates_comp_np = candidates_comp.to_numpy()\n disc_points_np = disc_points.numpy()\n if not disc_points_np.flags[\"C_CONTIGUOUS\"]:\n disc_points_np = np.ascontiguousarray(disc_points_np)\n if not candidates_comp_np.flags[\"C_CONTIGUOUS\"]:\n candidates_comp_np = np.ascontiguousarray(candidates_comp_np)\n disc_idxs_iloc = pairwise_distances_argmin(\n disc_points_np, candidates_comp_np, metric=\"manhattan\"\n )\n\n # Get the actual search space dataframe indices\n disc_idxs_loc = candidates_comp.iloc[disc_idxs_iloc].index\n\n # Get experimental representation of discrete and continuous parts\n rec_disc_exp = searchspace.discrete.exp_rep.loc[disc_idxs_loc]\n rec_cont_exp = pd.DataFrame(\n cont_points, columns=searchspace.continuous.param_names\n )\n\n # Adjust the index of the continuous part and concatenate both\n rec_cont_exp.index = rec_disc_exp.index\n rec_exp = pd.concat([rec_disc_exp, rec_cont_exp], axis=1)\n # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n\n return rec_exp"
},
{
"identifier": "SearchSpaceType",
"path": "baybe/searchspace/core.py",
"snippet": "class SearchSpaceType(Enum):\n \"\"\"Enum class for different types of search spaces and respective compatibility.\"\"\"\n\n DISCRETE = \"DISCRETE\"\n \"\"\"Flag for discrete search spaces resp. compatibility with discrete search\n spaces.\"\"\"\n\n CONTINUOUS = \"CONTINUOUS\"\n \"\"\"Flag for continuous search spaces resp. compatibility with continuous\n search spaces.\"\"\"\n\n EITHER = \"EITHER\"\n \"\"\"Flag compatibility with either discrete or continuous, but not hybrid\n search spaces.\"\"\"\n\n HYBRID = \"HYBRID\"\n \"\"\"Flag for hybrid search spaces resp. compatibility with hybrid search spaces.\"\"\""
},
{
"identifier": "Strategy",
"path": "baybe/strategies/base.py",
"snippet": "class Strategy(SerialMixin, ABC):\n \"\"\"Abstract base class for all BayBE strategies.\"\"\"\n\n allow_repeated_recommendations: bool = field(default=False, kw_only=True)\n \"\"\"Allow to make recommendations that were already recommended earlier. This only\n has an influence in discrete search spaces.\"\"\"\n\n allow_recommending_already_measured: bool = field(default=False, kw_only=True)\n \"\"\"Allow to output recommendations that were measured previously. This only has an\n influence in discrete search spaces.\"\"\"\n\n @abstractmethod\n def select_recommender(\n self,\n searchspace: SearchSpace,\n batch_quantity: int = 1,\n train_x: Optional[pd.DataFrame] = None,\n train_y: Optional[pd.DataFrame] = None,\n ) -> Recommender:\n \"\"\"Select a recommender for the given experimentation context.\n\n Args:\n searchspace: See :func:`baybe.strategies.base.Strategy.recommend`.\n batch_quantity: See :func:`baybe.strategies.base.Strategy.recommend`.\n train_x: See :func:`baybe.strategies.base.Strategy.recommend`.\n train_y: See :func:`baybe.strategies.base.Strategy.recommend`.\n\n Returns:\n The selected recommender.\n \"\"\"\n\n def recommend(\n self,\n searchspace: SearchSpace,\n batch_quantity: int = 1,\n train_x: Optional[pd.DataFrame] = None,\n train_y: Optional[pd.DataFrame] = None,\n ) -> pd.DataFrame:\n \"\"\"Recommend the next experiments to be conducted.\n\n Args:\n searchspace: The search space in which the experiments are conducted.\n batch_quantity: The number of experiments to be conducted in parallel.\n train_x: The features of the conducted experiments.\n train_y: The corresponding response values.\n\n Returns:\n The DataFrame with the specific experiments recommended.\n \"\"\"\n recommender = self.select_recommender(\n searchspace,\n batch_quantity,\n train_x,\n train_y,\n )\n return recommender.recommend(\n searchspace,\n batch_quantity,\n train_x,\n train_y,\n self.allow_repeated_recommendations,\n self.allow_recommending_already_measured,\n )"
},
{
"identifier": "get_available_surrogates",
"path": "baybe/surrogates/base.py",
"snippet": "def get_available_surrogates() -> List[Type[Surrogate]]:\n \"\"\"List all available surrogate models.\n\n Returns:\n A list of available surrogate classes.\n \"\"\"\n # List available names\n available_names = {\n cl.__name__\n for cl in get_subclasses(Surrogate)\n if cl.__name__ not in _WRAPPER_MODELS\n }\n\n # Convert them to classes\n available_classes = [\n getattr(sys.modules[__package__], mdl_name, None)\n for mdl_name in available_names\n ]\n\n # TODO: The initialization of the classes is currently necessary for the renaming\n # to take place (see [15436] and NOTE in `structure_surrogate`).\n [cl() for cl in available_classes if cl is not None]\n\n return [cl for cl in available_classes if cl is not None]"
},
{
"identifier": "get_subclasses",
"path": "baybe/utils/basic.py",
"snippet": "def get_subclasses(cls: _T, recursive: bool = True, abstract: bool = False) -> List[_T]:\n \"\"\"Return a list of subclasses for the given class.\n\n Args:\n cls: The base class to retrieve subclasses for.\n recursive: If ``True``, indirect subclasses (i.e. subclasses of subclasses)\n are included.\n abstract: If ``True``, abstract subclasses are included.\n\n Returns:\n A list of subclasses for the given class.\n \"\"\"\n from baybe.utils import is_abstract\n\n subclasses = []\n for subclass in cls.__subclasses__():\n # Append direct subclass only if it is not abstract\n if abstract or not is_abstract(subclass):\n subclasses.append(subclass)\n\n # If requested, add indirect subclasses\n if recursive:\n subclasses.extend(get_subclasses(subclass, abstract=abstract))\n\n return subclasses"
},
{
"identifier": "run_iterations",
"path": "tests/conftest.py",
"snippet": "def run_iterations(\n campaign: Campaign, n_iterations: int, batch_quantity: int, add_noise: bool = True\n) -> None:\n \"\"\"Run a campaign for some fake iterations.\n\n Args:\n campaign: The campaign encapsulating the experiments.\n n_iterations: Number of iterations run.\n batch_quantity: Number of recommended points per iteration.\n add_noise: Flag whether measurement noise should be added every 2nd iteration.\n \"\"\"\n for k in range(n_iterations):\n rec = campaign.recommend(batch_quantity=batch_quantity)\n # dont use parameter noise for these tests\n\n add_fake_results(rec, campaign)\n if add_noise and (k % 2):\n add_parameter_noise(rec, campaign.parameters, noise_level=0.1)\n\n campaign.add_measurements(rec)"
}
] | from typing import get_args, get_type_hints
from baybe.recommenders.base import NonPredictiveRecommender, Recommender
from baybe.recommenders.bayesian import (
BayesianRecommender,
NaiveHybridRecommender,
SequentialGreedyRecommender,
)
from baybe.searchspace import SearchSpaceType
from baybe.strategies.base import Strategy
from baybe.surrogates import get_available_surrogates
from baybe.utils.basic import get_subclasses
from .conftest import run_iterations
import pytest | 8,272 | # TODO: This file needs to be refactored.
"""Tests various configurations for a small number of iterations."""
########################################################################################
# Settings of the individual components to be tested
########################################################################################
valid_acquisition_functions = get_args(
get_type_hints(BayesianRecommender.__init__)["acquisition_function_cls"]
)
valid_surrogate_models = [cls() for cls in get_available_surrogates()]
| # TODO: This file needs to be refactored.
"""Tests various configurations for a small number of iterations."""
########################################################################################
# Settings of the individual components to be tested
########################################################################################
valid_acquisition_functions = get_args(
get_type_hints(BayesianRecommender.__init__)["acquisition_function_cls"]
)
valid_surrogate_models = [cls() for cls in get_available_surrogates()] | valid_initial_recommenders = [cls() for cls in get_subclasses(NonPredictiveRecommender)] | 0 | 2023-11-27 17:02:40+00:00 | 12k |
UX-Decoder/LLaVA-Grounding | llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "build_attn_bias",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "MPTBlock",
"path": "llava/model/language_model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)"
},
{
"identifier": "SharedEmbedding",
"path": "llava/model/language_model/mpt/custom_embedding.py",
"snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)"
},
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "llava/model/language_model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}"
},
{
"identifier": "MPTConfig",
"path": "llava/model/language_model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. 
Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) 
and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')"
},
{
"identifier": "AutoTokenizerForMOD",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')"
},
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. 
' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')"
},
{
"identifier": "init_empty_weights",
"path": "llava/model/language_model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}"
},
{
"identifier": "generic_param_init_fn_",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')"
}
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 7,343 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) | self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max) | 1 | 2023-12-04 10:59:21+00:00 | 12k |
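A hedged sketch for the row above: its gold continuation fills the lazily allocated `attn_bias` buffer with `build_attn_bias`, whose source is quoted verbatim in the row's context list together with `attn_bias_shape`. The example below assumes the repository root is on `sys.path` so the quoted `attention.py` imports under the path shown, and the head/sequence sizes are made up for illustration.

```python
# Sketch only: the import path and (n_heads, seq_len) values are assumptions;
# the call pattern mirrors MPTModel._attn_bias in the row above.
import torch
from llava.model.language_model.mpt.attention import attn_bias_shape, build_attn_bias

n_heads, seq_len = 16, 128
shape = attn_bias_shape("torch", n_heads, seq_len, alibi=True,
                        prefix_lm=False, causal=True, use_sequence_id=False)
# For causal ALiBi without sequence ids this is (1, n_heads, 1, seq_len).
attn_bias = torch.zeros(shape)
# Populate the zero buffer in place -- the row's gold next line does the same
# with the model's own config values.
attn_bias = build_attn_bias("torch", attn_bias, n_heads, seq_len,
                            causal=True, alibi=True, alibi_bias_max=8)
print(attn_bias.shape)  # expected torch.Size([1, 16, 1, 128]) for this configuration
```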
daveredrum/SceneTex | models/modules/studio.py | [
{
"identifier": "init_trajectory",
"path": "lib/camera_helper.py",
"snippet": "def init_trajectory(dist_list, elev_list, azim_list, at):\n Rs, Ts = [], []\n for dist, elev, azim in zip(dist_list, elev_list, azim_list):\n R, T = look_at_view_transform(dist, elev, azim, at=at)\n\n Rs.append(R) # 1, 3, 3\n Ts.append(T) # 1, 3\n\n return Rs, Ts"
},
{
"identifier": "init_blenderproc_trajectory",
"path": "lib/camera_helper.py",
"snippet": "def init_blenderproc_trajectory(trajectory, device):\n \"\"\"\n This function only applies for Blenderproc cameras and original mesh data\n \"\"\"\n Rs, Ts = [], []\n for _, viewpoint in trajectory.items():\n c2w = torch.FloatTensor(viewpoint[\"matrix\"]).to(device)\n\n calibrate_axis = torch.FloatTensor([\n [-1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ]).to(device)\n rot_z = torch.FloatTensor([\n [np.cos(np.pi), -np.sin(np.pi), 0],\n [np.sin(np.pi), np.cos(np.pi), 0],\n [0, 0, 1]\n ]).to(device)\n rot_x = torch.FloatTensor([\n [1, 0, 0, 0],\n [0, np.cos(np.pi/2), -np.sin(np.pi/2), 0],\n [0, np.sin(np.pi/2), np.cos(np.pi/2), 0],\n [0, 0, 0, 1]\n ]).to(device)\n\n c2w = calibrate_axis @ c2w\n c2w = rot_x @ c2w\n\n t = c2w[:3,-1] # Extract translation of the camera\n r = c2w[:3, :3] @ rot_z # Extract rotation matrix of the camera\n\n # horizontally flip the image\n flip_x = torch.FloatTensor([\n [-1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]\n ]).to(device)\n r = r @ flip_x\n\n t = t @ r # Make rotation local\n\n Rs.append(r.unsqueeze(0))\n Ts.append(t.unsqueeze(0))\n\n return Rs, Ts"
},
{
"identifier": "init_camera_R_T",
"path": "lib/camera_helper.py",
"snippet": "def init_camera_R_T(R, T, image_size, device, fov=60):\n \"\"\"init camera using R and T matrics\n\n Args:\n R (torch.FloatTensor): Rotation matrix, (N, 3, 3)\n T (torch.FloatTensor): Translation matrix, (N, 3)\n image_size (int): rendering size\n device (torch.device): CPU or GPU\n\n Returns:\n camera: PyTorch3D camera instance\n \"\"\"\n\n if isinstance(image_size, int):\n image_size = torch.tensor([image_size, image_size]).unsqueeze(0)\n elif isinstance(image_size, tuple):\n image_size = torch.tensor(image_size).unsqueeze(0)\n else:\n raise TypeError(\"invalid image size.\")\n\n # cameras = PerspectiveCameras(R=R, T=T, device=device, image_size=image_size)\n cameras = FoVPerspectiveCameras(R=R, T=T, device=device, fov=fov)\n\n return cameras"
},
{
"identifier": "init_renderer",
"path": "lib/render_helper.py",
"snippet": "def init_renderer(camera, shader, image_size, faces_per_pixel):\n raster_settings = RasterizationSettings(image_size=image_size, faces_per_pixel=faces_per_pixel)\n renderer = MeshRendererWithFragments(\n rasterizer=MeshRasterizer(\n cameras=camera,\n raster_settings=raster_settings\n ),\n shader=shader\n )\n\n return renderer"
},
{
"identifier": "init_flat_texel_shader",
"path": "lib/shading_helper.py",
"snippet": "def init_flat_texel_shader(camera, device, blend_params=BlendParams()):\n shader=FlatTexelShader(\n cameras=camera,\n device=device,\n blend_params=blend_params\n )\n \n return shader"
},
{
"identifier": "get_visible_pixel_uvs",
"path": "lib/projection_helper.py",
"snippet": "@torch.no_grad()\ndef get_visible_pixel_uvs(mesh, renderer, faces_verts_uvs):\n fragments = renderer.rasterizer(mesh)\n pixel_uvs = interpolate_face_attributes(\n fragments.pix_to_face, fragments.bary_coords, faces_verts_uvs\n ) # NxHsxWsxKx2\n\n return pixel_uvs"
},
{
"identifier": "get_all_4_locations",
"path": "lib/projection_helper.py",
"snippet": "def get_all_4_locations(values_y, values_x):\n y_0 = torch.floor(values_y)\n y_1 = torch.ceil(values_y)\n x_0 = torch.floor(values_x)\n x_1 = torch.ceil(values_x)\n\n return torch.cat([y_0, y_0, y_1, y_1], 0).long(), torch.cat([x_0, x_1, x_0, x_1], 0).long()"
},
{
"identifier": "MLP",
"path": "models/modules/modules.py",
"snippet": "class MLP(nn.Module):\n def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True, dtype=torch.float32):\n super().__init__()\n self.dim_in = dim_in\n self.dim_out = dim_out\n self.dim_hidden = dim_hidden\n self.num_layers = num_layers\n\n net = []\n for l in range(num_layers):\n net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden,\n self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias, dtype=dtype))\n\n self.net = nn.ModuleList(net)\n\n def forward(self, x):\n for l in range(self.num_layers):\n x = self.net[l](x)\n if l != self.num_layers - 1:\n x = F.relu(x, inplace=True)\n return x"
},
{
"identifier": "Siren",
"path": "models/modules/modules.py",
"snippet": "class Siren(nn.Module):\n def __init__(self, in_features, hidden_features, hidden_layers, out_features, outermost_linear=False, \n first_omega_0=30, hidden_omega_0=30.):\n super().__init__()\n \n self.net = []\n self.net.append(SineLayer(in_features, hidden_features, \n is_first=True, omega_0=first_omega_0))\n\n for i in range(hidden_layers):\n self.net.append(SineLayer(hidden_features, hidden_features, \n is_first=False, omega_0=hidden_omega_0))\n\n if outermost_linear:\n final_linear = nn.Linear(hidden_features, out_features)\n \n with torch.no_grad():\n final_linear.weight.uniform_(-np.sqrt(6 / hidden_features) / hidden_omega_0, \n np.sqrt(6 / hidden_features) / hidden_omega_0)\n \n self.net.append(final_linear)\n else:\n self.net.append(SineLayer(hidden_features, out_features, \n is_first=False, omega_0=hidden_omega_0))\n \n self.net = nn.Sequential(*self.net)\n \n def forward(self, coords):\n outputs = self.net(coords)\n return outputs"
},
{
"identifier": "HashGrid",
"path": "models/modules/modules.py",
"snippet": "class HashGrid(nn.Module):\n def __init__(self, in_channels,\n otype, n_levels, n_features_per_level, log2_hashmap_size, base_resolution, # the same as in tinycudann\n max_resolution, # NOTE need to compute per_level_scale ,\n dtype=torch.float32 # half precision might lead to NaN\n ):\n \n super().__init__()\n\n self.otype = otype\n self.n_levels = n_levels\n self.n_features_per_level = n_features_per_level\n self.log2_hashmap_size = log2_hashmap_size\n self.base_resolution = base_resolution\n self.max_resolution = max_resolution\n self.per_level_scale = self.get_per_level_scale()\n\n self.config = {\n \"otype\": self.otype,\n \"n_levels\": self.n_levels,\n \"n_features_per_level\": self.n_features_per_level,\n \"log2_hashmap_size\": self.log2_hashmap_size,\n \"base_resolution\": self.base_resolution,\n \"per_level_scale\": self.per_level_scale\n }\n self.hashgrid = tcnn.Encoding(in_channels, self.config, dtype=dtype)\n\n def get_per_level_scale(self):\n return np.power(self.max_resolution / self.base_resolution, 1 / self.n_levels)\n \n def forward(self, inputs):\n return self.hashgrid(inputs)"
},
{
"identifier": "HashGridMLP",
"path": "models/modules/modules.py",
"snippet": "class HashGridMLP(nn.Module):\n def __init__(self, in_channels,\n hashgrid_config, mlp_config\n ):\n \n super().__init__()\n\n self.hashgrid_config = {\n \"otype\": hashgrid_config.otype,\n \"n_levels\": hashgrid_config.n_levels,\n \"n_features_per_level\": hashgrid_config.n_features_per_level,\n \"log2_hashmap_size\": hashgrid_config.log2_hashmap_size,\n \"base_resolution\": hashgrid_config.base_resolution,\n \"per_level_scale\": self.get_per_level_scale(\n hashgrid_config.max_resolution,\n hashgrid_config.base_resolution,\n hashgrid_config.n_levels\n )\n }\n self.MLP_config = {\n \"otype\": mlp_config.otype,\n \"activation\": mlp_config.activation,\n \"output_activation\": mlp_config.output_activation,\n \"n_neurons\": mlp_config.n_neurons,\n \"n_hidden_layers\": mlp_config.n_hidden_layers\n }\n\n self.net = tcnn.NetworkWithInputEncoding(in_channels, mlp_config.out_channels, self.hashgrid_config, self.MLP_config)\n\n def get_per_level_scale(self, max_resolution, base_resolution, n_levels):\n return np.power(max_resolution / base_resolution, 1 / n_levels)\n \n def forward(self, inputs):\n return self.net(inputs)"
},
{
"identifier": "AnchorTransformer",
"path": "models/modules/anchors.py",
"snippet": "class AnchorTransformer(nn.Module):\n def __init__(self, \n config,\n device,\n anchor_dim,\n num_instances # this must be specified on init\n ): \n \n super().__init__()\n\n self.config = config\n self.device = device\n\n self.anchor_dim = anchor_dim\n self.num_instances = num_instances\n\n self.hidden_size = config.anchor_config.hidden_size\n self.num_heads = config.anchor_config.num_heads\n self.num_mapping_layers = config.anchor_config.num_mapping_layers\n\n if self.config.anchor_config.anchor_type == \"self-attention\":\n if self.num_mapping_layers == 0 and self.num_heads == 1:\n self.hidden_size = anchor_dim\n self.map_key = nn.Identity()\n self.map_query = nn.Identity()\n self.map_value = nn.Identity()\n\n self.attention = nn.MultiheadAttention(\n anchor_dim,\n 1,\n batch_first=True # (batch, seq, feature)\n )\n self.map_outputs = nn.Identity()\n else:\n self.map_key = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)\n self.map_query = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)\n self.map_value = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)\n\n self.attention = nn.MultiheadAttention(\n self.hidden_size * self.num_heads,\n self.num_heads,\n batch_first=True # (batch, seq, feature)\n )\n self.map_outputs = MLP(self.hidden_size * self.num_heads, anchor_dim, self.hidden_size, self.num_mapping_layers)\n\n elif self.config.anchor_config.anchor_type == \"cross-attention\":\n if self.num_mapping_layers == 0 and self.num_heads == 1:\n self.hidden_size = anchor_dim\n self.map_key = nn.Identity()\n self.map_query = nn.Identity()\n self.map_value = nn.Identity()\n\n self.attention = nn.MultiheadAttention(\n anchor_dim,\n 1,\n batch_first=True # (batch, seq, feature)\n )\n self.map_outputs = nn.Identity()\n\n else:\n self.map_key = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)\n self.map_query = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)\n self.map_value = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)\n\n self.attention = nn.MultiheadAttention(\n self.hidden_size * self.num_heads,\n self.num_heads,\n batch_first=True # (batch, seq, feature)\n )\n self.map_outputs = MLP(self.hidden_size * self.num_heads, anchor_dim, self.hidden_size, self.num_mapping_layers)\n\n elif self.config.anchor_config.anchor_type == \"flash-attention\":\n if self.num_mapping_layers == 0 and self.num_heads == 1:\n self.hidden_size = anchor_dim\n self.map_key = nn.Identity()\n self.map_query = nn.Identity()\n self.map_value = nn.Identity()\n self.map_outputs = nn.Identity()\n\n else:\n self.map_key = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)\n self.map_query = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)\n self.map_value = MLP(anchor_dim, self.hidden_size * self.num_heads, self.hidden_size, self.num_mapping_layers)\n self.map_outputs = MLP(self.hidden_size * self.num_heads, anchor_dim, self.hidden_size, self.num_mapping_layers)\n\n self.attention = FlashCrossAttention() # NOTE input must be half precision\n\n def _map_inputs(self, query, key, value):\n if self.config.anchor_config.output_type == \"token\":\n # inputs = torch.cat([inputs, self.embedding.unsqueeze(1)], dim=1)\n avg_token = query.mean(1, keepdim=True)\n query = 
torch.cat([query, avg_token], dim=1)\n\n key = self.map_key(key)\n query = self.map_query(query)\n value = self.map_value(value)\n \n return query, key, value\n \n def _map_outputs(self, inputs):\n outputs = self.map_outputs(inputs) # (M, L, C)\n\n if self.config.anchor_config.output_type == \"token\":\n outputs = outputs[:, -1, :]\n elif self.config.anchor_config.output_type == \"mean\":\n outputs = outputs.mean(1)\n else:\n pass\n\n return outputs\n \n def _map_features(self, features, anchors, instances_in_view, pad_seq=False):\n B, H, W, C = features.shape\n features = features.reshape(-1, C)\n instances_in_view = instances_in_view.reshape(-1)\n labels = torch.unique(instances_in_view).long()\n\n # outputs\n seq_features, seq_anchors, seq_labels, seqlens, seqlens_k = [], [], [], [0], [0]\n cu_seqlens, max_seqlen, cu_seqlens_k, max_seqlen_k = None, None, None, None\n map_flag = False\n\n for label in labels:\n if label == 0: continue\n instance_mask = instances_in_view == label\n instance_feature = features[instance_mask]\n instace_labels = instances_in_view[instance_mask]\n seq_features.append(instance_feature)\n seq_anchors.append(anchors[label-1])\n seqlen = instance_feature.shape[0]\n seqlens.append(seqlen)\n seqlens_k.append(anchors.shape[1])\n seq_labels.append(instace_labels)\n\n if len(seq_features) > 0:\n\n map_flag = True\n\n if pad_seq:\n seq_features = pad_sequence(seq_features, batch_first=True)\n seq_labels = pad_sequence(seq_labels, batch_first=True)\n seq_anchors = torch.stack(seq_anchors)\n else:\n seq_features = torch.cat(seq_features, dim=0)\n seq_labels = torch.cat(seq_labels, dim=0)\n seq_anchors = torch.cat(seq_anchors, dim=0)\n cu_seqlens = torch.cumsum(torch.IntTensor(seqlens), dim=0).to(self.device).int()\n max_seqlen = max(seqlens)\n cu_seqlens_k = torch.cumsum(torch.IntTensor(seqlens_k), dim=0).to(self.device).int()\n max_seqlen_k = max(seqlens_k)\n\n return seq_features, seq_labels, seq_anchors, cu_seqlens, max_seqlen, cu_seqlens_k, max_seqlen_k, map_flag\n\n def _unmap_features(self, features, seq_labels, instances_in_view):\n *_, C = features.shape\n B, H, W = instances_in_view.shape\n unmapped = torch.zeros(B, H, W, C).to(self.device)\n\n if self.config.anchor_config.anchor_type == \"flash-attention\":\n unmapped = unmapped.reshape(-1, C)\n instances_in_view = instances_in_view.reshape(-1)\n assert unmapped.shape[0] == instances_in_view.shape[0]\n labels = torch.unique(instances_in_view)\n for label in labels:\n if label == 0: continue\n unmapped[instances_in_view == label] = features[seq_labels == label]\n\n unmapped = unmapped.reshape(B, H, W, C)\n\n elif self.config.anchor_config.anchor_type == \"cross-attention\":\n unmapped = unmapped.reshape(-1, C)\n instances_in_view = instances_in_view.reshape(-1)\n assert unmapped.shape[0] == instances_in_view.shape[0]\n for i in range(features.shape[0]): # feature indices indicate instances\n unmapped[instances_in_view == i+1] = features[seq_labels == i+1]\n\n unmapped = unmapped.reshape(B, H, W, C)\n\n return unmapped\n \n def _apply_outputs(self, features, anchors, instances_in_view):\n if self.config.anchor_config.anchor_type in [\"self-attention\", \"mean\"]:\n B, H, W = instances_in_view.shape # NOTE instance_in_view must in shape (B, H, W)\n instances_in_view = instances_in_view.reshape(-1) - 1 # instances are indexed from 0, -1 is the background\n background_mask = instances_in_view == -1\n anchor_features = anchors[instances_in_view.long(), :]\n anchor_features[background_mask] = 0\n anchor_features = 
anchor_features.reshape(B, H, W, -1)\n else:\n anchor_features = anchors\n\n # output\n features = features + anchor_features\n\n return features\n \n def _prepare_flash_attention_inputs(self, query, key, value):\n query = query.reshape(-1, self.num_heads, self.hidden_size)\n key = key.reshape(-1, self.num_heads, self.hidden_size)\n value = value.reshape(-1, self.num_heads, self.hidden_size)\n key_value = torch.stack([key, value], dim=1)\n\n return query, key_value\n\n def forward(self, anchors, features, instances_in_view):\n assert len(anchors.shape) == 3, \"anchors should be in shape (M, L, C)\"\n assert len(features.shape) == 4, \"features should be in shape (B, H, W, C)\"\n\n if self.config.anchor_config.anchor_type == \"self-attention\":\n query, key, value = self._map_inputs(anchors, anchors, anchors)\n anchors, _ = self.attention(query, key, value)\n anchors = self._map_outputs(anchors)\n elif self.config.anchor_config.anchor_type == \"cross-attention\":\n seq_features, seq_labels, seq_anchors, cu_seqlens, max_seqlen, cu_seqlens_k, max_seqlen_k, map_flag = self._map_features(features, anchors, instances_in_view, True)\n if map_flag:\n seq_features, seq_anchors, seq_anchors = self._map_inputs(seq_features, seq_anchors, seq_anchors)\n seq_features, _ = self.attention(\n seq_features,\n seq_anchors,\n seq_anchors\n )\n seq_features = self._map_outputs(seq_features)\n seq_features = self._unmap_features(seq_features, seq_labels, instances_in_view)\n anchors = seq_features\n else:\n anchors = features\n elif self.config.anchor_config.anchor_type == \"flash-attention\":\n seq_features, seq_labels, seq_anchors, cu_seqlens, max_seqlen, cu_seqlens_k, max_seqlen_k, map_flag = self._map_features(features, anchors, instances_in_view)\n if map_flag:\n seq_features, seq_anchors, seq_anchors = self._map_inputs(seq_features, seq_anchors, seq_anchors)\n seq_query, seq_key_value = self._prepare_flash_attention_inputs(seq_features, seq_anchors, seq_anchors)\n seq_features = self.attention(\n seq_query.half(), # (Sq, H, C)\n seq_key_value.half(), # (Sk, 2, H_k, C)\n cu_seqlens=cu_seqlens, max_seqlen=max_seqlen,\n cu_seqlens_k=cu_seqlens_k, max_seqlen_k=max_seqlen_k\n ).to(torch.float32) # (Sq, H, C)\n seq_features = self._map_outputs(seq_features.reshape(seq_features.shape[0], -1)) # (Sq, C)\n seq_features = self._unmap_features(seq_features, seq_labels, instances_in_view)\n anchors = seq_features\n else:\n anchors = features\n else:\n anchors = anchors.mean(1)\n\n # output\n features = self._apply_outputs(features, anchors, instances_in_view)\n\n return features"
}
] | import os
import json
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
import torchvision
import numpy as np
import sys
from omegaconf import OmegaConf
from pytorch3d.ops import interpolate_face_attributes
from pytorch3d.renderer import look_at_view_transform
from lib.camera_helper import init_trajectory, init_blenderproc_trajectory, init_camera_R_T
from lib.render_helper import init_renderer
from lib.shading_helper import init_flat_texel_shader
from lib.projection_helper import get_visible_pixel_uvs, get_all_4_locations
from models.modules.modules import MLP, Siren, HashGrid, HashGridMLP
from models.modules.anchors import AnchorTransformer | 7,320 | # spherical cameras
self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)
dist_linspace = np.linspace(
self.sphere_cameras.dist.min,
self.sphere_cameras.dist.max,
1 if self.sphere_cameras.dist.min == self.sphere_cameras.dist.max else self.sphere_cameras.dist.num_linspace,
)
elev_linspace = np.linspace(
self.sphere_cameras.elev.min,
self.sphere_cameras.elev.max,
1 if self.sphere_cameras.elev.min == self.sphere_cameras.elev.max else self.sphere_cameras.elev.num_linspace,
)
azim_linspace = np.linspace(
self.sphere_cameras.azim.min,
self.sphere_cameras.azim.max,
1 if self.sphere_cameras.azim.min == self.sphere_cameras.azim.max else self.sphere_cameras.azim.num_linspace,
)
fov_linspace = np.linspace(
self.sphere_cameras.fov.min,
self.sphere_cameras.fov.max,
1 if self.sphere_cameras.fov.min == self.sphere_cameras.fov.max else self.sphere_cameras.fov.num_linspace,
)
at = np.array(self.sphere_cameras.at)
combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)
dist_list = combinations[:, 0].tolist()
elev_list = combinations[:, 1].tolist()
azim_list = combinations[:, 2].tolist()
sphere_Rs, sphere_Ts = init_trajectory(dist_list, elev_list, azim_list, at)
sphere_fov_list = combinations[:, 3].tolist()
# blenderproc cameras
poses = json.load(open(self.config.blenderproc_cameras))
blenderproc_Rs, blenderproc_Ts = init_blenderproc_trajectory(poses, self.device)
blenderproc_fov_list = [self.config.fov] * len(blenderproc_Rs)
self.Rs = sphere_Rs + blenderproc_Rs
self.Ts = sphere_Ts + blenderproc_Ts
self.fov_list = sphere_fov_list + blenderproc_fov_list
self.num_cameras = len(self.Rs)
print("=> using {} spherical cameras and {} blenderproc cameras for training".format(len(sphere_Rs), len(blenderproc_Rs)))
# self.sphere_Rs = sphere_Rs
# self.sphere_Ts = sphere_Ts
# self.sphere_fov_list = sphere_fov_list
# self.num_sphere_cameras = len(self.sphere_Rs)
# self.Rs = sphere_Rs + blenderproc_Rs
# self.Ts = sphere_Ts + blenderproc_Ts
# self.fov_list = sphere_fov_list + blenderproc_fov_list
# self.num_cameras = len(self.Rs)
# print("=> using {} spherical cameras and {} blenderproc cameras for training".format(len(sphere_Rs), len(blenderproc_Rs)))
# print("=> using {} cameras before annealing and {} cameras afterwards".format(self.num_sphere_cameras, self.num_cameras))
else: # use fixed cameras
raise NotImplementedError
# for inference
# FIXME only support spherical cameras for now
# spherical cameras
self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)
dist_linspace = [self.sphere_cameras.dist.min] # always take the min dist from spherical cameras
elev_linspace = [self.config.elev]
azim_linspace = np.linspace(
self.config.azim[0],
self.config.azim[1],
self.config.log_latents_views,
)
fov_linspace = [self.config.fov]
at = np.array(self.sphere_cameras.at) # always take the cameras center from spherical cameras
combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)
self.inference_dist_list = combinations[:, 0].tolist()
self.inference_elev_list = combinations[:, 1].tolist()
self.inference_azim_list = combinations[:, 2].tolist()
self.inference_fov_list = combinations[:, 3].tolist()
self.inference_at = at
self.num_inference_cameras = len(self.inference_dist_list)
print("=> using {} cameras for training, {} cameras for inference.".format(self.num_cameras, self.num_inference_cameras))
def _init_render_func(self):
if self.config.render_func_type == "mlp":
if self.config.texture_type == "hashgrid":
in_channels = self.config.hashgrid_config.n_levels * self.config.hashgrid_config.n_features_per_level
elif self.config.texture_type == "hashgrid_mlp":
in_channels = self.config.mlp_config.out_channels
else:
in_channels = self.config.latent_channels
render_func = MLP(
in_channels,
self.config.render_channels,
self.config.view_embedding_hidden_dim,
self.config.num_view_embedding_layers,
dtype=torch.float32
).to(self.device)
elif self.config.render_func_type == "none":
render_func = nn.Identity()
else:
raise NotImplementedError("not supported render function type: {}".format(self.config.render_func_type))
return render_func
def init_anchor_func(self, num_instances):
if self.config.texture_type == "hashgrid":
anchor_dim = self.config.hashgrid_config.n_levels * self.config.hashgrid_config.n_features_per_level
elif self.config.texture_type == "hashgrid_mlp":
anchor_dim = self.config.mlp_config.out_channels
else:
anchor_dim = self.config.latent_channels
|
# import tinycudann as tcnn
# customized
sys.path.append("./lib")
sys.path.append("./models")
class Studio(nn.Module):
def __init__(self,
config,
device
):
super().__init__()
self.config = config
self.device = device
# render function
self.render_func = self._init_render_func()
self._init_camera_settings()
def _init_camera_settings(self):
if self.config.use_sphere_cameras and not self.config.use_blenderproc_cameras: # use random cameras
self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)
dist_linspace = np.linspace(
self.sphere_cameras.dist.min,
self.sphere_cameras.dist.max,
1 if self.sphere_cameras.dist.min == self.sphere_cameras.dist.max else self.sphere_cameras.dist.num_linspace,
)
elev_linspace = np.linspace(
self.sphere_cameras.elev.min,
self.sphere_cameras.elev.max,
1 if self.sphere_cameras.elev.min == self.sphere_cameras.elev.max else self.sphere_cameras.elev.num_linspace,
)
azim_linspace = np.linspace(
self.sphere_cameras.azim.min,
self.sphere_cameras.azim.max,
1 if self.sphere_cameras.azim.min == self.sphere_cameras.azim.max else self.sphere_cameras.azim.num_linspace,
)
fov_linspace = np.linspace(
self.sphere_cameras.fov.min,
self.sphere_cameras.fov.max,
1 if self.sphere_cameras.fov.min == self.sphere_cameras.fov.max else self.sphere_cameras.fov.num_linspace,
)
at = np.array(self.sphere_cameras.at)
combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)
dist_list = combinations[:, 0].tolist()
elev_list = combinations[:, 1].tolist()
azim_list = combinations[:, 2].tolist()
self.Rs, self.Ts = init_trajectory(dist_list, elev_list, azim_list, at)
self.fov_list = combinations[:, 3].tolist()
self.num_cameras = len(self.Rs)
print("=> using {} spherical cameras for training".format(self.num_cameras))
elif not self.config.use_sphere_cameras and self.config.use_blenderproc_cameras:
poses = json.load(open(self.config.blenderproc_cameras))
self.Rs, self.Ts = init_blenderproc_trajectory(poses, self.device)
self.num_cameras = len(self.Rs)
self.fov_list = [self.config.fov] * self.num_cameras
print("=> using {} blenderproc cameras for training".format(self.num_cameras))
elif self.config.use_sphere_cameras and self.config.use_blenderproc_cameras:
# spherical cameras
self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)
dist_linspace = np.linspace(
self.sphere_cameras.dist.min,
self.sphere_cameras.dist.max,
1 if self.sphere_cameras.dist.min == self.sphere_cameras.dist.max else self.sphere_cameras.dist.num_linspace,
)
elev_linspace = np.linspace(
self.sphere_cameras.elev.min,
self.sphere_cameras.elev.max,
1 if self.sphere_cameras.elev.min == self.sphere_cameras.elev.max else self.sphere_cameras.elev.num_linspace,
)
azim_linspace = np.linspace(
self.sphere_cameras.azim.min,
self.sphere_cameras.azim.max,
1 if self.sphere_cameras.azim.min == self.sphere_cameras.azim.max else self.sphere_cameras.azim.num_linspace,
)
fov_linspace = np.linspace(
self.sphere_cameras.fov.min,
self.sphere_cameras.fov.max,
1 if self.sphere_cameras.fov.min == self.sphere_cameras.fov.max else self.sphere_cameras.fov.num_linspace,
)
at = np.array(self.sphere_cameras.at)
combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)
dist_list = combinations[:, 0].tolist()
elev_list = combinations[:, 1].tolist()
azim_list = combinations[:, 2].tolist()
sphere_Rs, sphere_Ts = init_trajectory(dist_list, elev_list, azim_list, at)
sphere_fov_list = combinations[:, 3].tolist()
# blenderproc cameras
poses = json.load(open(self.config.blenderproc_cameras))
blenderproc_Rs, blenderproc_Ts = init_blenderproc_trajectory(poses, self.device)
blenderproc_fov_list = [self.config.fov] * len(blenderproc_Rs)
self.Rs = sphere_Rs + blenderproc_Rs
self.Ts = sphere_Ts + blenderproc_Ts
self.fov_list = sphere_fov_list + blenderproc_fov_list
self.num_cameras = len(self.Rs)
print("=> using {} spherical cameras and {} blenderproc cameras for training".format(len(sphere_Rs), len(blenderproc_Rs)))
# self.sphere_Rs = sphere_Rs
# self.sphere_Ts = sphere_Ts
# self.sphere_fov_list = sphere_fov_list
# self.num_sphere_cameras = len(self.sphere_Rs)
# self.Rs = sphere_Rs + blenderproc_Rs
# self.Ts = sphere_Ts + blenderproc_Ts
# self.fov_list = sphere_fov_list + blenderproc_fov_list
# self.num_cameras = len(self.Rs)
# print("=> using {} spherical cameras and {} blenderproc cameras for training".format(len(sphere_Rs), len(blenderproc_Rs)))
# print("=> using {} cameras before annealing and {} cameras afterwards".format(self.num_sphere_cameras, self.num_cameras))
else: # use fixed cameras
raise NotImplementedError
# for inference
# FIXME only support spherical cameras for now
# spherical cameras
self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)
dist_linspace = [self.sphere_cameras.dist.min] # always take the min dist from spherical cameras
elev_linspace = [self.config.elev]
azim_linspace = np.linspace(
self.config.azim[0],
self.config.azim[1],
self.config.log_latents_views,
)
fov_linspace = [self.config.fov]
at = np.array(self.sphere_cameras.at) # always take the cameras center from spherical cameras
combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)
self.inference_dist_list = combinations[:, 0].tolist()
self.inference_elev_list = combinations[:, 1].tolist()
self.inference_azim_list = combinations[:, 2].tolist()
self.inference_fov_list = combinations[:, 3].tolist()
self.inference_at = at
self.num_inference_cameras = len(self.inference_dist_list)
print("=> using {} cameras for training, {} cameras for inference.".format(self.num_cameras, self.num_inference_cameras))
def _init_render_func(self):
if self.config.render_func_type == "mlp":
if self.config.texture_type == "hashgrid":
in_channels = self.config.hashgrid_config.n_levels * self.config.hashgrid_config.n_features_per_level
elif self.config.texture_type == "hashgrid_mlp":
in_channels = self.config.mlp_config.out_channels
else:
in_channels = self.config.latent_channels
render_func = MLP(
in_channels,
self.config.render_channels,
self.config.view_embedding_hidden_dim,
self.config.num_view_embedding_layers,
dtype=torch.float32
).to(self.device)
elif self.config.render_func_type == "none":
render_func = nn.Identity()
else:
raise NotImplementedError("not supported render function type: {}".format(self.config.render_func_type))
return render_func
def init_anchor_func(self, num_instances):
if self.config.texture_type == "hashgrid":
anchor_dim = self.config.hashgrid_config.n_levels * self.config.hashgrid_config.n_features_per_level
elif self.config.texture_type == "hashgrid_mlp":
anchor_dim = self.config.mlp_config.out_channels
else:
anchor_dim = self.config.latent_channels
| anchor_func = AnchorTransformer(self.config, self.device, anchor_dim=anchor_dim, num_instances=num_instances).to(self.device) | 11 | 2023-11-28 15:38:40+00:00 | 12k |
Vchitect/VBench | vbench/third_party/umt/datasets/ssv2.py | [
{
"identifier": "RandomErasing",
"path": "vbench/third_party/umt/datasets/random_erasing.py",
"snippet": "class RandomErasing:\n \"\"\"Randomly selects a rectangle region in an image and erases its pixels.\n 'Random Erasing Data Augmentation' by Zhong et al.\n See https://arxiv.org/pdf/1708.04896.pdf\n This variant of RandomErasing is intended to be applied to either a batch\n or single image tensor after it has been normalized by dataset mean and std.\n Args:\n probability: Probability that the Random Erasing operation will be performed.\n min_area: Minimum percentage of erased area wrt input image area.\n max_area: Maximum percentage of erased area wrt input image area.\n min_aspect: Minimum aspect ratio of erased area.\n mode: pixel color mode, one of 'const', 'rand', or 'pixel'\n 'const' - erase block is constant color of 0 for all channels\n 'rand' - erase block is same per-channel random (normal) color\n 'pixel' - erase block is per-pixel random (normal) color\n max_count: maximum number of erasing blocks per image, area per box is scaled by count.\n per-image count is randomly chosen between 1 and this value.\n \"\"\"\n\n def __init__(\n self,\n probability=0.5,\n min_area=0.02,\n max_area=1 / 3,\n min_aspect=0.3,\n max_aspect=None,\n mode=\"const\",\n min_count=1,\n max_count=None,\n num_splits=0,\n device=\"cuda\",\n cube=True,\n ):\n self.probability = probability\n self.min_area = min_area\n self.max_area = max_area\n max_aspect = max_aspect or 1 / min_aspect\n self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))\n self.min_count = min_count\n self.max_count = max_count or min_count\n self.num_splits = num_splits\n mode = mode.lower()\n self.rand_color = False\n self.per_pixel = False\n self.cube = cube\n if mode == \"rand\":\n self.rand_color = True # per block random normal\n elif mode == \"pixel\":\n self.per_pixel = True # per pixel random normal\n else:\n assert not mode or mode == \"const\"\n self.device = device\n\n def _erase(self, img, chan, img_h, img_w, dtype):\n if random.random() > self.probability:\n return\n area = img_h * img_w\n count = (\n self.min_count\n if self.min_count == self.max_count\n else random.randint(self.min_count, self.max_count)\n )\n for _ in range(count):\n for _ in range(10):\n target_area = (\n random.uniform(self.min_area, self.max_area) * area / count\n )\n aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n if w < img_w and h < img_h:\n top = random.randint(0, img_h - h)\n left = random.randint(0, img_w - w)\n img[:, top : top + h, left : left + w] = _get_pixels(\n self.per_pixel,\n self.rand_color,\n (chan, h, w),\n dtype=dtype,\n device=self.device,\n )\n break\n\n def _erase_cube(\n self,\n img,\n batch_start,\n batch_size,\n chan,\n img_h,\n img_w,\n dtype,\n ):\n if random.random() > self.probability:\n return\n area = img_h * img_w\n count = (\n self.min_count\n if self.min_count == self.max_count\n else random.randint(self.min_count, self.max_count)\n )\n for _ in range(count):\n for _ in range(100):\n target_area = (\n random.uniform(self.min_area, self.max_area) * area / count\n )\n aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n if w < img_w and h < img_h:\n top = random.randint(0, img_h - h)\n left = random.randint(0, img_w - w)\n for i in range(batch_start, batch_size):\n img_instance = img[i]\n img_instance[\n :, top : top + h, left : left + w\n ] = 
_get_pixels(\n self.per_pixel,\n self.rand_color,\n (chan, h, w),\n dtype=dtype,\n device=self.device,\n )\n break\n\n def __call__(self, input):\n if len(input.size()) == 3:\n self._erase(input, *input.size(), input.dtype)\n else:\n batch_size, chan, img_h, img_w = input.size()\n # skip first slice of batch if num_splits is set (for clean portion of samples)\n batch_start = (\n batch_size // self.num_splits if self.num_splits > 1 else 0\n )\n if self.cube:\n self._erase_cube(\n input,\n batch_start,\n batch_size,\n chan,\n img_h,\n img_w,\n input.dtype,\n )\n else:\n for i in range(batch_start, batch_size):\n self._erase(input[i], chan, img_h, img_w, input.dtype)\n return input"
},
{
"identifier": "Compose",
"path": "vbench/third_party/umt/datasets/video_transforms.py",
"snippet": "class Compose(object):\n \"\"\"Composes several transforms\n Args:\n transforms (list of ``Transform`` objects): list of transforms\n to compose\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, clip):\n for t in self.transforms:\n clip = t(clip)\n return clip"
},
{
"identifier": "Resize",
"path": "vbench/third_party/umt/datasets/video_transforms.py",
"snippet": "class Resize(object):\n \"\"\"Resizes a list of (H x W x C) numpy.ndarray to the final size\n The larger the original image is, the more times it takes to\n interpolate\n Args:\n interpolation (str): Can be one of 'nearest', 'bilinear'\n defaults to nearest\n size (tuple): (widht, height)\n \"\"\"\n\n def __init__(self, size, interpolation='nearest'):\n self.size = size\n self.interpolation = interpolation\n\n def __call__(self, clip):\n resized = FF.resize_clip(\n clip, self.size, interpolation=self.interpolation)\n return resized"
},
{
"identifier": "CenterCrop",
"path": "vbench/third_party/umt/datasets/video_transforms.py",
"snippet": "class CenterCrop(object):\n \"\"\"Extract center crop at the same location for a list of images\n Args:\n size (sequence or int): Desired output size for the\n crop in format (h, w)\n \"\"\"\n\n def __init__(self, size):\n if isinstance(size, numbers.Number):\n size = (size, size)\n\n self.size = size\n\n def __call__(self, clip):\n \"\"\"\n Args:\n img (PIL.Image or numpy.ndarray): List of images to be cropped\n in format (h, w, c) in numpy.ndarray\n Returns:\n PIL.Image or numpy.ndarray: Cropped list of images\n \"\"\"\n h, w = self.size\n if isinstance(clip[0], np.ndarray):\n im_h, im_w, im_c = clip[0].shape\n elif isinstance(clip[0], PIL.Image.Image):\n im_w, im_h = clip[0].size\n else:\n raise TypeError('Expected numpy.ndarray or PIL.Image' +\n 'but got list of {0}'.format(type(clip[0])))\n if w > im_w or h > im_h:\n error_msg = (\n 'Initial image size should be larger then '\n 'cropped size but got cropped sizes : ({w}, {h}) while '\n 'initial image is ({im_w}, {im_h})'.format(\n im_w=im_w, im_h=im_h, w=w, h=h))\n raise ValueError(error_msg)\n\n x1 = int(round((im_w - w) / 2.))\n y1 = int(round((im_h - h) / 2.))\n cropped = FF.crop_clip(clip, y1, x1, h, w)\n\n return cropped"
},
{
"identifier": "Normalize",
"path": "vbench/third_party/umt/datasets/video_transforms.py",
"snippet": "class Normalize(object):\n \"\"\"Normalize a clip with mean and standard deviation.\n Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform\n will normalize each channel of the input ``torch.*Tensor`` i.e.\n ``input[channel] = (input[channel] - mean[channel]) / std[channel]``\n .. note::\n This transform acts out of place, i.e., it does not mutates the input tensor.\n Args:\n mean (sequence): Sequence of means for each channel.\n std (sequence): Sequence of standard deviations for each channel.\n \"\"\"\n\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, clip):\n \"\"\"\n Args:\n clip (Tensor): Tensor clip of size (T, C, H, W) to be normalized.\n Returns:\n Tensor: Normalized Tensor clip.\n \"\"\"\n return FF.normalize(clip, self.mean, self.std)\n\n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)"
},
{
"identifier": "create_random_augment",
"path": "vbench/third_party/umt/datasets/video_transforms.py",
"snippet": "def create_random_augment(\n input_size,\n auto_augment=None,\n interpolation=\"bilinear\",\n):\n \"\"\"\n Get video randaug transform.\n\n Args:\n input_size: The size of the input video in tuple.\n auto_augment: Parameters for randaug. An example:\n \"rand-m7-n4-mstd0.5-inc1\" (m is the magnitude and n is the number\n of operations to apply).\n interpolation: Interpolation method.\n \"\"\"\n if isinstance(input_size, tuple):\n img_size = input_size[-2:]\n else:\n img_size = input_size\n\n if auto_augment:\n assert isinstance(auto_augment, str)\n if isinstance(img_size, tuple):\n img_size_min = min(img_size)\n else:\n img_size_min = img_size\n aa_params = {\"translate_const\": int(img_size_min * 0.45)}\n if interpolation and interpolation != \"random\":\n aa_params[\"interpolation\"] = _pil_interp(interpolation)\n if auto_augment.startswith(\"rand\"):\n return transforms.Compose(\n [rand_augment_transform(auto_augment, aa_params)]\n )\n raise NotImplementedError"
},
{
"identifier": "random_short_side_scale_jitter",
"path": "vbench/third_party/umt/datasets/video_transforms.py",
"snippet": "def random_short_side_scale_jitter(\n images, min_size, max_size, boxes=None, inverse_uniform_sampling=False\n):\n \"\"\"\n Perform a spatial short scale jittering on the given images and\n corresponding boxes.\n Args:\n images (tensor): images to perform scale jitter. Dimension is\n `num frames` x `channel` x `height` x `width`.\n min_size (int): the minimal size to scale the frames.\n max_size (int): the maximal size to scale the frames.\n boxes (ndarray): optional. Corresponding boxes to images.\n Dimension is `num boxes` x 4.\n inverse_uniform_sampling (bool): if True, sample uniformly in\n [1 / max_scale, 1 / min_scale] and take a reciprocal to get the\n scale. If False, take a uniform sample from [min_scale, max_scale].\n Returns:\n (tensor): the scaled images with dimension of\n `num frames` x `channel` x `new height` x `new width`.\n (ndarray or None): the scaled boxes with dimension of\n `num boxes` x 4.\n \"\"\"\n if inverse_uniform_sampling:\n size = int(\n round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size))\n )\n else:\n size = int(round(np.random.uniform(min_size, max_size)))\n\n height = images.shape[2]\n width = images.shape[3]\n if (width <= height and width == size) or (\n height <= width and height == size\n ):\n return images, boxes\n new_width = size\n new_height = size\n if width < height:\n new_height = int(math.floor((float(height) / width) * size))\n if boxes is not None:\n boxes = boxes * float(new_height) / height\n else:\n new_width = int(math.floor((float(width) / height) * size))\n if boxes is not None:\n boxes = boxes * float(new_width) / width\n\n return (\n torch.nn.functional.interpolate(\n images,\n size=(new_height, new_width),\n mode=\"bilinear\",\n align_corners=False,\n ),\n boxes,\n )"
},
{
"identifier": "random_crop",
"path": "vbench/third_party/umt/datasets/video_transforms.py",
"snippet": "def random_crop(images, size, boxes=None):\n \"\"\"\n Perform random spatial crop on the given images and corresponding boxes.\n Args:\n images (tensor): images to perform random crop. The dimension is\n `num frames` x `channel` x `height` x `width`.\n size (int): the size of height and width to crop on the image.\n boxes (ndarray or None): optional. Corresponding boxes to images.\n Dimension is `num boxes` x 4.\n Returns:\n cropped (tensor): cropped images with dimension of\n `num frames` x `channel` x `size` x `size`.\n cropped_boxes (ndarray or None): the cropped boxes with dimension of\n `num boxes` x 4.\n \"\"\"\n if images.shape[2] == size and images.shape[3] == size:\n return images\n height = images.shape[2]\n width = images.shape[3]\n y_offset = 0\n if height > size:\n y_offset = int(np.random.randint(0, height - size))\n x_offset = 0\n if width > size:\n x_offset = int(np.random.randint(0, width - size))\n cropped = images[\n :, :, y_offset : y_offset + size, x_offset : x_offset + size\n ]\n\n cropped_boxes = (\n crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None\n )\n\n return cropped, cropped_boxes"
},
{
"identifier": "random_resized_crop_with_shift",
"path": "vbench/third_party/umt/datasets/video_transforms.py",
"snippet": "def random_resized_crop_with_shift(\n images,\n target_height,\n target_width,\n scale=(0.8, 1.0),\n ratio=(3.0 / 4.0, 4.0 / 3.0),\n):\n \"\"\"\n This is similar to random_resized_crop. However, it samples two different\n boxes (for cropping) for the first and last frame. It then linearly\n interpolates the two boxes for other frames.\n\n Args:\n images: Images to perform resizing and cropping.\n target_height: Desired height after cropping.\n target_width: Desired width after cropping.\n scale: Scale range of Inception-style area based random resizing.\n ratio: Aspect ratio range of Inception-style area based random resizing.\n \"\"\"\n t = images.shape[1]\n height = images.shape[2]\n width = images.shape[3]\n\n i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width)\n i_, j_, h_, w_ = _get_param_spatial_crop(scale, ratio, height, width)\n i_s = [int(i) for i in torch.linspace(i, i_, steps=t).tolist()]\n j_s = [int(i) for i in torch.linspace(j, j_, steps=t).tolist()]\n h_s = [int(i) for i in torch.linspace(h, h_, steps=t).tolist()]\n w_s = [int(i) for i in torch.linspace(w, w_, steps=t).tolist()]\n out = torch.zeros((3, t, target_height, target_width))\n for ind in range(t):\n out[:, ind : ind + 1, :, :] = torch.nn.functional.interpolate(\n images[\n :,\n ind : ind + 1,\n i_s[ind] : i_s[ind] + h_s[ind],\n j_s[ind] : j_s[ind] + w_s[ind],\n ],\n size=(target_height, target_width),\n mode=\"bilinear\",\n align_corners=False,\n )\n return out"
},
{
"identifier": "random_resized_crop",
"path": "vbench/third_party/umt/datasets/video_transforms.py",
"snippet": "def random_resized_crop(\n images,\n target_height,\n target_width,\n scale=(0.8, 1.0),\n ratio=(3.0 / 4.0, 4.0 / 3.0),\n):\n \"\"\"\n Crop the given images to random size and aspect ratio. A crop of random\n size (default: of 0.08 to 1.0) of the original size and a random aspect\n ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This\n crop is finally resized to given size. This is popularly used to train the\n Inception networks.\n\n Args:\n images: Images to perform resizing and cropping.\n target_height: Desired height after cropping.\n target_width: Desired width after cropping.\n scale: Scale range of Inception-style area based random resizing.\n ratio: Aspect ratio range of Inception-style area based random resizing.\n \"\"\"\n\n height = images.shape[2]\n width = images.shape[3]\n\n i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width)\n cropped = images[:, :, i : i + h, j : j + w]\n return torch.nn.functional.interpolate(\n cropped,\n size=(target_height, target_width),\n mode=\"bilinear\",\n align_corners=False,\n )"
},
{
"identifier": "horizontal_flip",
"path": "vbench/third_party/umt/datasets/video_transforms.py",
"snippet": "def horizontal_flip(prob, images, boxes=None):\n \"\"\"\n Perform horizontal flip on the given images and corresponding boxes.\n Args:\n prob (float): probility to flip the images.\n images (tensor): images to perform horizontal flip, the dimension is\n `num frames` x `channel` x `height` x `width`.\n boxes (ndarray or None): optional. Corresponding boxes to images.\n Dimension is `num boxes` x 4.\n Returns:\n images (tensor): images with dimension of\n `num frames` x `channel` x `height` x `width`.\n flipped_boxes (ndarray or None): the flipped boxes with dimension of\n `num boxes` x 4.\n \"\"\"\n if boxes is None:\n flipped_boxes = None\n else:\n flipped_boxes = boxes.copy()\n\n if np.random.uniform() < prob:\n images = images.flip((-1))\n\n if len(images.shape) == 3:\n width = images.shape[2]\n elif len(images.shape) == 4:\n width = images.shape[3]\n else:\n raise NotImplementedError(\"Dimension does not supported\")\n if boxes is not None:\n flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1\n\n return images, flipped_boxes"
},
{
"identifier": "random_short_side_scale_jitter",
"path": "vbench/third_party/umt/datasets/video_transforms.py",
"snippet": "def random_short_side_scale_jitter(\n images, min_size, max_size, boxes=None, inverse_uniform_sampling=False\n):\n \"\"\"\n Perform a spatial short scale jittering on the given images and\n corresponding boxes.\n Args:\n images (tensor): images to perform scale jitter. Dimension is\n `num frames` x `channel` x `height` x `width`.\n min_size (int): the minimal size to scale the frames.\n max_size (int): the maximal size to scale the frames.\n boxes (ndarray): optional. Corresponding boxes to images.\n Dimension is `num boxes` x 4.\n inverse_uniform_sampling (bool): if True, sample uniformly in\n [1 / max_scale, 1 / min_scale] and take a reciprocal to get the\n scale. If False, take a uniform sample from [min_scale, max_scale].\n Returns:\n (tensor): the scaled images with dimension of\n `num frames` x `channel` x `new height` x `new width`.\n (ndarray or None): the scaled boxes with dimension of\n `num boxes` x 4.\n \"\"\"\n if inverse_uniform_sampling:\n size = int(\n round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size))\n )\n else:\n size = int(round(np.random.uniform(min_size, max_size)))\n\n height = images.shape[2]\n width = images.shape[3]\n if (width <= height and width == size) or (\n height <= width and height == size\n ):\n return images, boxes\n new_width = size\n new_height = size\n if width < height:\n new_height = int(math.floor((float(height) / width) * size))\n if boxes is not None:\n boxes = boxes * float(new_height) / height\n else:\n new_width = int(math.floor((float(width) / height) * size))\n if boxes is not None:\n boxes = boxes * float(new_width) / width\n\n return (\n torch.nn.functional.interpolate(\n images,\n size=(new_height, new_width),\n mode=\"bilinear\",\n align_corners=False,\n ),\n boxes,\n )"
},
{
"identifier": "uniform_crop",
"path": "vbench/third_party/umt/datasets/video_transforms.py",
"snippet": "def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):\n \"\"\"\n Perform uniform spatial sampling on the images and corresponding boxes.\n Args:\n images (tensor): images to perform uniform crop. The dimension is\n `num frames` x `channel` x `height` x `width`.\n size (int): size of height and weight to crop the images.\n spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width\n is larger than height. Or 0, 1, or 2 for top, center, and bottom\n crop if height is larger than width.\n boxes (ndarray or None): optional. Corresponding boxes to images.\n Dimension is `num boxes` x 4.\n scale_size (int): optinal. If not None, resize the images to scale_size before\n performing any crop.\n Returns:\n cropped (tensor): images with dimension of\n `num frames` x `channel` x `size` x `size`.\n cropped_boxes (ndarray or None): the cropped boxes with dimension of\n `num boxes` x 4.\n \"\"\"\n assert spatial_idx in [0, 1, 2]\n ndim = len(images.shape)\n if ndim == 3:\n images = images.unsqueeze(0)\n height = images.shape[2]\n width = images.shape[3]\n\n if scale_size is not None:\n if width <= height:\n width, height = scale_size, int(height / width * scale_size)\n else:\n width, height = int(width / height * scale_size), scale_size\n images = torch.nn.functional.interpolate(\n images,\n size=(height, width),\n mode=\"bilinear\",\n align_corners=False,\n )\n\n y_offset = int(math.ceil((height - size) / 2))\n x_offset = int(math.ceil((width - size) / 2))\n\n if height > width:\n if spatial_idx == 0:\n y_offset = 0\n elif spatial_idx == 2:\n y_offset = height - size\n else:\n if spatial_idx == 0:\n x_offset = 0\n elif spatial_idx == 2:\n x_offset = width - size\n cropped = images[\n :, :, y_offset : y_offset + size, x_offset : x_offset + size\n ]\n cropped_boxes = (\n crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None\n )\n if ndim == 3:\n cropped = cropped.squeeze(0)\n return cropped, cropped_boxes"
},
{
"identifier": "ClipToTensor",
"path": "vbench/third_party/umt/datasets/volume_transforms.py",
"snippet": "class ClipToTensor(object):\n \"\"\"Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255]\n to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0]\n \"\"\"\n\n def __init__(self, channel_nb=3, div_255=True, numpy=False):\n self.channel_nb = channel_nb\n self.div_255 = div_255\n self.numpy = numpy\n\n def __call__(self, clip):\n \"\"\"\n Args: clip (list of numpy.ndarray): clip (list of images)\n to be converted to tensor.\n \"\"\"\n # Retrieve shape\n if isinstance(clip[0], np.ndarray):\n h, w, ch = clip[0].shape\n assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format(\n ch)\n elif isinstance(clip[0], Image.Image):\n w, h = clip[0].size\n else:\n raise TypeError('Expected numpy.ndarray or PIL.Image\\\n but got list of {0}'.format(type(clip[0])))\n\n np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)])\n\n # Convert\n for img_idx, img in enumerate(clip):\n if isinstance(img, np.ndarray):\n pass\n elif isinstance(img, Image.Image):\n img = np.array(img, copy=False)\n else:\n raise TypeError('Expected numpy.ndarray or PIL.Image\\\n but got list of {0}'.format(type(clip[0])))\n img = convert_img(img)\n np_clip[:, img_idx, :, :] = img\n if self.numpy:\n if self.div_255:\n np_clip = np_clip / 255.0\n return np_clip\n\n else:\n tensor_clip = torch.from_numpy(np_clip)\n\n if not isinstance(tensor_clip, torch.FloatTensor):\n tensor_clip = tensor_clip.float()\n if self.div_255:\n tensor_clip = torch.div(tensor_clip, 255)\n return tensor_clip"
}
] | import os
import io
import cv2
import numpy as np
import torch
import warnings
import pandas as pd
from torchvision import transforms
from decord import VideoReader, cpu
from torch.utils.data import Dataset
from .random_erasing import RandomErasing
from .video_transforms import (
Compose, Resize, CenterCrop, Normalize,
create_random_augment, random_short_side_scale_jitter,
random_crop, random_resized_crop_with_shift, random_resized_crop,
horizontal_flip, random_short_side_scale_jitter, uniform_crop,
)
from .volume_transforms import ClipToTensor
from petrel_client.client import Client | 9,220 | args.reprob,
mode=args.remode,
max_count=args.recount,
num_splits=args.recount,
device="cpu",
)
buffer = buffer.permute(1, 0, 2, 3)
buffer = erase_transform(buffer)
buffer = buffer.permute(1, 0, 2, 3)
return buffer
def loadvideo_decord(self, sample, sample_rate_scale=1):
"""Load video content using Decord"""
fname = sample
fname = os.path.join(self.prefix, fname)
try:
if self.keep_aspect_ratio:
if fname.startswith('s3'):
video_bytes = self.client.get(fname)
vr = VideoReader(io.BytesIO(video_bytes),
num_threads=1,
ctx=cpu(0))
else:
vr = VideoReader(fname, num_threads=1, ctx=cpu(0))
else:
if fname.startswith('s3:'):
video_bytes = self.client.get(fname)
vr = VideoReader(io.BytesIO(video_bytes),
width=self.new_width,
height=self.new_height,
num_threads=1,
ctx=cpu(0))
else:
vr = VideoReader(fname, width=self.new_width, height=self.new_height,
num_threads=1, ctx=cpu(0))
except:
print("video cannot be loaded by decord: ", fname)
return []
if self.mode == 'test':
tick = len(vr) / float(self.num_segment)
all_index = list(np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)] +
[int(tick * x) for x in range(self.num_segment)]))
while len(all_index) < (self.num_segment * self.test_num_segment):
all_index.append(all_index[-1])
all_index = np.sort(np.array(all_index))
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
elif self.mode == 'validation':
tick = len(vr) / float(self.num_segment)
all_index = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)])
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
# handle temporal segments
average_duration = len(vr) // self.num_segment
if average_duration > 0:
all_index = list(np.multiply(list(range(self.num_segment)), average_duration) + np.random.randint(average_duration,
size=self.num_segment))
elif len(vr) > self.num_segment:
all_index = list(np.sort(np.random.randint(len(vr), size=self.num_segment)))
else:
all_index = list(np.zeros((self.num_segment,)))
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
def __len__(self):
if self.mode != 'test':
return len(self.dataset_samples)
else:
return len(self.test_dataset)
def spatial_sampling(
frames,
spatial_idx=-1,
min_scale=256,
max_scale=320,
crop_size=224,
random_horizontal_flip=True,
inverse_uniform_sampling=False,
aspect_ratio=None,
scale=None,
motion_shift=False,
):
"""
Perform spatial sampling on the given video frames. If spatial_idx is
-1, perform random scale, random crop, and random flip on the given
frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling
with the given spatial_idx.
Args:
frames (tensor): frames of images sampled from the video. The
dimension is `num frames` x `height` x `width` x `channel`.
spatial_idx (int): if -1, perform random spatial sampling. If 0, 1,
or 2, perform left, center, right crop if width is larger than
height, and perform top, center, bottom crop if height is larger
than width.
min_scale (int): the minimal size of scaling.
max_scale (int): the maximal size of scaling.
crop_size (int): the size of height and width used to crop the
frames.
inverse_uniform_sampling (bool): if True, sample uniformly in
[1 / max_scale, 1 / min_scale] and take a reciprocal to get the
scale. If False, take a uniform sample from [min_scale,
max_scale].
aspect_ratio (list): Aspect ratio range for resizing.
scale (list): Scale range for resizing.
motion_shift (bool): Whether to apply motion shift for resizing.
Returns:
frames (tensor): spatially sampled frames.
"""
assert spatial_idx in [-1, 0, 1, 2]
if spatial_idx == -1:
if aspect_ratio is None and scale is None:
|
try:
has_client = True
except ImportError:
has_client = False
class SSRawFrameClsDataset(Dataset):
"""Load your own raw frame classification dataset."""
def __init__(self, anno_path, prefix='', split=' ', mode='train', clip_len=8,
crop_size=224, short_side_size=256, new_height=256, new_width=340,
keep_aspect_ratio=True, num_segment=1, num_crop=1, test_num_segment=10,
test_num_crop=3, filename_tmpl='img_{:05}.jpg', args=None):
self.anno_path = anno_path
self.prefix = prefix
self.split = split
self.mode = mode
self.clip_len = clip_len
self.crop_size = crop_size
self.short_side_size = short_side_size
self.new_height = new_height
self.new_width = new_width
self.keep_aspect_ratio = keep_aspect_ratio
self.num_segment = num_segment
self.test_num_segment = test_num_segment
self.num_crop = num_crop
self.test_num_crop = test_num_crop
self.filename_tmpl = filename_tmpl
self.args = args
self.aug = False
self.rand_erase = False
self.client = None
if has_client:
self.client = Client('~/petreloss.conf')
if self.mode in ['train']:
self.aug = True
if self.args.reprob > 0:
self.rand_erase = True
if VideoReader is None:
raise ImportError(
"Unable to import `decord` which is required to read videos.")
cleaned = pd.read_csv(self.anno_path, header=None, delimiter=self.split)
self.dataset_samples = list(cleaned.values[:, 0])
self.total_frames = list(cleaned.values[:, 1])
self.label_array = list(cleaned.values[:, -1])
if (mode == 'train'):
pass
elif (mode == 'validation'):
self.data_transform = Compose([
Resize(self.short_side_size,
interpolation='bilinear'),
CenterCrop(size=(self.crop_size,
self.crop_size)),
ClipToTensor(),
Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
elif mode == 'test':
self.data_resize = Compose([
Resize(size=(short_side_size),
interpolation='bilinear')
])
self.data_transform = Compose([
ClipToTensor(),
Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
self.test_seg = []
self.test_dataset = []
self.test_total_frames = []
self.test_label_array = []
for ck in range(self.test_num_segment):
for cp in range(self.test_num_crop):
for idx in range(len(self.label_array)):
self.test_seg.append((ck, cp))
self.test_dataset.append(self.dataset_samples[idx])
self.test_total_frames.append(self.total_frames[idx])
self.test_label_array.append(self.label_array[idx])
def __getitem__(self, index):
if self.mode == 'train':
args = self.args
scale_t = 1
sample = self.dataset_samples[index]
total_frame = self.total_frames[index]
buffer = self.load_frame(sample,
total_frame,
sample_rate_scale=scale_t) # T H W C
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn(
"video {} not correctly loaded during training".format(
sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
total_frame = self.total_frames[index]
buffer = self.load_frame(sample,
total_frame,
sample_rate_scale=scale_t)
if args.num_sample > 1:
frame_list = []
label_list = []
index_list = []
for _ in range(args.num_sample):
new_frames = self._aug_frame(buffer, args)
label = self.label_array[index]
frame_list.append(new_frames)
label_list.append(label)
index_list.append(index)
return frame_list, label_list, index_list, {}
else:
buffer = self._aug_frame(buffer, args)
return buffer, self.label_array[index], index, {}
elif self.mode == 'validation':
sample = self.dataset_samples[index]
total_frame = self.total_frames[index]
buffer = self.load_frame(sample, total_frame)
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn(
"video {} not correctly loaded during validation".
format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
buffer = self.load_frame(sample, total_frame)
buffer = self.data_transform(buffer)
return buffer, self.label_array[index], sample.split(
"/")[-1].split(".")[0]
elif self.mode == 'test':
sample = self.test_dataset[index]
total_frame = self.test_total_frames[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.load_frame(sample, total_frame)
while len(buffer) == 0:
warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\
str(self.test_dataset[index]), chunk_nb, split_nb))
index = np.random.randint(self.__len__())
sample = self.test_dataset[index]
total_frame = self.test_total_frames[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.load_frame(sample, total_frame)
buffer = self.data_resize(buffer)
if isinstance(buffer, list):
buffer = np.stack(buffer, 0)
spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \
/ (self.test_num_crop - 1)
temporal_start = chunk_nb
spatial_start = int(split_nb * spatial_step)
if buffer.shape[1] >= buffer.shape[2]:
buffer = buffer[temporal_start::self.test_num_segment, \
spatial_start:spatial_start + self.short_side_size, :, :]
else:
buffer = buffer[temporal_start::self.test_num_segment, \
:, spatial_start:spatial_start + self.short_side_size, :]
buffer = self.data_transform(buffer)
return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \
chunk_nb, split_nb
else:
raise NameError('mode {} unknown'.format(self.mode))
def _aug_frame(
self,
buffer,
args,
):
aug_transform = create_random_augment(
input_size=(self.crop_size, self.crop_size),
auto_augment=args.aa,
interpolation=args.train_interpolation,
)
buffer = [transforms.ToPILImage()(frame) for frame in buffer]
buffer = aug_transform(buffer)
buffer = [transforms.ToTensor()(img) for img in buffer]
buffer = torch.stack(buffer) # T C H W
buffer = buffer.permute(0, 2, 3, 1) # T H W C
# T H W C
buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
# T H W C -> C T H W.
buffer = buffer.permute(3, 0, 1, 2)
# Perform data augmentation.
scl, asp = (
[0.08, 1.0],
[0.75, 1.3333],
)
buffer = spatial_sampling(
buffer,
spatial_idx=-1,
min_scale=256,
max_scale=320,
crop_size=self.crop_size,
random_horizontal_flip=False if args.data_set == 'SSV2' else True,
inverse_uniform_sampling=False,
aspect_ratio=asp,
scale=scl,
motion_shift=False)
if self.rand_erase:
erase_transform = RandomErasing(
args.reprob,
mode=args.remode,
max_count=args.recount,
num_splits=args.recount,
device="cpu",
)
buffer = buffer.permute(1, 0, 2, 3)
buffer = erase_transform(buffer)
buffer = buffer.permute(1, 0, 2, 3)
return buffer
def load_frame(self, sample, num_frames, sample_rate_scale=1):
"""Load video content using Decord"""
fname = sample
fname = os.path.join(self.prefix, fname)
if self.mode == 'test':
tick = num_frames / float(self.num_segment)
all_index = []
for t_seg in range(self.test_num_segment):
tmp_index = [
int(t_seg * tick / self.test_num_segment + tick * x)
for x in range(self.num_segment)
]
all_index.extend(tmp_index)
all_index = list(np.sort(np.array(all_index)))
imgs = []
for idx in all_index:
frame_fname = os.path.join(fname, self.filename_tmpl.format(idx + 1))
img_bytes = self.client.get(frame_fname)
img_np = np.frombuffer(img_bytes, np.uint8)
img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
imgs.append(img)
buffer = np.array(imgs)
return buffer
# handle temporal segments
average_duration = num_frames // self.num_segment
all_index = []
if average_duration > 0:
if self.mode == 'validation':
all_index = list(
np.multiply(list(range(self.num_segment)),
average_duration) +
np.ones(self.num_segment, dtype=int) *
(average_duration // 2))
else:
all_index = list(
np.multiply(list(range(self.num_segment)),
average_duration) +
np.random.randint(average_duration, size=self.num_segment))
elif num_frames > self.num_segment:
if self.mode == 'validation':
all_index = list(range(self.num_segment))
else:
all_index = list(
np.sort(
np.random.randint(num_frames, size=self.num_segment)))
else:
all_index = [0] * (self.num_segment - num_frames) + list(
range(num_frames))
all_index = list(np.array(all_index))
imgs = []
for idx in all_index:
frame_fname = os.path.join(fname, self.filename_tmpl.format(idx + 1))
img_bytes = self.client.get(frame_fname)
img_np = np.frombuffer(img_bytes, np.uint8)
img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
imgs.append(img)
buffer = np.array(imgs)
return buffer
def __len__(self):
if self.mode != 'test':
return len(self.dataset_samples)
else:
return len(self.test_dataset)
class SSVideoClsDataset(Dataset):
"""Load your own video classification dataset."""
def __init__(self, anno_path, prefix='', split=' ', mode='train', clip_len=8,
crop_size=224, short_side_size=256, new_height=256,
new_width=340, keep_aspect_ratio=True, num_segment=1,
num_crop=1, test_num_segment=10, test_num_crop=3, args=None):
self.anno_path = anno_path
self.prefix = prefix
self.split = split
self.mode = mode
self.clip_len = clip_len
self.crop_size = crop_size
self.short_side_size = short_side_size
self.new_height = new_height
self.new_width = new_width
self.keep_aspect_ratio = keep_aspect_ratio
self.num_segment = num_segment
self.test_num_segment = test_num_segment
self.num_crop = num_crop
self.test_num_crop = test_num_crop
self.args = args
self.aug = False
self.rand_erase = False
self.client = None
if has_client:
self.client = Client('~/petreloss.conf')
if self.mode in ['train']:
self.aug = True
if self.args.reprob > 0:
self.rand_erase = True
if VideoReader is None:
raise ImportError("Unable to import `decord` which is required to read videos.")
cleaned = pd.read_csv(self.anno_path, header=None, delimiter=self.split)
self.dataset_samples = list(cleaned.values[:, 0])
self.label_array = list(cleaned.values[:, 1])
if (mode == 'train'):
pass
elif (mode == 'validation'):
self.data_transform = Compose([
Resize(self.short_side_size, interpolation='bilinear'),
CenterCrop(size=(self.crop_size, self.crop_size)),
ClipToTensor(),
Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
elif mode == 'test':
self.data_resize = Compose([
Resize(size=(short_side_size), interpolation='bilinear')
])
self.data_transform = Compose([
ClipToTensor(),
Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
self.test_seg = []
self.test_dataset = []
self.test_label_array = []
for ck in range(self.test_num_segment):
for cp in range(self.test_num_crop):
for idx in range(len(self.label_array)):
sample_label = self.label_array[idx]
self.test_label_array.append(sample_label)
self.test_dataset.append(self.dataset_samples[idx])
self.test_seg.append((ck, cp))
def __getitem__(self, index):
if self.mode == 'train':
args = self.args
scale_t = 1
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn("video {} not correctly loaded during training".format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)
if args.num_sample > 1:
frame_list = []
label_list = []
index_list = []
for _ in range(args.num_sample):
new_frames = self._aug_frame(buffer, args)
label = self.label_array[index]
frame_list.append(new_frames)
label_list.append(label)
index_list.append(index)
return frame_list, label_list, index_list, {}
else:
buffer = self._aug_frame(buffer, args)
return buffer, self.label_array[index], index, {}
elif self.mode == 'validation':
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample)
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn("video {} not correctly loaded during validation".format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample)
buffer = self.data_transform(buffer)
return buffer, self.label_array[index], sample.split("/")[-1].split(".")[0]
elif self.mode == 'test':
sample = self.test_dataset[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.loadvideo_decord(sample)
while len(buffer) == 0:
warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\
str(self.test_dataset[index]), chunk_nb, split_nb))
index = np.random.randint(self.__len__())
sample = self.test_dataset[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.loadvideo_decord(sample)
buffer = self.data_resize(buffer)
if isinstance(buffer, list):
buffer = np.stack(buffer, 0)
spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \
/ (self.test_num_crop - 1)
temporal_start = chunk_nb # 0/1
spatial_start = int(split_nb * spatial_step)
if buffer.shape[1] >= buffer.shape[2]:
buffer = buffer[temporal_start::2, \
spatial_start:spatial_start + self.short_side_size, :, :]
else:
buffer = buffer[temporal_start::2, \
:, spatial_start:spatial_start + self.short_side_size, :]
buffer = self.data_transform(buffer)
return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \
chunk_nb, split_nb
else:
raise NameError('mode {} unknown'.format(self.mode))
def _aug_frame(
self,
buffer,
args,
):
aug_transform = create_random_augment(
input_size=(self.crop_size, self.crop_size),
auto_augment=args.aa,
interpolation=args.train_interpolation,
)
buffer = [
transforms.ToPILImage()(frame) for frame in buffer
]
buffer = aug_transform(buffer)
buffer = [transforms.ToTensor()(img) for img in buffer]
buffer = torch.stack(buffer) # T C H W
buffer = buffer.permute(0, 2, 3, 1) # T H W C
# T H W C
buffer = tensor_normalize(
buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
)
# T H W C -> C T H W.
buffer = buffer.permute(3, 0, 1, 2)
# Perform data augmentation.
scl, asp = (
[0.08, 1.0],
[0.75, 1.3333],
)
buffer = spatial_sampling(
buffer,
spatial_idx=-1,
min_scale=256,
max_scale=320,
crop_size=self.crop_size,
random_horizontal_flip=False if args.data_set == 'SSV2' else True,
inverse_uniform_sampling=False,
aspect_ratio=asp,
scale=scl,
motion_shift=False
)
if self.rand_erase:
erase_transform = RandomErasing(
args.reprob,
mode=args.remode,
max_count=args.recount,
num_splits=args.recount,
device="cpu",
)
buffer = buffer.permute(1, 0, 2, 3)
buffer = erase_transform(buffer)
buffer = buffer.permute(1, 0, 2, 3)
return buffer
def loadvideo_decord(self, sample, sample_rate_scale=1):
"""Load video content using Decord"""
fname = sample
fname = os.path.join(self.prefix, fname)
try:
if self.keep_aspect_ratio:
if fname.startswith('s3'):
video_bytes = self.client.get(fname)
vr = VideoReader(io.BytesIO(video_bytes),
num_threads=1,
ctx=cpu(0))
else:
vr = VideoReader(fname, num_threads=1, ctx=cpu(0))
else:
if fname.startswith('s3:'):
video_bytes = self.client.get(fname)
vr = VideoReader(io.BytesIO(video_bytes),
width=self.new_width,
height=self.new_height,
num_threads=1,
ctx=cpu(0))
else:
vr = VideoReader(fname, width=self.new_width, height=self.new_height,
num_threads=1, ctx=cpu(0))
except:
print("video cannot be loaded by decord: ", fname)
return []
if self.mode == 'test':
tick = len(vr) / float(self.num_segment)
all_index = list(np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)] +
[int(tick * x) for x in range(self.num_segment)]))
while len(all_index) < (self.num_segment * self.test_num_segment):
all_index.append(all_index[-1])
all_index = np.sort(np.array(all_index))
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
elif self.mode == 'validation':
tick = len(vr) / float(self.num_segment)
all_index = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)])
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
# handle temporal segments
average_duration = len(vr) // self.num_segment
if average_duration > 0:
all_index = list(np.multiply(list(range(self.num_segment)), average_duration) + np.random.randint(average_duration,
size=self.num_segment))
elif len(vr) > self.num_segment:
all_index = list(np.sort(np.random.randint(len(vr), size=self.num_segment)))
else:
all_index = list(np.zeros((self.num_segment,)))
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
def __len__(self):
if self.mode != 'test':
return len(self.dataset_samples)
else:
return len(self.test_dataset)
def spatial_sampling(
frames,
spatial_idx=-1,
min_scale=256,
max_scale=320,
crop_size=224,
random_horizontal_flip=True,
inverse_uniform_sampling=False,
aspect_ratio=None,
scale=None,
motion_shift=False,
):
"""
Perform spatial sampling on the given video frames. If spatial_idx is
-1, perform random scale, random crop, and random flip on the given
frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling
with the given spatial_idx.
Args:
frames (tensor): frames of images sampled from the video. The
dimension is `num frames` x `height` x `width` x `channel`.
spatial_idx (int): if -1, perform random spatial sampling. If 0, 1,
or 2, perform left, center, right crop if width is larger than
height, and perform top, center, bottom crop if height is larger
than width.
min_scale (int): the minimal size of scaling.
max_scale (int): the maximal size of scaling.
crop_size (int): the size of height and width used to crop the
frames.
inverse_uniform_sampling (bool): if True, sample uniformly in
[1 / max_scale, 1 / min_scale] and take a reciprocal to get the
scale. If False, take a uniform sample from [min_scale,
max_scale].
aspect_ratio (list): Aspect ratio range for resizing.
scale (list): Scale range for resizing.
motion_shift (bool): Whether to apply motion shift for resizing.
Returns:
frames (tensor): spatially sampled frames.
"""
assert spatial_idx in [-1, 0, 1, 2]
if spatial_idx == -1:
if aspect_ratio is None and scale is None: | frames, _ = random_short_side_scale_jitter( | 11 | 2023-11-27 12:41:46+00:00 | 12k |
16lemoing/dot | dot/models/shelf/cotracker_utils/models/core/cotracker/cotracker.py | [
{
"identifier": "BasicEncoder",
"path": "dot/models/shelf/cotracker_utils/models/core/cotracker/blocks.py",
"snippet": "class BasicEncoder(nn.Module):\n def __init__(\n self, input_dim=3, output_dim=128, stride=8, norm_fn=\"batch\", dropout=0.0\n ):\n super(BasicEncoder, self).__init__()\n self.stride = stride\n self.norm_fn = norm_fn\n self.in_planes = 64\n\n if self.norm_fn == \"group\":\n self.norm1 = nn.GroupNorm(num_groups=8, num_channels=self.in_planes)\n self.norm2 = nn.GroupNorm(num_groups=8, num_channels=output_dim * 2)\n\n elif self.norm_fn == \"batch\":\n self.norm1 = nn.BatchNorm2d(self.in_planes)\n self.norm2 = nn.BatchNorm2d(output_dim * 2)\n\n elif self.norm_fn == \"instance\":\n self.norm1 = nn.InstanceNorm2d(self.in_planes)\n self.norm2 = nn.InstanceNorm2d(output_dim * 2)\n\n elif self.norm_fn == \"none\":\n self.norm1 = nn.Sequential()\n\n self.conv1 = nn.Conv2d(\n input_dim,\n self.in_planes,\n kernel_size=7,\n stride=2,\n padding=3,\n padding_mode=\"zeros\",\n )\n self.relu1 = nn.ReLU(inplace=True)\n\n self.shallow = False\n if self.shallow:\n self.layer1 = self._make_layer(64, stride=1)\n self.layer2 = self._make_layer(96, stride=2)\n self.layer3 = self._make_layer(128, stride=2)\n self.conv2 = nn.Conv2d(128 + 96 + 64, output_dim, kernel_size=1)\n else:\n self.layer1 = self._make_layer(64, stride=1)\n self.layer2 = self._make_layer(96, stride=2)\n self.layer3 = self._make_layer(128, stride=2)\n self.layer4 = self._make_layer(128, stride=2)\n\n self.conv2 = nn.Conv2d(\n 128 + 128 + 96 + 64,\n output_dim * 2,\n kernel_size=3,\n padding=1,\n padding_mode=\"zeros\",\n )\n self.relu2 = nn.ReLU(inplace=True)\n self.conv3 = nn.Conv2d(output_dim * 2, output_dim, kernel_size=1)\n\n self.dropout = None\n if dropout > 0:\n self.dropout = nn.Dropout2d(p=dropout)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):\n if m.weight is not None:\n nn.init.constant_(m.weight, 1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, dim, stride=1):\n layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)\n layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)\n layers = (layer1, layer2)\n\n self.in_planes = dim\n return nn.Sequential(*layers)\n\n def forward(self, x):\n _, _, H, W = x.shape\n\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu1(x)\n\n if self.shallow:\n a = self.layer1(x)\n b = self.layer2(a)\n c = self.layer3(b)\n a = F.interpolate(\n a,\n (H // self.stride, W // self.stride),\n mode=\"bilinear\",\n align_corners=True,\n )\n b = F.interpolate(\n b,\n (H // self.stride, W // self.stride),\n mode=\"bilinear\",\n align_corners=True,\n )\n c = F.interpolate(\n c,\n (H // self.stride, W // self.stride),\n mode=\"bilinear\",\n align_corners=True,\n )\n x = self.conv2(torch.cat([a, b, c], dim=1))\n else:\n a = self.layer1(x)\n b = self.layer2(a)\n c = self.layer3(b)\n d = self.layer4(c)\n a = F.interpolate(\n a,\n (H // self.stride, W // self.stride),\n mode=\"bilinear\",\n align_corners=True,\n )\n b = F.interpolate(\n b,\n (H // self.stride, W // self.stride),\n mode=\"bilinear\",\n align_corners=True,\n )\n c = F.interpolate(\n c,\n (H // self.stride, W // self.stride),\n mode=\"bilinear\",\n align_corners=True,\n )\n d = F.interpolate(\n d,\n (H // self.stride, W // self.stride),\n mode=\"bilinear\",\n align_corners=True,\n )\n x = self.conv2(torch.cat([a, b, c, d], dim=1))\n x = self.norm2(x)\n x = self.relu2(x)\n x = self.conv3(x)\n\n if self.training and self.dropout is 
not None:\n x = self.dropout(x)\n return x"
},
{
"identifier": "CorrBlock",
"path": "dot/models/shelf/cotracker_utils/models/core/cotracker/blocks.py",
"snippet": "class CorrBlock:\n def __init__(self, fmaps, num_levels=4, radius=4):\n B, S, C, H, W = fmaps.shape\n self.S, self.C, self.H, self.W = S, C, H, W\n\n self.num_levels = num_levels\n self.radius = radius\n self.fmaps_pyramid = []\n\n self.fmaps_pyramid.append(fmaps)\n for i in range(self.num_levels - 1):\n fmaps_ = fmaps.reshape(B * S, C, H, W)\n fmaps_ = F.avg_pool2d(fmaps_, 2, stride=2)\n _, _, H, W = fmaps_.shape\n fmaps = fmaps_.reshape(B, S, C, H, W)\n self.fmaps_pyramid.append(fmaps)\n\n def sample(self, coords):\n r = self.radius\n B, S, N, D = coords.shape\n assert D == 2\n\n H, W = self.H, self.W\n out_pyramid = []\n for i in range(self.num_levels):\n corrs = self.corrs_pyramid[i] # B, S, N, H, W\n _, _, _, H, W = corrs.shape\n\n dx = torch.linspace(-r, r, 2 * r + 1)\n dy = torch.linspace(-r, r, 2 * r + 1)\n delta = torch.stack(torch.meshgrid(dy, dx, indexing=\"ij\"), axis=-1).to(\n coords.device\n )\n\n centroid_lvl = coords.reshape(B * S * N, 1, 1, 2) / 2 ** i\n delta_lvl = delta.view(1, 2 * r + 1, 2 * r + 1, 2)\n coords_lvl = centroid_lvl + delta_lvl\n\n corrs = bilinear_sampler(corrs.reshape(B * S * N, 1, H, W), coords_lvl)\n corrs = corrs.view(B, S, N, -1)\n out_pyramid.append(corrs)\n\n out = torch.cat(out_pyramid, dim=-1) # B, S, N, LRR*2\n return out.contiguous().float()\n\n def corr(self, targets):\n B, S, N, C = targets.shape\n assert C == self.C\n assert S == self.S\n\n fmap1 = targets\n\n self.corrs_pyramid = []\n for fmaps in self.fmaps_pyramid:\n _, _, _, H, W = fmaps.shape\n fmap2s = fmaps.view(B, S, C, H * W)\n corrs = torch.matmul(fmap1, fmap2s)\n corrs = corrs.view(B, S, N, H, W)\n corrs = corrs / torch.sqrt(torch.tensor(C).float())\n self.corrs_pyramid.append(corrs)"
},
{
"identifier": "UpdateFormer",
"path": "dot/models/shelf/cotracker_utils/models/core/cotracker/blocks.py",
"snippet": "class UpdateFormer(nn.Module):\n \"\"\"\n Transformer model that updates track estimates.\n \"\"\"\n\n def __init__(\n self,\n space_depth=12,\n time_depth=12,\n input_dim=320,\n hidden_size=384,\n num_heads=8,\n output_dim=130,\n mlp_ratio=4.0,\n add_space_attn=True,\n ):\n super().__init__()\n self.out_channels = 2\n self.num_heads = num_heads\n self.hidden_size = hidden_size\n self.add_space_attn = add_space_attn\n self.input_transform = torch.nn.Linear(input_dim, hidden_size, bias=True)\n self.flow_head = torch.nn.Linear(hidden_size, output_dim, bias=True)\n\n self.time_blocks = nn.ModuleList(\n [\n AttnBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio)\n for _ in range(time_depth)\n ]\n )\n\n if add_space_attn:\n self.space_blocks = nn.ModuleList(\n [\n AttnBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio)\n for _ in range(space_depth)\n ]\n )\n assert len(self.time_blocks) >= len(self.space_blocks)\n self.initialize_weights()\n\n def initialize_weights(self):\n def _basic_init(module):\n if isinstance(module, nn.Linear):\n torch.nn.init.xavier_uniform_(module.weight)\n if module.bias is not None:\n nn.init.constant_(module.bias, 0)\n\n self.apply(_basic_init)\n\n def forward(self, input_tensor):\n x = self.input_transform(input_tensor)\n\n j = 0\n for i in range(len(self.time_blocks)):\n B, N, T, _ = x.shape\n x_time = rearrange(x, \"b n t c -> (b n) t c\", b=B, t=T, n=N)\n x_time = self.time_blocks[i](x_time)\n\n x = rearrange(x_time, \"(b n) t c -> b n t c \", b=B, t=T, n=N)\n if self.add_space_attn and (\n i % (len(self.time_blocks) // len(self.space_blocks)) == 0\n ):\n x_space = rearrange(x, \"b n t c -> (b t) n c \", b=B, t=T, n=N)\n x_space = self.space_blocks[j](x_space)\n x = rearrange(x_space, \"(b t) n c -> b n t c \", b=B, t=T, n=N)\n j += 1\n\n flow = self.flow_head(x)\n return flow"
},
{
"identifier": "meshgrid2d",
"path": "dot/models/shelf/cotracker_utils/models/core/model_utils.py",
"snippet": "def meshgrid2d(B, Y, X, stack=False, norm=False, device=\"cpu\"):\n # returns a meshgrid sized B x Y x X\n\n grid_y = torch.linspace(0.0, Y - 1, Y, device=torch.device(device))\n grid_y = torch.reshape(grid_y, [1, Y, 1])\n grid_y = grid_y.repeat(B, 1, X)\n\n grid_x = torch.linspace(0.0, X - 1, X, device=torch.device(device))\n grid_x = torch.reshape(grid_x, [1, 1, X])\n grid_x = grid_x.repeat(B, Y, 1)\n\n if stack:\n # note we stack in xy order\n # (see https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample)\n grid = torch.stack([grid_x, grid_y], dim=-1)\n return grid\n else:\n return grid_y, grid_x"
},
{
"identifier": "bilinear_sample2d",
"path": "dot/models/shelf/cotracker_utils/models/core/model_utils.py",
"snippet": "def bilinear_sample2d(im, x, y, return_inbounds=False):\n # x and y are each B, N\n # output is B, C, N\n if len(im.shape) == 5:\n B, N, C, H, W = list(im.shape)\n else:\n B, C, H, W = list(im.shape)\n N = list(x.shape)[1]\n\n x = x.float()\n y = y.float()\n H_f = torch.tensor(H, dtype=torch.float32)\n W_f = torch.tensor(W, dtype=torch.float32)\n\n # inbound_mask = (x>-0.5).float()*(y>-0.5).float()*(x<W_f+0.5).float()*(y<H_f+0.5).float()\n\n max_y = (H_f - 1).int()\n max_x = (W_f - 1).int()\n\n x0 = torch.floor(x).int()\n x1 = x0 + 1\n y0 = torch.floor(y).int()\n y1 = y0 + 1\n\n x0_clip = torch.clamp(x0, 0, max_x)\n x1_clip = torch.clamp(x1, 0, max_x)\n y0_clip = torch.clamp(y0, 0, max_y)\n y1_clip = torch.clamp(y1, 0, max_y)\n dim2 = W\n dim1 = W * H\n\n base = torch.arange(0, B, dtype=torch.int64, device=x.device) * dim1\n base = torch.reshape(base, [B, 1]).repeat([1, N])\n\n base_y0 = base + y0_clip * dim2\n base_y1 = base + y1_clip * dim2\n\n idx_y0_x0 = base_y0 + x0_clip\n idx_y0_x1 = base_y0 + x1_clip\n idx_y1_x0 = base_y1 + x0_clip\n idx_y1_x1 = base_y1 + x1_clip\n\n # use the indices to lookup pixels in the flat image\n # im is B x C x H x W\n # move C out to last dim\n if len(im.shape) == 5:\n im_flat = (im.permute(0, 3, 4, 1, 2)).reshape(B * H * W, N, C)\n i_y0_x0 = torch.diagonal(im_flat[idx_y0_x0.long()], dim1=1, dim2=2).permute(\n 0, 2, 1\n )\n i_y0_x1 = torch.diagonal(im_flat[idx_y0_x1.long()], dim1=1, dim2=2).permute(\n 0, 2, 1\n )\n i_y1_x0 = torch.diagonal(im_flat[idx_y1_x0.long()], dim1=1, dim2=2).permute(\n 0, 2, 1\n )\n i_y1_x1 = torch.diagonal(im_flat[idx_y1_x1.long()], dim1=1, dim2=2).permute(\n 0, 2, 1\n )\n else:\n im_flat = (im.permute(0, 2, 3, 1)).reshape(B * H * W, C)\n i_y0_x0 = im_flat[idx_y0_x0.long()]\n i_y0_x1 = im_flat[idx_y0_x1.long()]\n i_y1_x0 = im_flat[idx_y1_x0.long()]\n i_y1_x1 = im_flat[idx_y1_x1.long()]\n\n # Finally calculate interpolated values.\n x0_f = x0.float()\n x1_f = x1.float()\n y0_f = y0.float()\n y1_f = y1.float()\n\n w_y0_x0 = ((x1_f - x) * (y1_f - y)).unsqueeze(2)\n w_y0_x1 = ((x - x0_f) * (y1_f - y)).unsqueeze(2)\n w_y1_x0 = ((x1_f - x) * (y - y0_f)).unsqueeze(2)\n w_y1_x1 = ((x - x0_f) * (y - y0_f)).unsqueeze(2)\n\n output = (\n w_y0_x0 * i_y0_x0 + w_y0_x1 * i_y0_x1 + w_y1_x0 * i_y1_x0 + w_y1_x1 * i_y1_x1\n )\n # output is B*N x C\n output = output.view(B, -1, C)\n output = output.permute(0, 2, 1)\n # output is B x C x N\n\n if return_inbounds:\n x_valid = (x > -0.5).byte() & (x < float(W_f - 0.5)).byte()\n y_valid = (y > -0.5).byte() & (y < float(H_f - 0.5)).byte()\n inbounds = (x_valid & y_valid).float()\n inbounds = inbounds.reshape(\n B, N\n ) # something seems wrong here for B>1; i'm getting an error here (or downstream if i put -1)\n return output, inbounds\n\n return output # B, C, N"
},
{
"identifier": "smart_cat",
"path": "dot/models/shelf/cotracker_utils/models/core/model_utils.py",
"snippet": "def smart_cat(tensor1, tensor2, dim):\n if tensor1 is None:\n return tensor2\n return torch.cat([tensor1, tensor2], dim=dim)"
},
{
"identifier": "get_2d_embedding",
"path": "dot/models/shelf/cotracker_utils/models/core/embeddings.py",
"snippet": "def get_2d_embedding(xy, C, cat_coords=True):\n B, N, D = xy.shape\n assert D == 2\n\n x = xy[:, :, 0:1]\n y = xy[:, :, 1:2]\n div_term = (\n torch.arange(0, C, 2, device=xy.device, dtype=torch.float32) * (1000.0 / C)\n ).reshape(1, 1, int(C / 2))\n\n pe_x = torch.zeros(B, N, C, device=xy.device, dtype=torch.float32)\n pe_y = torch.zeros(B, N, C, device=xy.device, dtype=torch.float32)\n\n pe_x[:, :, 0::2] = torch.sin(x * div_term)\n pe_x[:, :, 1::2] = torch.cos(x * div_term)\n\n pe_y[:, :, 0::2] = torch.sin(y * div_term)\n pe_y[:, :, 1::2] = torch.cos(y * div_term)\n\n pe = torch.cat([pe_x, pe_y], dim=2) # B, N, C*3\n if cat_coords:\n pe = torch.cat([xy, pe], dim=2) # B, N, C*3+3\n return pe"
},
{
"identifier": "get_1d_sincos_pos_embed_from_grid",
"path": "dot/models/shelf/cotracker_utils/models/core/embeddings.py",
"snippet": "def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):\n \"\"\"\n embed_dim: output dimension for each position\n pos: a list of positions to be encoded: size (M,)\n out: (M, D)\n \"\"\"\n assert embed_dim % 2 == 0\n omega = np.arange(embed_dim // 2, dtype=np.float64)\n omega /= embed_dim / 2.0\n omega = 1.0 / 10000 ** omega # (D/2,)\n\n pos = pos.reshape(-1) # (M,)\n out = np.einsum(\"m,d->md\", pos, omega) # (M, D/2), outer product\n\n emb_sin = np.sin(out) # (M, D/2)\n emb_cos = np.cos(out) # (M, D/2)\n\n emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)\n return emb"
},
{
"identifier": "get_2d_sincos_pos_embed",
"path": "dot/models/shelf/cotracker_utils/models/core/embeddings.py",
"snippet": "def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0):\n \"\"\"\n grid_size: int of the grid height and width\n return:\n pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)\n \"\"\"\n if isinstance(grid_size, tuple):\n grid_size_h, grid_size_w = grid_size\n else:\n grid_size_h = grid_size_w = grid_size\n grid_h = np.arange(grid_size_h, dtype=np.float32)\n grid_w = np.arange(grid_size_w, dtype=np.float32)\n grid = np.meshgrid(grid_w, grid_h) # here w goes first\n grid = np.stack(grid, axis=0)\n\n grid = grid.reshape([2, 1, grid_size_h, grid_size_w])\n pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)\n if cls_token and extra_tokens > 0:\n pos_embed = np.concatenate(\n [np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0\n )\n return pos_embed"
}
] | import torch
import torch.nn as nn
from einops import rearrange
from dot.models.shelf.cotracker_utils.models.core.cotracker.blocks import (
BasicEncoder,
CorrBlock,
UpdateFormer,
)
from dot.models.shelf.cotracker_utils.models.core.model_utils import meshgrid2d, bilinear_sample2d, smart_cat
from dot.models.shelf.cotracker_utils.models.core.embeddings import (
get_2d_embedding,
get_1d_sincos_pos_embed_from_grid,
get_2d_sincos_pos_embed,
) | 8,025 | x = rearrange(x, "(b n) t d -> b n t d", b=B)
delta = self.updateformer(x)
delta = rearrange(delta, " b n t d -> (b n) t d")
delta_coords_ = delta[:, :, :2]
delta_feats_ = delta[:, :, 2:]
delta_feats_ = delta_feats_.reshape(B * N * S, self.latent_dim)
ffeats_ = ffeats.permute(0, 2, 1, 3).reshape(B * N * S, self.latent_dim)
ffeats_ = self.ffeat_updater(self.norm(delta_feats_)) + ffeats_
ffeats = ffeats_.reshape(B, N, S, self.latent_dim).permute(
0, 2, 1, 3
) # B,S,N,C
coords = coords + delta_coords_.reshape(B, N, S, 2).permute(0, 2, 1, 3)
coord_predictions.append(coords * self.stride)
vis_e = self.vis_predictor(ffeats.reshape(B * S * N, self.latent_dim)).reshape(
B, S, N
)
return coord_predictions, vis_e, feat_init
def forward(self, rgbs, queries, iters=4, cached_feat=None, feat_init=None, is_train=False):
B, T, C, H, W = rgbs.shape
B, N, __ = queries.shape
device = rgbs.device
assert B == 1
# INIT for the first sequence
# We want to sort points by the first frame they are visible to add them to the tensor of tracked points consecutively
first_positive_inds = queries[:, :, 0].long()
__, sort_inds = torch.sort(first_positive_inds[0], dim=0, descending=False)
inv_sort_inds = torch.argsort(sort_inds, dim=0)
first_positive_sorted_inds = first_positive_inds[0][sort_inds]
assert torch.allclose(
first_positive_inds[0], first_positive_inds[0][sort_inds][inv_sort_inds]
)
coords_init = queries[:, :, 1:].reshape(B, 1, N, 2).repeat(
1, self.S, 1, 1
) / float(self.stride)
rgbs = 2 * rgbs - 1.0
traj_e = torch.zeros((B, T, N, 2), device=device)
vis_e = torch.zeros((B, T, N), device=device)
ind_array = torch.arange(T, device=device)
ind_array = ind_array[None, :, None].repeat(B, 1, N)
track_mask = (ind_array >= first_positive_inds[:, None, :]).unsqueeze(-1)
# these are logits, so we initialize visibility with something that would give a value close to 1 after softmax
vis_init = torch.ones((B, self.S, N, 1), device=device).float() * 10
ind = 0
track_mask_ = track_mask[:, :, sort_inds].clone()
coords_init_ = coords_init[:, :, sort_inds].clone()
vis_init_ = vis_init[:, :, sort_inds].clone()
prev_wind_idx = 0
fmaps_ = None
vis_predictions = []
coord_predictions = []
wind_inds = []
while ind < T - self.S // 2:
rgbs_seq = rgbs[:, ind : ind + self.S]
S = S_local = rgbs_seq.shape[1]
if cached_feat is None:
if S < self.S:
rgbs_seq = torch.cat(
[rgbs_seq, rgbs_seq[:, -1, None].repeat(1, self.S - S, 1, 1, 1)],
dim=1,
)
S = rgbs_seq.shape[1]
rgbs_ = rgbs_seq.reshape(B * S, C, H, W)
if fmaps_ is None:
fmaps_ = self.fnet(rgbs_)
else:
fmaps_ = torch.cat(
[fmaps_[self.S // 2 :], self.fnet(rgbs_[self.S // 2 :])], dim=0
)
fmaps = fmaps_.reshape(
B, S, self.latent_dim, H // self.stride, W // self.stride
)
else:
fmaps = cached_feat[:, ind : ind + self.S]
if S < self.S:
fmaps = torch.cat(
[fmaps, fmaps[:, -1, None].repeat(1, self.S - S, 1, 1, 1)],
dim=1,
)
curr_wind_points = torch.nonzero(first_positive_sorted_inds < ind + self.S)
if curr_wind_points.shape[0] == 0:
ind = ind + self.S // 2
continue
wind_idx = curr_wind_points[-1] + 1
if wind_idx - prev_wind_idx > 0:
fmaps_sample = fmaps[
:, first_positive_sorted_inds[prev_wind_idx:wind_idx] - ind
]
feat_init_ = bilinear_sample2d(
fmaps_sample,
coords_init_[:, 0, prev_wind_idx:wind_idx, 0],
coords_init_[:, 0, prev_wind_idx:wind_idx, 1],
).permute(0, 2, 1)
feat_init_ = feat_init_.unsqueeze(1).repeat(1, self.S, 1, 1)
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
torch.manual_seed(0)
def get_points_on_a_grid(grid_size, interp_shape, grid_center=(0, 0), device="cpu"):
if grid_size == 1:
return torch.tensor([interp_shape[1] / 2, interp_shape[0] / 2], device=device)[
None, None
]
grid_y, grid_x = meshgrid2d(
1, grid_size, grid_size, stack=False, norm=False, device=device
)
step = interp_shape[1] // 64
if grid_center[0] != 0 or grid_center[1] != 0:
grid_y = grid_y - grid_size / 2.0
grid_x = grid_x - grid_size / 2.0
grid_y = step + grid_y.reshape(1, -1) / float(grid_size - 1) * (
interp_shape[0] - step * 2
)
grid_x = step + grid_x.reshape(1, -1) / float(grid_size - 1) * (
interp_shape[1] - step * 2
)
grid_y = grid_y + grid_center[0]
grid_x = grid_x + grid_center[1]
xy = torch.stack([grid_x, grid_y], dim=-1).to(device)
return xy
def sample_pos_embed(grid_size, embed_dim, coords):
pos_embed = get_2d_sincos_pos_embed(embed_dim=embed_dim, grid_size=grid_size)
pos_embed = (
torch.from_numpy(pos_embed)
.reshape(grid_size[0], grid_size[1], embed_dim)
.float()
.unsqueeze(0)
.to(coords.device)
)
sampled_pos_embed = bilinear_sample2d(
pos_embed.permute(0, 3, 1, 2), coords[:, 0, :, 0], coords[:, 0, :, 1]
)
return sampled_pos_embed
class CoTracker(nn.Module):
def __init__(
self,
S=8,
stride=8,
add_space_attn=True,
num_heads=8,
hidden_size=384,
space_depth=12,
time_depth=12,
):
super(CoTracker, self).__init__()
self.S = S
self.stride = stride
self.hidden_dim = 256
self.latent_dim = latent_dim = 128
self.corr_levels = 4
self.corr_radius = 3
self.add_space_attn = add_space_attn
self.fnet = BasicEncoder(
output_dim=self.latent_dim, norm_fn="instance", dropout=0, stride=stride
)
self.updateformer = UpdateFormer(
space_depth=space_depth,
time_depth=time_depth,
input_dim=456,
hidden_size=hidden_size,
num_heads=num_heads,
output_dim=latent_dim + 2,
mlp_ratio=4.0,
add_space_attn=add_space_attn,
)
self.norm = nn.GroupNorm(1, self.latent_dim)
self.ffeat_updater = nn.Sequential(
nn.Linear(self.latent_dim, self.latent_dim),
nn.GELU(),
)
self.vis_predictor = nn.Sequential(
nn.Linear(self.latent_dim, 1),
)
def forward_iteration(
self,
fmaps,
coords_init,
feat_init=None,
vis_init=None,
track_mask=None,
iters=4,
):
B, S_init, N, D = coords_init.shape
assert D == 2
assert B == 1
B, S, __, H8, W8 = fmaps.shape
device = fmaps.device
if S_init < S:
coords = torch.cat(
[coords_init, coords_init[:, -1].repeat(1, S - S_init, 1, 1)], dim=1
)
vis_init = torch.cat(
[vis_init, vis_init[:, -1].repeat(1, S - S_init, 1, 1)], dim=1
)
else:
coords = coords_init.clone()
fcorr_fn = CorrBlock(
fmaps, num_levels=self.corr_levels, radius=self.corr_radius
)
ffeats = feat_init.clone()
times_ = torch.linspace(0, S - 1, S).reshape(1, S, 1)
pos_embed = sample_pos_embed(
grid_size=(H8, W8),
embed_dim=456,
coords=coords,
)
pos_embed = rearrange(pos_embed, "b e n -> (b n) e").unsqueeze(1)
times_embed = (
torch.from_numpy(get_1d_sincos_pos_embed_from_grid(456, times_[0]))[None]
.repeat(B, 1, 1)
.float()
.to(device)
)
coord_predictions = []
for __ in range(iters):
coords = coords.detach()
fcorr_fn.corr(ffeats)
fcorrs = fcorr_fn.sample(coords) # B, S, N, LRR
LRR = fcorrs.shape[3]
fcorrs_ = fcorrs.permute(0, 2, 1, 3).reshape(B * N, S, LRR)
flows_ = (coords - coords[:, 0:1]).permute(0, 2, 1, 3).reshape(B * N, S, 2)
flows_cat = get_2d_embedding(flows_, 64, cat_coords=True)
ffeats_ = ffeats.permute(0, 2, 1, 3).reshape(B * N, S, self.latent_dim)
if track_mask.shape[1] < vis_init.shape[1]:
track_mask = torch.cat(
[
track_mask,
torch.zeros_like(track_mask[:, 0]).repeat(
1, vis_init.shape[1] - track_mask.shape[1], 1, 1
),
],
dim=1,
)
concat = (
torch.cat([track_mask, vis_init], dim=2)
.permute(0, 2, 1, 3)
.reshape(B * N, S, 2)
)
transformer_input = torch.cat([flows_cat, fcorrs_, ffeats_, concat], dim=2)
x = transformer_input + pos_embed + times_embed
x = rearrange(x, "(b n) t d -> b n t d", b=B)
delta = self.updateformer(x)
delta = rearrange(delta, " b n t d -> (b n) t d")
delta_coords_ = delta[:, :, :2]
delta_feats_ = delta[:, :, 2:]
delta_feats_ = delta_feats_.reshape(B * N * S, self.latent_dim)
ffeats_ = ffeats.permute(0, 2, 1, 3).reshape(B * N * S, self.latent_dim)
ffeats_ = self.ffeat_updater(self.norm(delta_feats_)) + ffeats_
ffeats = ffeats_.reshape(B, N, S, self.latent_dim).permute(
0, 2, 1, 3
) # B,S,N,C
coords = coords + delta_coords_.reshape(B, N, S, 2).permute(0, 2, 1, 3)
coord_predictions.append(coords * self.stride)
vis_e = self.vis_predictor(ffeats.reshape(B * S * N, self.latent_dim)).reshape(
B, S, N
)
return coord_predictions, vis_e, feat_init
def forward(self, rgbs, queries, iters=4, cached_feat=None, feat_init=None, is_train=False):
B, T, C, H, W = rgbs.shape
B, N, __ = queries.shape
device = rgbs.device
assert B == 1
# INIT for the first sequence
# We want to sort points by the first frame they are visible to add them to the tensor of tracked points consecutively
first_positive_inds = queries[:, :, 0].long()
__, sort_inds = torch.sort(first_positive_inds[0], dim=0, descending=False)
inv_sort_inds = torch.argsort(sort_inds, dim=0)
first_positive_sorted_inds = first_positive_inds[0][sort_inds]
assert torch.allclose(
first_positive_inds[0], first_positive_inds[0][sort_inds][inv_sort_inds]
)
coords_init = queries[:, :, 1:].reshape(B, 1, N, 2).repeat(
1, self.S, 1, 1
) / float(self.stride)
rgbs = 2 * rgbs - 1.0
traj_e = torch.zeros((B, T, N, 2), device=device)
vis_e = torch.zeros((B, T, N), device=device)
ind_array = torch.arange(T, device=device)
ind_array = ind_array[None, :, None].repeat(B, 1, N)
track_mask = (ind_array >= first_positive_inds[:, None, :]).unsqueeze(-1)
# these are logits, so we initialize visibility with something that would give a value close to 1 after softmax
vis_init = torch.ones((B, self.S, N, 1), device=device).float() * 10
ind = 0
track_mask_ = track_mask[:, :, sort_inds].clone()
coords_init_ = coords_init[:, :, sort_inds].clone()
vis_init_ = vis_init[:, :, sort_inds].clone()
prev_wind_idx = 0
fmaps_ = None
vis_predictions = []
coord_predictions = []
wind_inds = []
while ind < T - self.S // 2:
rgbs_seq = rgbs[:, ind : ind + self.S]
S = S_local = rgbs_seq.shape[1]
if cached_feat is None:
if S < self.S:
rgbs_seq = torch.cat(
[rgbs_seq, rgbs_seq[:, -1, None].repeat(1, self.S - S, 1, 1, 1)],
dim=1,
)
S = rgbs_seq.shape[1]
rgbs_ = rgbs_seq.reshape(B * S, C, H, W)
if fmaps_ is None:
fmaps_ = self.fnet(rgbs_)
else:
fmaps_ = torch.cat(
[fmaps_[self.S // 2 :], self.fnet(rgbs_[self.S // 2 :])], dim=0
)
fmaps = fmaps_.reshape(
B, S, self.latent_dim, H // self.stride, W // self.stride
)
else:
fmaps = cached_feat[:, ind : ind + self.S]
if S < self.S:
fmaps = torch.cat(
[fmaps, fmaps[:, -1, None].repeat(1, self.S - S, 1, 1, 1)],
dim=1,
)
curr_wind_points = torch.nonzero(first_positive_sorted_inds < ind + self.S)
if curr_wind_points.shape[0] == 0:
ind = ind + self.S // 2
continue
wind_idx = curr_wind_points[-1] + 1
if wind_idx - prev_wind_idx > 0:
fmaps_sample = fmaps[
:, first_positive_sorted_inds[prev_wind_idx:wind_idx] - ind
]
feat_init_ = bilinear_sample2d(
fmaps_sample,
coords_init_[:, 0, prev_wind_idx:wind_idx, 0],
coords_init_[:, 0, prev_wind_idx:wind_idx, 1],
).permute(0, 2, 1)
feat_init_ = feat_init_.unsqueeze(1).repeat(1, self.S, 1, 1) | feat_init = smart_cat(feat_init, feat_init_, dim=2) | 5 | 2023-12-01 09:59:13+00:00 | 12k |
cswry/SeeSR | utils_data/make_paired_data_DAPE.py | [
{
"identifier": "RealESRGANDataset",
"path": "basicsr/data/realesrgan_dataset.py",
"snippet": "class RealESRGANDataset(data.Dataset):\n \"\"\"Modified dataset based on the dataset used for Real-ESRGAN model:\n Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.\n\n It loads gt (Ground-Truth) images, and augments them.\n It also generates blur kernels and sinc kernels for generating low-quality images.\n Note that the low-quality images are processed in tensors on GPUS for faster processing.\n\n Args:\n opt (dict): Config for train datasets. It contains the following keys:\n dataroot_gt (str): Data root path for gt.\n meta_info (str): Path for meta information file.\n io_backend (dict): IO backend type and other kwarg.\n use_hflip (bool): Use horizontal flips.\n use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).\n Please see more options in the codes.\n \"\"\"\n\n def __init__(self, opt):\n super(RealESRGANDataset, self).__init__()\n self.opt = opt\n self.file_client = None\n self.io_backend_opt = opt['io_backend']\n if 'crop_size' in opt:\n self.crop_size = opt['crop_size']\n else:\n self.crop_size = 512\n if 'image_type' not in opt:\n opt['image_type'] = 'png'\n\n # support multiple type of data: file path and meta data, remove support of lmdb\n self.paths = []\n if 'meta_info' in opt:\n with open(self.opt['meta_info']) as fin:\n paths = [line.strip().split(' ')[0] for line in fin]\n self.paths = [v for v in paths]\n if 'meta_num' in opt:\n self.paths = sorted(self.paths)[:opt['meta_num']]\n if 'gt_path' in opt:\n if isinstance(opt['gt_path'], str):\n self.paths.extend(sorted([str(x) for x in Path(opt['gt_path']).glob('*.'+opt['image_type'])]))\n else:\n self.paths.extend(sorted([str(x) for x in Path(opt['gt_path'][0]).glob('*.'+opt['image_type'])]))\n if len(opt['gt_path']) > 1:\n for i in range(len(opt['gt_path'])-1):\n self.paths.extend(sorted([str(x) for x in Path(opt['gt_path'][i+1]).glob('*.'+opt['image_type'])]))\n if 'imagenet_path' in opt:\n class_list = os.listdir(opt['imagenet_path'])\n for class_file in class_list:\n self.paths.extend(sorted([str(x) for x in Path(os.path.join(opt['imagenet_path'], class_file)).glob('*.'+'JPEG')]))\n if 'face_gt_path' in opt:\n if isinstance(opt['face_gt_path'], str):\n face_list = sorted([str(x) for x in Path(opt['face_gt_path']).glob('*.'+opt['image_type'])])\n self.paths.extend(face_list[:opt['num_face']])\n else:\n face_list = sorted([str(x) for x in Path(opt['face_gt_path'][0]).glob('*.'+opt['image_type'])])\n self.paths.extend(face_list[:opt['num_face']])\n if len(opt['face_gt_path']) > 1:\n for i in range(len(opt['face_gt_path'])-1):\n self.paths.extend(sorted([str(x) for x in Path(opt['face_gt_path'][0]).glob('*.'+opt['image_type'])])[:opt['num_face']])\n\n # limit number of pictures for test\n if 'num_pic' in opt:\n if 'val' or 'test' in opt:\n random.shuffle(self.paths)\n self.paths = self.paths[:opt['num_pic']]\n else:\n self.paths = self.paths[:opt['num_pic']]\n\n if 'mul_num' in opt:\n self.paths = self.paths * opt['mul_num']\n # print('>>>>>>>>>>>>>>>>>>>>>')\n # print(self.paths)\n\n # blur settings for the first degradation\n self.blur_kernel_size = opt['blur_kernel_size']\n self.kernel_list = opt['kernel_list']\n self.kernel_prob = opt['kernel_prob'] # a list for each kernel probability\n self.blur_sigma = opt['blur_sigma']\n self.betag_range = opt['betag_range'] # betag used in generalized Gaussian blur kernels\n self.betap_range = opt['betap_range'] # betap used in plateau blur kernels\n self.sinc_prob = opt['sinc_prob'] # the 
probability for sinc filters\n\n # blur settings for the second degradation\n self.blur_kernel_size2 = opt['blur_kernel_size2']\n self.kernel_list2 = opt['kernel_list2']\n self.kernel_prob2 = opt['kernel_prob2']\n self.blur_sigma2 = opt['blur_sigma2']\n self.betag_range2 = opt['betag_range2']\n self.betap_range2 = opt['betap_range2']\n self.sinc_prob2 = opt['sinc_prob2']\n\n # a final sinc filter\n self.final_sinc_prob = opt['final_sinc_prob']\n\n self.kernel_range = [2 * v + 1 for v in range(3, 11)] # kernel size ranges from 7 to 21\n # TODO: kernel range is now hard-coded, should be in the configure file\n self.pulse_tensor = torch.zeros(21, 21).float() # convolving with pulse tensor brings no blurry effect\n self.pulse_tensor[10, 10] = 1\n\n def __getitem__(self, index):\n if self.file_client is None:\n self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)\n\n # -------------------------------- Load gt images -------------------------------- #\n # Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32.\n gt_path = self.paths[index]\n # avoid errors caused by high latency in reading files\n retry = 3\n while retry > 0:\n try:\n img_bytes = self.file_client.get(gt_path, 'gt')\n except (IOError, OSError) as e:\n # logger = get_root_logger()\n # logger.warn(f'File client error: {e}, remaining retry times: {retry - 1}')\n # change another file to read\n index = random.randint(0, self.__len__()-1)\n gt_path = self.paths[index]\n time.sleep(1) # sleep 1s for occasional server congestion\n else:\n break\n finally:\n retry -= 1\n img_gt = imfrombytes(img_bytes, float32=True)\n # filter the dataset and remove images with too low quality\n img_size = os.path.getsize(gt_path)\n img_size = img_size/1024\n\n while img_gt.shape[0] * img_gt.shape[1] < 384*384 or img_size<100:\n index = random.randint(0, self.__len__()-1)\n gt_path = self.paths[index]\n\n time.sleep(0.1) # sleep 1s for occasional server congestion\n img_bytes = self.file_client.get(gt_path, 'gt')\n img_gt = imfrombytes(img_bytes, float32=True)\n img_size = os.path.getsize(gt_path)\n img_size = img_size/1024\n\n # -------------------- Do augmentation for training: flip, rotation -------------------- #\n img_gt = augment(img_gt, self.opt['use_hflip'], self.opt['use_rot'])\n\n # crop or pad to 400\n # TODO: 400 is hard-coded. 
You may change it accordingly\n h, w = img_gt.shape[0:2]\n crop_pad_size = self.crop_size\n # pad\n if h < crop_pad_size or w < crop_pad_size:\n pad_h = max(0, crop_pad_size - h)\n pad_w = max(0, crop_pad_size - w)\n img_gt = cv2.copyMakeBorder(img_gt, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT_101)\n # crop\n if img_gt.shape[0] > crop_pad_size or img_gt.shape[1] > crop_pad_size:\n h, w = img_gt.shape[0:2]\n # randomly choose top and left coordinates\n top = random.randint(0, h - crop_pad_size)\n left = random.randint(0, w - crop_pad_size)\n # top = (h - crop_pad_size) // 2 -1\n # left = (w - crop_pad_size) // 2 -1\n img_gt = img_gt[top:top + crop_pad_size, left:left + crop_pad_size, ...]\n\n # ------------------------ Generate kernels (used in the first degradation) ------------------------ #\n kernel_size = random.choice(self.kernel_range)\n if np.random.uniform() < self.opt['sinc_prob']:\n # this sinc filter setting is for kernels ranging from [7, 21]\n if kernel_size < 13:\n omega_c = np.random.uniform(np.pi / 3, np.pi)\n else:\n omega_c = np.random.uniform(np.pi / 5, np.pi)\n kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)\n else:\n kernel = random_mixed_kernels(\n self.kernel_list,\n self.kernel_prob,\n kernel_size,\n self.blur_sigma,\n self.blur_sigma, [-math.pi, math.pi],\n self.betag_range,\n self.betap_range,\n noise_range=None)\n # pad kernel\n pad_size = (21 - kernel_size) // 2\n kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))\n\n # ------------------------ Generate kernels (used in the second degradation) ------------------------ #\n kernel_size = random.choice(self.kernel_range)\n if np.random.uniform() < self.opt['sinc_prob2']:\n if kernel_size < 13:\n omega_c = np.random.uniform(np.pi / 3, np.pi)\n else:\n omega_c = np.random.uniform(np.pi / 5, np.pi)\n kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)\n else:\n kernel2 = random_mixed_kernels(\n self.kernel_list2,\n self.kernel_prob2,\n kernel_size,\n self.blur_sigma2,\n self.blur_sigma2, [-math.pi, math.pi],\n self.betag_range2,\n self.betap_range2,\n noise_range=None)\n\n # pad kernel\n pad_size = (21 - kernel_size) // 2\n kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size)))\n\n # ------------------------------------- the final sinc kernel ------------------------------------- #\n if np.random.uniform() < self.opt['final_sinc_prob']:\n kernel_size = random.choice(self.kernel_range)\n omega_c = np.random.uniform(np.pi / 3, np.pi)\n sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21)\n sinc_kernel = torch.FloatTensor(sinc_kernel)\n else:\n sinc_kernel = self.pulse_tensor\n\n # BGR to RGB, HWC to CHW, numpy to tensor\n img_gt = img2tensor([img_gt], bgr2rgb=True, float32=True)[0]\n kernel = torch.FloatTensor(kernel)\n kernel2 = torch.FloatTensor(kernel2)\n\n return_d = {'gt': img_gt, 'kernel1': kernel, 'kernel2': kernel2, 'sinc_kernel': sinc_kernel, 'gt_path': gt_path}\n return return_d\n\n def __len__(self):\n return len(self.paths)"
},
{
"identifier": "SimpleDataset",
"path": "dataloaders/simple_dataset.py",
"snippet": "class SimpleDataset(Dataset):\n def __init__(self, opt, fix_size=512): \n \n self.opt = opt\n self.image_root = opt['gt_path']\n self.fix_size = fix_size\n exts = ['*.jpg', '*.png']\n self.image_list = []\n for image_root in self.image_root:\n for ext in exts:\n image_list = glob.glob(os.path.join(image_root, ext))\n self.image_list += image_list\n # if add lsdir dataset\n image_list = glob.glob(os.path.join(image_root, '00*', ext))\n self.image_list += image_list\n \n self.crop_preproc = transforms.Compose([\n # transforms.CenterCrop(fix_size),\n transforms.Resize(fix_size)\n # transforms.RandomHorizontalFlip(),\n ])\n\n self.img_preproc = transforms.Compose([\n transforms.ToTensor(),\n ])\n\n # blur settings for the first degradation\n self.blur_kernel_size = opt['blur_kernel_size']\n self.kernel_list = opt['kernel_list']\n self.kernel_prob = opt['kernel_prob'] # a list for each kernel probability\n self.blur_sigma = opt['blur_sigma']\n self.betag_range = opt['betag_range'] # betag used in generalized Gaussian blur kernels\n self.betap_range = opt['betap_range'] # betap used in plateau blur kernels\n self.sinc_prob = opt['sinc_prob'] # the probability for sinc filters\n\n # blur settings for the second degradation\n self.blur_kernel_size2 = opt['blur_kernel_size2']\n self.kernel_list2 = opt['kernel_list2']\n self.kernel_prob2 = opt['kernel_prob2']\n self.blur_sigma2 = opt['blur_sigma2']\n self.betag_range2 = opt['betag_range2']\n self.betap_range2 = opt['betap_range2']\n self.sinc_prob2 = opt['sinc_prob2']\n\n # a final sinc filter\n self.final_sinc_prob = opt['final_sinc_prob']\n\n self.kernel_range = [2 * v + 1 for v in range(3, 11)] # kernel size ranges from 7 to 21\n # TODO: kernel range is now hard-coded, should be in the configure file\n self.pulse_tensor = torch.zeros(21, 21).float() # convolving with pulse tensor brings no blurry effect\n self.pulse_tensor[10, 10] = 1\n\n print(f'The dataset length: {len(self.image_list)}')\n\n\n def __getitem__(self, index):\n image = Image.open(self.image_list[index]).convert('RGB') \n # width, height = image.size \n # if width > height:\n # width_after = self.fix_size\n # height_after = int(height*width_after/width)\n # elif height > width:\n # height_after = self.fix_size\n # width_after = int(width*height_after/height)\n # elif height == width:\n # height_after = self.fix_size\n # width_after = self.fix_size\n image = image.resize((self.fix_size, self.fix_size),Image.LANCZOS)\n # image = self.crop_preproc(image)\n image = self.img_preproc(image)\n\n # ------------------------ Generate kernels (used in the first degradation) ------------------------ #\n kernel_size = random.choice(self.kernel_range)\n if np.random.uniform() < self.opt['sinc_prob']:\n # this sinc filter setting is for kernels ranging from [7, 21]\n if kernel_size < 13:\n omega_c = np.random.uniform(np.pi / 3, np.pi)\n else:\n omega_c = np.random.uniform(np.pi / 5, np.pi)\n kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)\n else:\n kernel = random_mixed_kernels(\n self.kernel_list,\n self.kernel_prob,\n kernel_size,\n self.blur_sigma,\n self.blur_sigma, [-math.pi, math.pi],\n self.betag_range,\n self.betap_range,\n noise_range=None)\n # pad kernel\n pad_size = (21 - kernel_size) // 2\n kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))\n\n # ------------------------ Generate kernels (used in the second degradation) ------------------------ #\n kernel_size = random.choice(self.kernel_range)\n if np.random.uniform() < 
self.opt['sinc_prob2']:\n if kernel_size < 13:\n omega_c = np.random.uniform(np.pi / 3, np.pi)\n else:\n omega_c = np.random.uniform(np.pi / 5, np.pi)\n kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)\n else:\n kernel2 = random_mixed_kernels(\n self.kernel_list2,\n self.kernel_prob2,\n kernel_size,\n self.blur_sigma2,\n self.blur_sigma2, [-math.pi, math.pi],\n self.betag_range2,\n self.betap_range2,\n noise_range=None)\n\n # pad kernel\n pad_size = (21 - kernel_size) // 2\n kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size)))\n\n # ------------------------------------- the final sinc kernel ------------------------------------- #\n if np.random.uniform() < self.opt['final_sinc_prob']:\n kernel_size = random.choice(self.kernel_range)\n omega_c = np.random.uniform(np.pi / 3, np.pi)\n sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21)\n sinc_kernel = torch.FloatTensor(sinc_kernel)\n else:\n sinc_kernel = self.pulse_tensor\n\n # BGR to RGB, HWC to CHW, numpy to tensor\n # img_gt = img2tensor([img_gt], bgr2rgb=True, float32=True)[0]\n kernel = torch.FloatTensor(kernel)\n kernel2 = torch.FloatTensor(kernel2)\n\n return_d = {'gt': image, 'kernel1': kernel, 'kernel2': kernel2, 'sinc_kernel': sinc_kernel, 'lq_path': self.image_list[index]}\n return return_d\n \n\n def __len__(self):\n return len(self.image_list)"
},
{
"identifier": "ram",
"path": "ram/models/ram.py",
"snippet": "def ram(pretrained='', **kwargs):\n model = RAM(**kwargs)\n if pretrained:\n if kwargs['vit'] == 'swin_b':\n model, msg = load_checkpoint_swinbase(model, pretrained, kwargs)\n elif kwargs['vit'] == 'swin_l':\n model, msg = load_checkpoint_swinlarge(model, pretrained, kwargs)\n else:\n model, msg = load_checkpoint(model, pretrained)\n print('vit:', kwargs['vit'])\n# print('msg', msg)\n return model"
},
{
"identifier": "inference_ram",
"path": "ram/inference.py",
"snippet": "def inference_ram(image, model):\n\n with torch.no_grad():\n tags, tags_chinese = model.generate_tag(image)\n\n return tags[0],tags_chinese[0]"
},
{
"identifier": "DiffJPEG",
"path": "basicsr/utils/diffjpeg.py",
"snippet": "class DiffJPEG(nn.Module):\n \"\"\"This JPEG algorithm result is slightly different from cv2.\n DiffJPEG supports batch processing.\n\n Args:\n differentiable(bool): If True, uses custom differentiable rounding function, if False, uses standard torch.round\n \"\"\"\n\n def __init__(self, differentiable=True):\n super(DiffJPEG, self).__init__()\n if differentiable:\n rounding = diff_round\n else:\n rounding = torch.round\n\n self.compress = CompressJpeg(rounding=rounding)\n self.decompress = DeCompressJpeg(rounding=rounding)\n\n def forward(self, x, quality):\n \"\"\"\n Args:\n x (Tensor): Input image, bchw, rgb, [0, 1]\n quality(float): Quality factor for jpeg compression scheme.\n \"\"\"\n factor = quality\n if isinstance(factor, (int, float)):\n factor = quality_to_factor(factor)\n else:\n for i in range(factor.size(0)):\n factor[i] = quality_to_factor(factor[i])\n h, w = x.size()[-2:]\n h_pad, w_pad = 0, 0\n # why should use 16\n if h % 16 != 0:\n h_pad = 16 - h % 16\n if w % 16 != 0:\n w_pad = 16 - w % 16\n x = F.pad(x, (0, w_pad, 0, h_pad), mode='constant', value=0)\n\n y, cb, cr = self.compress(x, factor=factor)\n recovered = self.decompress(y, cb, cr, (h + h_pad), (w + w_pad), factor=factor)\n recovered = recovered[:, :, 0:h, 0:w]\n return recovered"
},
{
"identifier": "USMSharp",
"path": "basicsr/utils/img_process_util.py",
"snippet": "class USMSharp(torch.nn.Module):\n\n def __init__(self, radius=50, sigma=0):\n super(USMSharp, self).__init__()\n if radius % 2 == 0:\n radius += 1\n self.radius = radius\n kernel = cv2.getGaussianKernel(radius, sigma)\n kernel = torch.FloatTensor(np.dot(kernel, kernel.transpose())).unsqueeze_(0)\n self.register_buffer('kernel', kernel)\n\n def forward(self, img, weight=0.5, threshold=10):\n blur = filter2D(img, self.kernel)\n residual = img - blur\n\n mask = torch.abs(residual) * 255 > threshold\n mask = mask.float()\n soft_mask = filter2D(mask, self.kernel)\n sharp = img + weight * residual\n sharp = torch.clip(sharp, 0, 1)\n return soft_mask * sharp + (1 - soft_mask) * img"
},
{
"identifier": "filter2D",
"path": "basicsr/utils/img_process_util.py",
"snippet": "def filter2D(img, kernel):\n \"\"\"PyTorch version of cv2.filter2D\n\n Args:\n img (Tensor): (b, c, h, w)\n kernel (Tensor): (b, k, k)\n \"\"\"\n k = kernel.size(-1)\n b, c, h, w = img.size()\n if k % 2 == 1:\n img = F.pad(img, (k // 2, k // 2, k // 2, k // 2), mode='reflect')\n else:\n raise ValueError('Wrong kernel size')\n\n ph, pw = img.size()[-2:]\n\n if kernel.size(0) == 1:\n # apply the same kernel to all batch images\n img = img.view(b * c, 1, ph, pw)\n kernel = kernel.view(1, 1, k, k)\n return F.conv2d(img, kernel, padding=0).view(b, c, h, w)\n else:\n img = img.view(1, b * c, ph, pw)\n kernel = kernel.view(b, 1, k, k).repeat(1, c, 1, 1).view(b * c, 1, k, k)\n return F.conv2d(img, kernel, groups=b * c).view(b, c, h, w)"
},
{
"identifier": "paired_random_crop",
"path": "basicsr/data/transforms.py",
"snippet": "def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path=None):\n \"\"\"Paired random crop. Support Numpy array and Tensor inputs.\n\n It crops lists of lq and gt images with corresponding locations.\n\n Args:\n img_gts (list[ndarray] | ndarray | list[Tensor] | Tensor): GT images. Note that all images\n should have the same shape. If the input is an ndarray, it will\n be transformed to a list containing itself.\n img_lqs (list[ndarray] | ndarray): LQ images. Note that all images\n should have the same shape. If the input is an ndarray, it will\n be transformed to a list containing itself.\n gt_patch_size (int): GT patch size.\n scale (int): Scale factor.\n gt_path (str): Path to ground-truth. Default: None.\n\n Returns:\n list[ndarray] | ndarray: GT images and LQ images. If returned results\n only have one element, just return ndarray.\n \"\"\"\n\n if not isinstance(img_gts, list):\n img_gts = [img_gts]\n if not isinstance(img_lqs, list):\n img_lqs = [img_lqs]\n\n # determine input type: Numpy array or Tensor\n input_type = 'Tensor' if torch.is_tensor(img_gts[0]) else 'Numpy'\n\n if input_type == 'Tensor':\n h_lq, w_lq = img_lqs[0].size()[-2:]\n h_gt, w_gt = img_gts[0].size()[-2:]\n else:\n h_lq, w_lq = img_lqs[0].shape[0:2]\n h_gt, w_gt = img_gts[0].shape[0:2]\n lq_patch_size = gt_patch_size // scale\n\n if h_gt != h_lq * scale or w_gt != w_lq * scale:\n raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ',\n f'multiplication of LQ ({h_lq}, {w_lq}).')\n if h_lq < lq_patch_size or w_lq < lq_patch_size:\n raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '\n f'({lq_patch_size}, {lq_patch_size}). '\n f'Please remove {gt_path}.')\n\n # randomly choose top and left coordinates for lq patch\n top = random.randint(0, h_lq - lq_patch_size)\n left = random.randint(0, w_lq - lq_patch_size)\n\n # crop lq patch\n if input_type == 'Tensor':\n img_lqs = [v[:, :, top:top + lq_patch_size, left:left + lq_patch_size] for v in img_lqs]\n else:\n img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs]\n\n # crop corresponding gt patch\n top_gt, left_gt = int(top * scale), int(left * scale)\n if input_type == 'Tensor':\n img_gts = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_gts]\n else:\n img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts]\n if len(img_gts) == 1:\n img_gts = img_gts[0]\n if len(img_lqs) == 1:\n img_lqs = img_lqs[0]\n return img_gts, img_lqs"
},
{
"identifier": "triplet_random_crop",
"path": "basicsr/data/transforms.py",
"snippet": "def triplet_random_crop(img_gts, img_lqs, img_segs, gt_patch_size, scale, gt_path=None):\n\n if not isinstance(img_gts, list):\n img_gts = [img_gts]\n if not isinstance(img_lqs, list):\n img_lqs = [img_lqs]\n if not isinstance(img_segs, list):\n img_segs = [img_segs]\n\n # determine input type: Numpy array or Tensor\n input_type = 'Tensor' if torch.is_tensor(img_gts[0]) else 'Numpy'\n\n if input_type == 'Tensor':\n h_lq, w_lq = img_lqs[0].size()[-2:]\n h_gt, w_gt = img_gts[0].size()[-2:]\n h_seg, w_seg = img_segs[0].size()[-2:]\n else:\n h_lq, w_lq = img_lqs[0].shape[0:2]\n h_gt, w_gt = img_gts[0].shape[0:2]\n h_seg, w_seg = img_segs[0].shape[0:2]\n lq_patch_size = gt_patch_size // scale\n\n if h_gt != h_lq * scale or w_gt != w_lq * scale:\n raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ',\n f'multiplication of LQ ({h_lq}, {w_lq}).')\n if h_lq < lq_patch_size or w_lq < lq_patch_size:\n raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '\n f'({lq_patch_size}, {lq_patch_size}). '\n f'Please remove {gt_path}.')\n\n # randomly choose top and left coordinates for lq patch\n top = random.randint(0, h_lq - lq_patch_size)\n left = random.randint(0, w_lq - lq_patch_size)\n\n # crop lq patch\n if input_type == 'Tensor':\n img_lqs = [v[:, :, top:top + lq_patch_size, left:left + lq_patch_size] for v in img_lqs]\n else:\n img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs]\n\n # crop corresponding gt patch\n top_gt, left_gt = int(top * scale), int(left * scale)\n if input_type == 'Tensor':\n img_gts = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_gts]\n else:\n img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts]\n\n if input_type == 'Tensor':\n img_segs = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_segs]\n else:\n img_segs = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_segs]\n\n if len(img_gts) == 1:\n img_gts = img_gts[0]\n if len(img_lqs) == 1:\n img_lqs = img_lqs[0]\n if len(img_segs) == 1:\n img_segs = img_segs[0]\n\n return img_gts, img_lqs, img_segs"
},
{
"identifier": "random_add_gaussian_noise_pt",
"path": "basicsr/data/degradations.py",
"snippet": "def random_add_gaussian_noise_pt(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):\n noise = random_generate_gaussian_noise_pt(img, sigma_range, gray_prob)\n out = img + noise\n if clip and rounds:\n out = torch.clamp((out * 255.0).round(), 0, 255) / 255.\n elif clip:\n out = torch.clamp(out, 0, 1)\n elif rounds:\n out = (out * 255.0).round() / 255.\n return out"
},
{
"identifier": "random_add_poisson_noise_pt",
"path": "basicsr/data/degradations.py",
"snippet": "def random_add_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):\n noise = random_generate_poisson_noise_pt(img, scale_range, gray_prob)\n out = img + noise\n if clip and rounds:\n out = torch.clamp((out * 255.0).round(), 0, 255) / 255.\n elif clip:\n out = torch.clamp(out, 0, 1)\n elif rounds:\n out = (out * 255.0).round() / 255.\n return out"
},
{
"identifier": "random_add_speckle_noise_pt",
"path": "basicsr/data/degradations.py",
"snippet": "def random_add_speckle_noise_pt(img, speckle_std):\n std_range = speckle_std\n std_l = std_range[0]\n std_r = std_range[1]\n mean=0\n std=random.uniform(std_l/255.,std_r/255.)\n gauss=torch.normal(mean=mean,std=std,size=img.size()).to(img.device)\n noisy=img+gauss*img\n noisy=torch.clamp(noisy,0,1)\n return noisy"
},
{
"identifier": "random_add_saltpepper_noise_pt",
"path": "basicsr/data/degradations.py",
"snippet": "def random_add_saltpepper_noise_pt(imgs, saltpepper_amount, saltpepper_svsp):\n p_range = saltpepper_amount\n p = random.uniform(p_range[0], p_range[1])\n q_range = saltpepper_svsp\n q = random.uniform(q_range[0], q_range[1])\n\n imgs = imgs.permute(0,2,3,1)\n\n outputs = []\n for i in range(imgs.size(0)):\n img = imgs[i]\n out = img.clone()\n flipped = np.random.choice([True, False], size=img.shape,\n p=[p, 1 - p])\n salted = np.random.choice([True, False], size=img.shape,\n p=[q, 1 - q])\n peppered = ~salted\n temp = flipped & salted\n out[flipped & salted] = 1\n out[flipped & peppered] = 0.\n noisy = torch.clamp(out, 0, 1)\n\n outputs.append(noisy.permute(2,0,1))\n if len(outputs)>1:\n return torch.cat(outputs, dim=0)\n else:\n return outputs[0].unsqueeze(0)"
},
{
"identifier": "bivariate_Gaussian",
"path": "basicsr/data/degradations.py",
"snippet": "def bivariate_Gaussian(kernel_size, sig_x, sig_y, theta, grid=None, isotropic=True):\n \"\"\"Generate a bivariate isotropic or anisotropic Gaussian kernel.\n\n In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` is ignored.\n\n Args:\n kernel_size (int):\n sig_x (float):\n sig_y (float):\n theta (float): Radian measurement.\n grid (ndarray, optional): generated by :func:`mesh_grid`,\n with the shape (K, K, 2), K is the kernel size. Default: None\n isotropic (bool):\n\n Returns:\n kernel (ndarray): normalized kernel.\n \"\"\"\n if grid is None:\n grid, _, _ = mesh_grid(kernel_size)\n if isotropic:\n sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])\n else:\n sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)\n kernel = pdf2(sigma_matrix, grid)\n kernel = kernel / np.sum(kernel)\n return kernel"
}
] | import os
import cv2
import torch
import torch.nn.functional as F
import argparse
import sys
import random
import torch.nn.functional as F
from pytorch_lightning import seed_everything
from basicsr.data.realesrgan_dataset import RealESRGANDataset
from dataloaders.simple_dataset import SimpleDataset
from ram.models import ram
from ram import inference_ram as inference
from torchvision.transforms import Normalize, Compose
from basicsr.utils import DiffJPEG, USMSharp
from basicsr.utils.img_process_util import filter2D
from basicsr.data.transforms import paired_random_crop, triplet_random_crop
from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt, random_add_speckle_noise_pt, random_add_saltpepper_noise_pt, bivariate_Gaussian | 9,180 | '''
* SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution
* Modified from diffusers by Rongyuan Wu
* 24/12/2023
'''
sys.path.append(os.getcwd())
parser = argparse.ArgumentParser()
parser.add_argument("--gt_path", nargs='+', default=['PATH 1', 'PATH 2'], help='the path of high-resolution images')
parser.add_argument("--save_dir", type=str, default='preset/datasets/train_datasets/training_for_dape', help='the save path of the training dataset.')
parser.add_argument("--start_gpu", type=int, default=1, help='if you have 5 GPUs, you can set it to 1/2/3/4/5 on five gpus for parallel processing., which will save your time. ')
parser.add_argument("--batch_size", type=int, default=10, help='smaller batch size means much time but more extensive degradation for making the training dataset.')
parser.add_argument("--epoch", type=int, default=1, help='decide how many epochs to create for the dataset.')
args = parser.parse_args()
print(f'====== START GPU: {args.start_gpu} =========')
seed_everything(24+args.start_gpu*1000)
args_training_dataset = {}
# Please set your gt path here. If you have multi dirs, you can set it as ['PATH1', 'PATH2', 'PATH3', ...]
args_training_dataset['gt_path'] = args.gt_path
#################### REALESRGAN SETTING ###########################
args_training_dataset['queue_size'] = 160
args_training_dataset['crop_size'] = 512
args_training_dataset['io_backend'] = {}
args_training_dataset['io_backend']['type'] = 'disk'
args_training_dataset['blur_kernel_size'] = 21
args_training_dataset['kernel_list'] = ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso']
args_training_dataset['kernel_prob'] = [0.45, 0.25, 0.12, 0.03, 0.12, 0.03]
args_training_dataset['sinc_prob'] = 0.1
args_training_dataset['blur_sigma'] = [0.2, 3]
args_training_dataset['betag_range'] = [0.5, 4]
args_training_dataset['betap_range'] = [1, 2]
args_training_dataset['blur_kernel_size2'] = 11
args_training_dataset['kernel_list2'] = ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso']
args_training_dataset['kernel_prob2'] = [0.45, 0.25, 0.12, 0.03, 0.12, 0.03]
args_training_dataset['sinc_prob2'] = 0.1
args_training_dataset['blur_sigma2'] = [0.2, 1.5]
args_training_dataset['betag_range2'] = [0.5, 4.0]
args_training_dataset['betap_range2'] = [1, 2]
args_training_dataset['final_sinc_prob'] = 0.8
args_training_dataset['use_hflip'] = True
args_training_dataset['use_rot'] = False
| '''
* SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution
* Modified from diffusers by Rongyuan Wu
* 24/12/2023
'''
sys.path.append(os.getcwd())
parser = argparse.ArgumentParser()
parser.add_argument("--gt_path", nargs='+', default=['PATH 1', 'PATH 2'], help='the path of high-resolution images')
parser.add_argument("--save_dir", type=str, default='preset/datasets/train_datasets/training_for_dape', help='the save path of the training dataset.')
parser.add_argument("--start_gpu", type=int, default=1, help='if you have 5 GPUs, you can set it to 1/2/3/4/5 on five gpus for parallel processing., which will save your time. ')
parser.add_argument("--batch_size", type=int, default=10, help='smaller batch size means much time but more extensive degradation for making the training dataset.')
parser.add_argument("--epoch", type=int, default=1, help='decide how many epochs to create for the dataset.')
args = parser.parse_args()
print(f'====== START GPU: {args.start_gpu} =========')
seed_everything(24+args.start_gpu*1000)
args_training_dataset = {}
# Please set your gt path here. If you have multi dirs, you can set it as ['PATH1', 'PATH2', 'PATH3', ...]
args_training_dataset['gt_path'] = args.gt_path
#################### REALESRGAN SETTING ###########################
args_training_dataset['queue_size'] = 160
args_training_dataset['crop_size'] = 512
args_training_dataset['io_backend'] = {}
args_training_dataset['io_backend']['type'] = 'disk'
args_training_dataset['blur_kernel_size'] = 21
args_training_dataset['kernel_list'] = ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso']
args_training_dataset['kernel_prob'] = [0.45, 0.25, 0.12, 0.03, 0.12, 0.03]
args_training_dataset['sinc_prob'] = 0.1
args_training_dataset['blur_sigma'] = [0.2, 3]
args_training_dataset['betag_range'] = [0.5, 4]
args_training_dataset['betap_range'] = [1, 2]
args_training_dataset['blur_kernel_size2'] = 11
args_training_dataset['kernel_list2'] = ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso']
args_training_dataset['kernel_prob2'] = [0.45, 0.25, 0.12, 0.03, 0.12, 0.03]
args_training_dataset['sinc_prob2'] = 0.1
args_training_dataset['blur_sigma2'] = [0.2, 1.5]
args_training_dataset['betag_range2'] = [0.5, 4.0]
args_training_dataset['betap_range2'] = [1, 2]
args_training_dataset['final_sinc_prob'] = 0.8
args_training_dataset['use_hflip'] = True
args_training_dataset['use_rot'] = False
| train_dataset = SimpleDataset(args_training_dataset, fix_size=512) | 1 | 2023-11-27 08:50:33+00:00 | 12k |
mu-cai/ViP-LLaVA | llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "build_attn_bias",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "MPTBlock",
"path": "llava/model/language_model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)"
},
{
"identifier": "SharedEmbedding",
"path": "llava/model/language_model/mpt/custom_embedding.py",
"snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)"
},
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "llava/model/language_model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}"
},
{
"identifier": "MPTConfig",
"path": "llava/model/language_model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. 
Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) 
and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')"
},
{
"identifier": "AutoTokenizerForMOD",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')"
},
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. 
' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')"
},
{
"identifier": "init_empty_weights",
"path": "llava/model/language_model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}"
},
{
"identifier": "generic_param_init_fn_",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')"
}
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 7,343 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
    from .flash_attn_triton import flash_attn_func
except:
    pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
    from .flash_attn_triton import flash_attn_func
except:
    pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) | self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max) | 1 | 2023-12-02 05:52:18+00:00 | 12k |
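A minimal usage sketch of the attn_bias_shape and build_attn_bias helpers shown above (assuming torch is available and the two functions are imported from the attention module of this record); with the 'torch' implementation and alibi enabled, but no prefix-LM or sequence-id masking, the bias buffer is shaped (1, n_heads, 1, seq_len) and then filled with the alibi bias, mirroring what MPTModel._attn_bias does lazily on its first call:

import torch

n_heads, seq_len = 16, 2048
shape = attn_bias_shape('torch', n_heads, seq_len, alibi=True, prefix_lm=False,
                        causal=True, use_sequence_id=False)   # -> (1, 16, 1, 2048)
attn_bias = torch.zeros(shape, dtype=torch.float32)
# build_attn_bias adds the alibi slopes, which stand in for learned position embeddings
attn_bias = build_attn_bias('torch', attn_bias, n_heads, seq_len,
                            causal=True, alibi=True, alibi_bias_max=8)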
Meituan-AutoML/Lenna | model/llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "AutoTokenizerForMOD",
"path": "model/llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "model/llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f\"<extra_id_{i}>\" for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens(\"<pad>\", special_tokens=True)\n tokenizer.pad_token = \"<pad>\"\n assert tokenizer.pad_token_id is not None\n sentinels = \"\".join([f\"<extra_id_{i}>\" for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
{
"identifier": "attn_bias_shape",
"path": "model/llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(\n attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id\n):\n if attn_impl == \"flash\":\n return None\n elif attn_impl in [\"torch\", \"triton\"]:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f\"attn_impl={attn_impl!r} is an invalid setting.\")"
},
{
"identifier": "build_attn_bias",
"path": "model/llava/model/language_model/mpt/attention.py",
"snippet": "def build_attn_bias(\n attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8\n):\n if attn_impl == \"flash\":\n return None\n elif attn_impl in [\"torch\", \"triton\"]:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(\n build_alibi_bias(\n n_heads,\n seq_len,\n full=not causal,\n alibi_bias_max=alibi_bias_max,\n device=device,\n dtype=dtype,\n )\n )\n return attn_bias\n else:\n raise ValueError(f\"attn_impl={attn_impl!r} is an invalid setting.\")"
},
{
"identifier": "MPTBlock",
"path": "model/llava/model/language_model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n def __init__(\n self,\n d_model: int,\n n_heads: int,\n expansion_ratio: int,\n attn_config: Dict = {\n \"attn_type\": \"multihead_attention\",\n \"attn_pdrop\": 0.0,\n \"attn_impl\": \"triton\",\n \"qk_ln\": False,\n \"clip_qkv\": None,\n \"softmax_scale\": None,\n \"prefix_lm\": False,\n \"attn_uses_sequence_id\": False,\n \"alibi\": False,\n \"alibi_bias_max\": 8,\n },\n resid_pdrop: float = 0.0,\n norm_type: str = \"low_precision_layernorm\",\n verbose: int = 0,\n device: Optional[str] = None,\n **kwargs\n ):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config[\"attn_type\"]]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(\n attn_impl=attn_config[\"attn_impl\"],\n clip_qkv=attn_config[\"clip_qkv\"],\n qk_ln=attn_config[\"qk_ln\"],\n softmax_scale=attn_config[\"softmax_scale\"],\n attn_pdrop=attn_config[\"attn_pdrop\"],\n d_model=d_model,\n n_heads=n_heads,\n verbose=verbose,\n device=device,\n )\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(\n d_model=d_model, expansion_ratio=expansion_ratio, device=device\n )\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(\n self,\n x: torch.Tensor,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n attn_bias: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.ByteTensor] = None,\n is_causal: bool = True,\n ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(\n a,\n past_key_value=past_key_value,\n attn_bias=attn_bias,\n attention_mask=attention_mask,\n is_causal=is_causal,\n )\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)"
},
{
"identifier": "MPTConfig",
"path": "model/llava/model/language_model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = \"mpt\"\n\n def __init__(\n self,\n d_model: int = 2048,\n n_heads: int = 16,\n n_layers: int = 24,\n expansion_ratio: int = 4,\n max_seq_len: int = 2048,\n vocab_size: int = 50368,\n resid_pdrop: float = 0.0,\n emb_pdrop: float = 0.0,\n learned_pos_emb: bool = True,\n attn_config: Dict = attn_config_defaults,\n init_device: str = \"cpu\",\n logit_scale: Optional[Union[float, str]] = None,\n no_bias: bool = False,\n verbose: int = 0,\n embedding_fraction: float = 1.0,\n norm_type: str = \"low_precision_layernorm\",\n use_cache: bool = False,\n init_config: Dict = init_config_defaults,\n **kwargs,\n ):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. 
Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if \"name\" in kwargs:\n del kwargs[\"name\"]\n if \"loss_fn\" in kwargs:\n del kwargs[\"loss_fn\"]\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for k, v in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(\n self.attn_config, attn_config_defaults\n )\n self.init_config = self._set_config_defaults(\n self.init_config, init_config_defaults\n )\n if self.d_model % self.n_heads != 0:\n raise ValueError(\"d_model must be divisible by n_heads\")\n if any(\n (\n prob < 0 or prob > 1\n for prob in [\n self.attn_config[\"attn_pdrop\"],\n self.resid_pdrop,\n self.emb_pdrop,\n ]\n )\n ):\n raise ValueError(\n \"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\"\n )\n if self.attn_config[\"attn_impl\"] not in [\"torch\", \"flash\", \"triton\"]:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config[\"prefix_lm\"] and self.attn_config[\"attn_impl\"] not in [\n \"torch\",\n \"triton\",\n ]:\n raise NotImplementedError(\n \"prefix_lm only implemented with torch and triton attention.\"\n )\n if self.attn_config[\"alibi\"] and self.attn_config[\"attn_impl\"] not in [\n \"torch\",\n \"triton\",\n ]:\n raise NotImplementedError(\n \"alibi only implemented with torch and triton attention.\"\n )\n if self.attn_config[\"attn_uses_sequence_id\"] and self.attn_config[\n \"attn_impl\"\n ] not in [\"torch\", \"triton\"]:\n raise NotImplementedError(\n \"attn_uses_sequence_id only implemented with torch and triton attention.\"\n )\n if 
self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError(\n \"model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!\"\n )\n if isinstance(self.logit_scale, str) and self.logit_scale != \"inv_sqrt_d_model\":\n raise ValueError(\n f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\"\n )\n if self.init_config.get(\"name\", None) is None:\n raise ValueError(\n f\"self.init_config={self.init_config!r} 'name' needs to be set.\"\n )\n if not self.learned_pos_emb and (not self.attn_config[\"alibi\"]):\n raise ValueError(\n f\"Positional information must be provided to the model using either learned_pos_emb or alibi.\"\n )"
},
{
"identifier": "SharedEmbedding",
"path": "model/llava/model/language_model/mpt/custom_embedding.py",
"snippet": "class SharedEmbedding(nn.Embedding):\n def forward(self, input: Tensor, unembed: bool = False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)"
},
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "model/llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if \"bidirectional_mask\" not in batch:\n if batch.get(\"mode\", None) == \"icl_task\":\n batch[\"bidirectional_mask\"] = batch[\"attention_mask\"].clone()\n for i, continuation_indices in enumerate(batch[\"continuation_indices\"]):\n batch[\"bidirectional_mask\"][i, continuation_indices] = 0\n elif \"labels\" in batch and \"attention_mask\" in batch:\n batch[\"bidirectional_mask\"] = torch.logical_and(\n torch.eq(batch[\"attention_mask\"], 1), torch.eq(batch[\"labels\"], -100)\n ).type_as(batch[\"attention_mask\"])\n else:\n raise KeyError(\n \"No bidirectional_mask in batch and not sure how to construct one.\"\n )"
},
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "model/llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(\n f\"Cannot convert model to Prefix LM. 
\"\n + f\"Model does not belong to set of supported HF models:\"\n + f\"\\n{_SUPPORTED_HF_MODELS}\"\n )"
},
{
"identifier": "init_empty_weights",
"path": "model/llava/model/language_model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool = False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device(\"meta\"), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "model/llava/model/language_model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {\n \"layernorm\": torch.nn.LayerNorm,\n \"low_precision_layernorm\": LPLayerNorm,\n \"rmsnorm\": RMSNorm,\n \"low_precision_rmsnorm\": LPRMSNorm,\n}"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "model/llava/model/language_model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {\n \"default_\": torch_default_param_init_fn_,\n \"baseline_\": baseline_param_init_fn_,\n \"kaiming_uniform_\": kaiming_uniform_param_init_fn_,\n \"kaiming_normal_\": kaiming_normal_param_init_fn_,\n \"neox_init_\": neox_param_init_fn_,\n \"small_init_\": small_param_init_fn_,\n \"xavier_uniform_\": xavier_uniform_param_init_fn_,\n \"xavier_normal_\": xavier_normal_param_init_fn_,\n}"
},
{
"identifier": "generic_param_init_fn_",
"path": "model/llava/model/language_model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(\n module: nn.Module,\n init_fn_,\n n_layers: int,\n d_model: Optional[int] = None,\n init_div_is_residual: Union[int, float, str, bool] = True,\n emb_init_std: Optional[float] = None,\n emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,\n verbose: int = 0,\n **kwargs,\n):\n del kwargs\n if verbose > 1:\n warnings.warn(f\"If model has bias parameters they are initialized to 0.\")\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(\n init_div_is_residual, int\n ):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(\n f\"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}\"\n )\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(\n f\"Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. \"\n + f\"Set `init_div_is_residual: false` in init config to disable this.\"\n )\n if isinstance(module, nn.Linear):\n if hasattr(module, \"_fused\"):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, \"_is_residual\", False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f\"Embedding layer initialized to 0.\")\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(\n f\"Embedding layer initialized using normal distribution with mean=0 and std={std!r}.\"\n )\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(\n f\"Uniform init requires a min and a max limit. User input: {lim}.\"\n )\n if lim[0] == lim[1]:\n warnings.warn(f\"Embedding layer initialized to {lim[0]}.\")\n else:\n if lim == 0:\n warnings.warn(f\"Embedding layer initialized to 0.\")\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(\n f\"Embedding layer initialized using uniform distribution in range {lim}.\"\n )\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(\n f\"Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.\"\n )\n if hasattr(module, \"weight\") and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, \"bias\") and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert (\n module.q_proj_weight is None\n and module.k_proj_weight is None\n and (module.v_proj_weight is None)\n )\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for s, e in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert (\n module.q_proj_weight is not None\n and module.k_proj_weight is not None\n and (module.v_proj_weight is not None)\n )\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(\n module.out_proj, \"_is_residual\", False\n ):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(\n f\"{module.__class__.__name__} parameters are not initialized by param_init_fn.\"\n )"
}
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import (PreTrainedModel, PreTrainedTokenizer,
PreTrainedTokenizerFast)
from transformers.modeling_outputs import (BaseModelOutputWithPast,
CausalLMOutputWithPast)
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .configuration_mpt import MPTConfig
from .custom_embedding import SharedEmbedding
from .hf_prefixlm_converter import (add_bidirectional_mask_if_missing,
convert_hf_causal_lm_to_prefix_lm)
from .meta_init_context import init_empty_weights
from .norm import NORM_CLASS_REGISTRY
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 7,329 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
    from .flash_attn_triton import flash_attn_func
except:
    pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = "model"
_no_split_modules = ["MPTBlock"]
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config["attn_impl"]
self.prefix_lm = config.attn_config["prefix_lm"]
self.attn_uses_sequence_id = config.attn_config["attn_uses_sequence_id"]
self.alibi = config.attn_config["alibi"]
self.alibi_bias_max = config.attn_config["alibi_bias_max"]
if config.init_device == "mixed":
if dist.get_local_rank() == 0:
config.init_device = "cpu"
else:
config.init_device = "meta"
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = " | ".join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(
f"Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options})."
)
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
    from .flash_attn_triton import flash_attn_func
except:
    pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = "model"
_no_split_modules = ["MPTBlock"]
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config["attn_impl"]
self.prefix_lm = config.attn_config["prefix_lm"]
self.attn_uses_sequence_id = config.attn_config["attn_uses_sequence_id"]
self.alibi = config.attn_config["alibi"]
self.alibi_bias_max = config.attn_config["alibi_bias_max"]
if config.init_device == "mixed":
if dist.get_local_rank() == 0:
config.init_device = "cpu"
else:
config.init_device = "meta"
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = " | ".join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(
f"Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options})."
)
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction | self.wte = SharedEmbedding( | 6 | 2023-12-04 11:03:26+00:00 | 12k |
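A minimal sketch of the SharedEmbedding weight tying used for MPTModel.wte (assuming torch is available and SharedEmbedding is imported from the custom_embedding module of this record); the same weight matrix embeds token ids on the way in and, with unembed=True, projects hidden states back to vocabulary logits via F.linear:

import torch

wte = SharedEmbedding(50368, 2048)        # vocab_size, d_model (MPTConfig defaults)
token_ids = torch.tensor([[1, 2, 3]])
hidden = wte(token_ids)                   # (1, 3, 2048) token embeddings
logits = wte(hidden, unembed=True)        # (1, 3, 50368) logits from the tied weight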
xmu-xiaoma666/X-Dreamer | unet_2d_condition_multiatt.py | [
{
"identifier": "AttentionProcessor",
"path": "attention_processor.py",
"snippet": "class Attention(nn.Module):\nclass AttnProcessor:\nclass LoRAAttnProcessor(nn.Module):\nclass CustomDiffusionAttnProcessor(nn.Module):\nclass AttnAddedKVProcessor:\nclass AttnAddedKVProcessor2_0:\nclass LoRAAttnAddedKVProcessor(nn.Module):\nclass XFormersAttnAddedKVProcessor:\nclass XFormersAttnProcessor:\nclass AttnProcessor2_0:\nclass LoRAXFormersAttnProcessor(nn.Module):\nclass LoRAAttnProcessor2_0(nn.Module):\nclass CustomDiffusionXFormersAttnProcessor(nn.Module):\nclass SlicedAttnProcessor:\nclass SlicedAttnAddedKVProcessor:\nclass SpatialNorm(nn.Module):\n def __init__(\n self,\n query_dim: int,\n cross_attention_dim: Optional[int] = None,\n heads: int = 8,\n dim_head: int = 64,\n dropout: float = 0.0,\n bias=False,\n upcast_attention: bool = False,\n upcast_softmax: bool = False,\n cross_attention_norm: Optional[str] = None,\n cross_attention_norm_num_groups: int = 32,\n added_kv_proj_dim: Optional[int] = None,\n norm_num_groups: Optional[int] = None,\n spatial_norm_dim: Optional[int] = None,\n out_bias: bool = True,\n scale_qk: bool = True,\n only_cross_attention: bool = False,\n eps: float = 1e-5,\n rescale_output_factor: float = 1.0,\n residual_connection: bool = False,\n _from_deprecated_attn_block=False,\n processor: Optional[\"AttnProcessor\"] = None,\n ):\n def set_use_memory_efficient_attention_xformers(\n self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None\n ):\n def set_attention_slice(self, slice_size):\n def set_processor(self, processor: \"AttnProcessor\"):\n def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, index=None, came_posfeat=None, **cross_attention_kwargs):\n def batch_to_head_dim(self, tensor):\n def head_to_batch_dim(self, tensor, out_dim=3):\n def get_attention_scores(self, query, key, attention_mask=None):\n def get_attention_scores_for_query(self, query, key, attention_mask=None):\n def prepare_attention_mask(self, attention_mask, target_length, batch_size=None, out_dim=3):\n def norm_encoder_hidden_states(self, encoder_hidden_states):\n def __call__(\n self,\n attn: Attention,\n hidden_states,\n encoder_hidden_states=None,\n attention_mask=None,\n temb=None,\n #############################\n index=None,\n came_posfeat = None\n ############################\n ):\n def __init__(self, hidden_size, cross_attention_dim=None, rank=4, network_alpha=None, **kwargs):\n def __call__(\n self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0, temb=None\n ):\n def __init__(\n self,\n train_kv=True,\n train_q_out=True,\n hidden_size=None,\n cross_attention_dim=None,\n out_bias=True,\n dropout=0.0,\n ):\n def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):\n def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):\n def __init__(self):\n def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):\n def __init__(self, hidden_size, cross_attention_dim=None, rank=4, network_alpha=None):\n def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0):\n def __init__(self, attention_op: Optional[Callable] = None):\n def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):\n def __init__(self, attention_op: Optional[Callable] = None):\n def __call__(\n self,\n attn: Attention,\n hidden_states: torch.FloatTensor,\n 
encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n temb: Optional[torch.FloatTensor] = None,\n #############################\n index=None,\n came_posfeat=None\n ############################\n ):\n def __init__(self):\n def __call__(\n self,\n attn: Attention,\n hidden_states,\n encoder_hidden_states=None,\n attention_mask=None,\n temb=None,\n ):\n def __init__(\n self,\n hidden_size,\n cross_attention_dim,\n rank=4,\n attention_op: Optional[Callable] = None,\n network_alpha=None,\n **kwargs,\n ):\n def __call__(\n self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0, temb=None\n ):\n def __init__(self, hidden_size, cross_attention_dim=None, rank=4, network_alpha=None, **kwargs):\n def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0):\n def __init__(\n self,\n train_kv=True,\n train_q_out=False,\n hidden_size=None,\n cross_attention_dim=None,\n out_bias=True,\n dropout=0.0,\n attention_op: Optional[Callable] = None,\n ):\n def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):\n def __init__(self, slice_size):\n def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):\n def __init__(self, slice_size):\n def __call__(self, attn: \"Attention\", hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None):\n def __init__(\n self,\n f_channels,\n zq_channels,\n ):\n def forward(self, f, zq):\nLORA_ATTENTION_PROCESSORS = (\n LoRAAttnProcessor,\n LoRAAttnProcessor2_0,\n LoRAXFormersAttnProcessor,\n LoRAAttnAddedKVProcessor,\n)"
},
{
"identifier": "UNetMidBlock2DCrossAttn",
"path": "unet_2d_blocks.py",
"snippet": "class UNetMidBlock2DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n attention_type=\"default\",\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n attentions = []\n\n for _ in range(num_layers):\n if not dual_cross_attention:\n attentions.append(\n Transformer2DModel(\n num_attention_heads,\n in_channels // num_attention_heads,\n in_channels=in_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n attention_type=attention_type,\n )\n )\n else:\n attentions.append(\n DualTransformer2DModel(\n num_attention_heads,\n in_channels // num_attention_heads,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ###################################################\n index: Optional[torch.FloatTensor] = None,\n came_posfeat: Optional[torch.FloatTensor] = None,\n ###################################################\n ) -> torch.FloatTensor:\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet in zip(self.attentions, self.resnets[1:]):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n 
attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet),\n hidden_states,\n temb,\n **ckpt_kwargs,\n )\n else:\n hidden_states, attention_map = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n #########################\n index=index,\n came_posfeat = came_posfeat,\n #############################\n )#[0]\n ##########################################\n hidden_states = hidden_states[0]\n ############################################\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states, attention_map"
},
{
"identifier": "UNetMidBlock2DSimpleCrossAttn",
"path": "unet_2d_blocks.py",
"snippet": "class UNetMidBlock2DSimpleCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attention_head_dim=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n skip_time_act=False,\n only_cross_attention=False,\n cross_attention_norm=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n\n self.attention_head_dim = attention_head_dim\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n self.num_heads = in_channels // self.attention_head_dim\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n skip_time_act=skip_time_act,\n )\n ]\n attentions = []\n\n for _ in range(num_layers):\n processor = (\n AttnAddedKVProcessor2_0() if hasattr(F, \"scaled_dot_product_attention\") else AttnAddedKVProcessor()\n )\n\n attentions.append(\n Attention(\n query_dim=in_channels,\n cross_attention_dim=in_channels,\n heads=self.num_heads,\n dim_head=self.attention_head_dim,\n added_kv_proj_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n bias=True,\n upcast_softmax=True,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n processor=processor,\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n skip_time_act=skip_time_act,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n\n if attention_mask is None:\n # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask.\n mask = None if encoder_hidden_states is None else encoder_attention_mask\n else:\n # when attention_mask is defined: we don't even check for encoder_attention_mask.\n # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks.\n # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask.\n # then we can simplify this whole if/else block to:\n # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask\n mask = attention_mask\n\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet in zip(self.attentions, self.resnets[1:]):\n # attn\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n 
attention_mask=mask,\n **cross_attention_kwargs,\n )\n\n # resnet\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "unet_2d_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n attention_type=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n downsample_type=None,\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n\n if down_block_type == \"DownBlock2D\":\n return DownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"ResnetDownsampleBlock2D\":\n return ResnetDownsampleBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n elif down_block_type == \"AttnDownBlock2D\":\n if add_downsample is False:\n downsample_type = None\n else:\n downsample_type = downsample_type or \"conv\" # default to 'conv'\n return AttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n downsample_type=downsample_type,\n )\n elif down_block_type == \"CrossAttnDownBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock2D\")\n return CrossAttnDownBlock2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n attention_type=attention_type,\n )\n elif down_block_type == \"SimpleCrossAttnDownBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnDownBlock2D\")\n 
return SimpleCrossAttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif down_block_type == \"SkipDownBlock2D\":\n return SkipDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"AttnSkipDownBlock2D\":\n return AttnSkipDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"DownEncoderBlock2D\":\n return DownEncoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"AttnDownEncoderBlock2D\":\n return AttnDownEncoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"KDownBlock2D\":\n return KDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n )\n elif down_block_type == \"KCrossAttnDownBlock2D\":\n return KCrossAttnDownBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n add_self_attention=True if not add_downsample else False,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "unet_2d_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n attention_type=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n upsample_type=None,\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock2D\":\n return UpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"ResnetUpsampleBlock2D\":\n return ResnetUpsampleBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n elif up_block_type == \"CrossAttnUpBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock2D\")\n return CrossAttnUpBlock2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n attention_type=attention_type,\n )\n elif up_block_type == \"SimpleCrossAttnUpBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnUpBlock2D\")\n return SimpleCrossAttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n 
cross_attention_norm=cross_attention_norm,\n )\n elif up_block_type == \"AttnUpBlock2D\":\n if add_upsample is False:\n upsample_type = None\n else:\n upsample_type = upsample_type or \"conv\" # default to 'conv'\n\n return AttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n upsample_type=upsample_type,\n )\n elif up_block_type == \"SkipUpBlock2D\":\n return SkipUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"AttnSkipUpBlock2D\":\n return AttnSkipUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"UpDecoderBlock2D\":\n return UpDecoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n temb_channels=temb_channels,\n )\n elif up_block_type == \"AttnUpDecoderBlock2D\":\n return AttnUpDecoderBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n temb_channels=temb_channels,\n )\n elif up_block_type == \"KUpBlock2D\":\n return KUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n )\n elif up_block_type == \"KCrossAttnUpBlock2D\":\n return KCrossAttnUpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n )\n\n raise ValueError(f\"{up_block_type} does not exist.\")"
}
] | from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import UNet2DConditionLoadersMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.activations import get_activation
from attention_processor import AttentionProcessor, AttnProcessor
from diffusers.models.embeddings import (
GaussianFourierProjection,
ImageHintTimeEmbedding,
ImageProjection,
ImageTimeEmbedding,
PositionNet,
TextImageProjection,
TextImageTimeEmbedding,
TextTimeEmbedding,
TimestepEmbedding,
Timesteps,
)
from diffusers.models.modeling_utils import ModelMixin
from unet_2d_blocks import (
UNetMidBlock2DCrossAttn,
UNetMidBlock2DSimpleCrossAttn,
get_down_block,
get_up_block,
)
import torch
import torch.nn as nn
import torch.utils.checkpoint | 10,544 | add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=reversed_layers_per_block[i] + 1,
transformer_layers_per_block=reversed_transformer_layers_per_block[i],
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=blocks_time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=reversed_cross_attention_dim[i],
num_attention_heads=reversed_num_attention_heads[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
resnet_skip_time_act=resnet_skip_time_act,
resnet_out_scale_factor=resnet_out_scale_factor,
cross_attention_norm=cross_attention_norm,
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
if norm_num_groups is not None:
self.conv_norm_out = nn.GroupNorm(
num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
)
self.conv_act = get_activation(act_fn)
else:
self.conv_norm_out = None
self.conv_act = None
conv_out_padding = (conv_out_kernel - 1) // 2
self.conv_out = nn.Conv2d(
block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
)
if attention_type == "gated":
positive_len = 768
if isinstance(cross_attention_dim, int):
positive_len = cross_attention_dim
elif isinstance(cross_attention_dim, tuple) or isinstance(cross_attention_dim, list):
positive_len = cross_attention_dim[0]
self.position_net = PositionNet(positive_len=positive_len, out_dim=cross_attention_dim)
@property
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by their weight names.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "set_processor"):
processors[f"{name}.processor"] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or a single `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
def set_default_attn_processor(self):
"""
Disables custom attention processors and sets the default attention implementation.
"""
| # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet2DConditionOutput(BaseOutput):
"""
The output of [`UNet2DConditionModel`].
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor = None
class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
r"""
A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
shaped output.
This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
for all models (such as downloading or saving).
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
Whether to flip the sin to cos in the time embedding.
freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
The tuple of downsample blocks to use.
mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
Block type for middle of UNet, it can be either `UNetMidBlock2DCrossAttn` or
`UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
The tuple of upsample blocks to use.
only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`):
Whether to include self-attention in the basic transformer blocks, see
[`~models.attention.BasicTransformerBlock`].
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
If `None`, normalization and activation layers are skipped in post-processing.
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
The dimension of the cross attention features.
transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
[`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
[`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
encoder_hid_dim (`int`, *optional*, defaults to None):
If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
dimension to `cross_attention_dim`.
encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
num_attention_heads (`int`, *optional*):
The number of attention heads. If not defined, defaults to `attention_head_dim`.
resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
class_embed_type (`str`, *optional*, defaults to `None`):
The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
`"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
addition_embed_type (`str`, *optional*, defaults to `None`):
Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
"text". "text" will use the `TextTimeEmbedding` layer.
addition_time_embed_dim (`int`, *optional*, defaults to `None`):
Dimension for the timestep embeddings.
num_class_embeds (`int`, *optional*, defaults to `None`):
Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
class conditioning with `class_embed_type` equal to `None`.
time_embedding_type (`str`, *optional*, defaults to `positional`):
The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
time_embedding_dim (`int`, *optional*, defaults to `None`):
An optional override for the dimension of the projected time embedding.
time_embedding_act_fn (`str`, *optional*, defaults to `None`):
Optional activation function to use only once on the time embeddings before they are passed to the rest of
the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
timestep_post_act (`str`, *optional*, defaults to `None`):
The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
time_cond_proj_dim (`int`, *optional*, defaults to `None`):
The dimension of `cond_proj` layer in the timestep embedding.
conv_in_kernel (`int`, *optional*, defaults to `3`): The kernel size of `conv_in` layer.
conv_out_kernel (`int`, *optional*, defaults to `3`): The kernel size of `conv_out` layer.
projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
`class_embed_type="projection"`. Required when `class_embed_type="projection"`.
class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
embeddings with the class embeddings.
mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If
`only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
`only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Defaults to `False`
otherwise.
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: Union[int, Tuple[int]] = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: Union[int, Tuple[int]] = 1280,
transformer_layers_per_block: Union[int, Tuple[int]] = 1,
encoder_hid_dim: Optional[int] = None,
encoder_hid_dim_type: Optional[str] = None,
attention_head_dim: Union[int, Tuple[int]] = 8,
num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
addition_embed_type: Optional[str] = None,
addition_time_embed_dim: Optional[int] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
resnet_skip_time_act: bool = False,
resnet_out_scale_factor: int = 1.0,
time_embedding_type: str = "positional",
time_embedding_dim: Optional[int] = None,
time_embedding_act_fn: Optional[str] = None,
timestep_post_act: Optional[str] = None,
time_cond_proj_dim: Optional[int] = None,
conv_in_kernel: int = 3,
conv_out_kernel: int = 3,
projection_class_embeddings_input_dim: Optional[int] = None,
attention_type: str = "default",
class_embeddings_concat: bool = False,
mid_block_only_cross_attention: Optional[bool] = None,
cross_attention_norm: Optional[str] = None,
addition_embed_type_num_heads=64,
):
super().__init__()
self.sample_size = sample_size
if num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
)
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
num_attention_heads = num_attention_heads or attention_head_dim
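# Illustrative note (added): because of the fallback above, the default
# `attention_head_dim=8` effectively configures 8 attention heads per block,
# despite the parameter name suggesting a per-head channel dimension.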
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
)
if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
)
if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
)
# input
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
if time_embedding_type == "fourier":
time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
if time_embed_dim % 2 != 0:
raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
self.time_proj = GaussianFourierProjection(
time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
)
timestep_input_dim = time_embed_dim
elif time_embedding_type == "positional":
time_embed_dim = time_embedding_dim or block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
else:
raise ValueError(
f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
)
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
post_act_fn=timestep_post_act,
cond_proj_dim=time_cond_proj_dim,
)
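# Illustrative note (added): with the default `time_embedding_type="positional"`
# and `block_out_channels=(320, 640, 1280, 1280)`, `timestep_input_dim` is 320 and
# `time_embed_dim` is 320 * 4 = 1280, so the sinusoidal timestep features are
# projected from 320 to 1280 dimensions here.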
if encoder_hid_dim_type is None and encoder_hid_dim is not None:
encoder_hid_dim_type = "text_proj"
self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
if encoder_hid_dim is None and encoder_hid_dim_type is not None:
raise ValueError(
f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
)
if encoder_hid_dim_type == "text_proj":
self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
elif encoder_hid_dim_type == "text_image_proj":
# image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
# they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
# case when `encoder_hid_dim_type == "text_image_proj"` (Kandinsky 2.1).
self.encoder_hid_proj = TextImageProjection(
text_embed_dim=encoder_hid_dim,
image_embed_dim=cross_attention_dim,
cross_attention_dim=cross_attention_dim,
)
elif encoder_hid_dim_type == "image_proj":
# Kandinsky 2.2
self.encoder_hid_proj = ImageProjection(
image_embed_dim=encoder_hid_dim,
cross_attention_dim=cross_attention_dim,
)
elif encoder_hid_dim_type is not None:
raise ValueError(
f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
)
else:
self.encoder_hid_proj = None
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
elif class_embed_type == "projection":
if projection_class_embeddings_input_dim is None:
raise ValueError(
"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
)
# The projection `class_embed_type` is the same as the timestep `class_embed_type` except
# 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
# 2. it projects from an arbitrary input dimension.
#
# Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
# When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
# As a result, `TimestepEmbedding` can be passed arbitrary vectors.
self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
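# Illustrative note (added): e.g. with `projection_class_embeddings_input_dim=512`
# and `time_embed_dim=1280`, `class_labels` is expected as a float tensor of shape
# (batch, 512) and is mapped directly to a (batch, 1280) embedding, without any
# sinusoidal encoding.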
elif class_embed_type == "simple_projection":
if projection_class_embeddings_input_dim is None:
raise ValueError(
"`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
)
self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
else:
self.class_embedding = None
if addition_embed_type == "text":
if encoder_hid_dim is not None:
text_time_embedding_from_dim = encoder_hid_dim
else:
text_time_embedding_from_dim = cross_attention_dim
self.add_embedding = TextTimeEmbedding(
text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
)
elif addition_embed_type == "text_image":
# text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
# they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
# case when `addition_embed_type == "text_image"` (Kandinsky 2.1).
self.add_embedding = TextImageTimeEmbedding(
text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
)
elif addition_embed_type == "text_time":
self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
elif addition_embed_type == "image":
# Kandinsky 2.2
self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
elif addition_embed_type == "image_hint":
# Kandinsky 2.2 ControlNet
self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
elif addition_embed_type is not None:
raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
if time_embedding_act_fn is None:
self.time_embed_act = None
else:
self.time_embed_act = get_activation(time_embedding_act_fn)
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
if mid_block_only_cross_attention is None:
mid_block_only_cross_attention = only_cross_attention
only_cross_attention = [only_cross_attention] * len(down_block_types)
if mid_block_only_cross_attention is None:
mid_block_only_cross_attention = False
if isinstance(num_attention_heads, int):
num_attention_heads = (num_attention_heads,) * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
if isinstance(cross_attention_dim, int):
cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
if isinstance(layers_per_block, int):
layers_per_block = [layers_per_block] * len(down_block_types)
if isinstance(transformer_layers_per_block, int):
transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
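# Illustrative note (added): the isinstance checks above broadcast scalar settings
# across blocks, e.g. `cross_attention_dim=1280` with four down block types becomes
# (1280, 1280, 1280, 1280).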
if class_embeddings_concat:
# The time embeddings are concatenated with the class embeddings. The dimension of the
# time embeddings passed to the down, middle, and up blocks is twice the dimension of the
# regular time embeddings
blocks_time_embed_dim = time_embed_dim * 2
else:
blocks_time_embed_dim = time_embed_dim
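# Illustrative note (added): with `class_embeddings_concat=True` and the positional
# default above (`time_embed_dim=1280`), every down/mid/up block receives
# `blocks_time_embed_dim=2560`; otherwise it stays equal to `time_embed_dim`.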
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block[i],
transformer_layers_per_block=transformer_layers_per_block[i],
in_channels=input_channel,
out_channels=output_channel,
temb_channels=blocks_time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim[i],
num_attention_heads=num_attention_heads[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
resnet_skip_time_act=resnet_skip_time_act,
resnet_out_scale_factor=resnet_out_scale_factor,
cross_attention_norm=cross_attention_norm,
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock2DCrossAttn":
self.mid_block = UNetMidBlock2DCrossAttn(
transformer_layers_per_block=transformer_layers_per_block[-1],
in_channels=block_out_channels[-1],
temb_channels=blocks_time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift=resnet_time_scale_shift,
cross_attention_dim=cross_attention_dim[-1],
num_attention_heads=num_attention_heads[-1],
resnet_groups=norm_num_groups,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
attention_type=attention_type,
)
elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn":
self.mid_block = UNetMidBlock2DSimpleCrossAttn(
in_channels=block_out_channels[-1],
temb_channels=blocks_time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
cross_attention_dim=cross_attention_dim[-1],
attention_head_dim=attention_head_dim[-1],
resnet_groups=norm_num_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
skip_time_act=resnet_skip_time_act,
only_cross_attention=mid_block_only_cross_attention,
cross_attention_norm=cross_attention_norm,
)
elif mid_block_type is None:
self.mid_block = None
else:
raise ValueError(f"unknown mid_block_type : {mid_block_type}")
# count how many layers upsample the images
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_num_attention_heads = list(reversed(num_attention_heads))
reversed_layers_per_block = list(reversed(layers_per_block))
reversed_cross_attention_dim = list(reversed(cross_attention_dim))
reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
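# Illustrative note (added): for the default `block_out_channels=(320, 640, 1280, 1280)`,
# `reversed_block_out_channels` is [1280, 1280, 640, 320], so (in, out, prev) per up
# block is (1280, 1280, 1280), (640, 1280, 1280), (320, 640, 1280), (320, 320, 640);
# `input_channel` matches the skip connections coming from the corresponding down block.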
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=reversed_layers_per_block[i] + 1,
transformer_layers_per_block=reversed_transformer_layers_per_block[i],
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=blocks_time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=reversed_cross_attention_dim[i],
num_attention_heads=reversed_num_attention_heads[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
resnet_skip_time_act=resnet_skip_time_act,
resnet_out_scale_factor=resnet_out_scale_factor,
cross_attention_norm=cross_attention_norm,
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
if norm_num_groups is not None:
self.conv_norm_out = nn.GroupNorm(
num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
)
self.conv_act = get_activation(act_fn)
else:
self.conv_norm_out = None
self.conv_act = None
conv_out_padding = (conv_out_kernel - 1) // 2
self.conv_out = nn.Conv2d(
block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
)
if attention_type == "gated":
positive_len = 768
if isinstance(cross_attention_dim, int):
positive_len = cross_attention_dim
elif isinstance(cross_attention_dim, tuple) or isinstance(cross_attention_dim, list):
positive_len = cross_attention_dim[0]
self.position_net = PositionNet(positive_len=positive_len, out_dim=cross_attention_dim)
@property
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by their weight names.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "set_processor"):
processors[f"{name}.processor"] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
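# Hypothetical example (added) of the mapping this property returns; the exact keys
# depend on the configured block types:
# {
#     "down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor": AttnProcessor(),
#     ...,
#     "mid_block.attentions.0.transformer_blocks.0.attn2.processor": AttnProcessor(),
# }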
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or a single `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
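# Hypothetical usage sketch (added); `unet` stands for an instance of this model and
# `my_processor` for any AttnProcessor-compatible object such as AttnProcessor2_0():
#   unet.set_attn_processor(my_processor)  # same processor for every attention layer
#   unet.set_attn_processor({name: my_processor for name in unet.attn_processors})  # per-layer dict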
def set_default_attn_processor(self):
"""
Disables custom attention processors and sets the default attention implementation.
""" | self.set_attn_processor(AttnProcessor()) | 0 | 2023-11-27 13:44:01+00:00 | 12k |
zhenzhiwang/intercontrol | sample/more_people_global_joint_control.py | [
{
"identifier": "MultiControlGaussianDiffusion",
"path": "diffusion/more_people_control_diffusion.py",
"snippet": "class MultiControlGaussianDiffusion(ControlGaussianDiffusion):\n \n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n const_noise=False,\n use_posterior=False,\n ):\n \"\"\"\n overrides p_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n\n assert self.model_mean_type == ModelMeanType.START_X, 'This feature supports only X_start pred for mow!'\n global_joints = self.humanml_to_global_joint(x)\n model_kwargs['y']['global_joint'] = th.zeros_like(global_joints, device = x.device)\n model_kwargs['y']['global_joint'][:-1,...] = global_joints[1:,...].clone().detach()\n #model_kwargs['y']['global_joint'][0,...] = global_joints[-1,...].clone().detach()\n model_kwargs['y']['global_joint'].requires_grad = False\n\n p_mean_variance_func = self.p_mean_variance_bfgs_posterior if use_posterior else self.p_mean_variance_bfgs_x0\n out = p_mean_variance_func(\n model,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n k_first = 5,\n k_last = 10,\n t_threshold = 10,\n )\n \n noise = th.randn_like(x)\n if const_noise:\n noise = noise[[0]].repeat(x.shape[0], 1, 1, 1)\n\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n\n sample = out[\"mean\"] + nonzero_mask * th.exp(0.5 * out[\"log_variance\"]) * noise\n \n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}\n \n def global_joint_bfgs_optimize(self, x, model_kwargs=None):\n \"\"\"\n pred_joint: [bs, njoints, 3, seqlen]\n assume interaction between all people in a batch\n \"\"\"\n assert self.model_mean_type == ModelMeanType.START_X, 'This feature supports only X_start pred for mow!'\n pred_joint = self.humanml_to_global_joint(x)\n\n loss = 0 \n # loss for all time steps\n loss += self.group_motion_region_loss(pred_joint)\n loss += self.avoid_crowd_collision_loss(pred_joint)\n\n # loss for contact\n contact_mask = model_kwargs['y']['global_joint_mask']\n loss += self.crowd_contact_joint_loss(pred_joint, contact_mask)\n #loss += self.far_away_joint_loss(pred_joint[1::2, :,:,:], pred_joint[::2, :,:,:], far_away_mask)\n return loss\n \n\n def crowd_contact_joint_loss(self, pred_joint, mask):\n desired_distance = 0.02\n #pred_joint_0 = th.masked_select(pred_joint[0].permute(0,2,1), mask[0, :,:,:].bool().permute(0,2,1)).contiguous().reshape(-1,3)\n #pred_joint_1 = th.masked_select(pred_joint[1].permute(0,2,1), mask[1, :,:,:].bool().permute(0,2,1)).contiguous().reshape(-1,3)\n #pred_joint_2 = th.masked_select(pred_joint[2].permute(0,2,1), mask[2, :,:,:].bool().permute(0,2,1)).contiguous().reshape(-1,3)\n loss = ((pred_joint[0, 11,:,30:50] - pred_joint[1, 11,:,30:50])**2).mean()\n loss += ((pred_joint[1, 11,:,30:50] - pred_joint[2, 11,:,30:50])**2).mean()\n return loss\n \n def humanml_to_global_joint(self, x):\n n_joints = 22 if x.shape[1] == 263 else 21\n pred_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n assert pred_joint.shape[1] == 1\n pred_joint = recover_from_ric(pred_joint, n_joints)\n pred_joint = pred_joint.view(-1, *pred_joint.shape[2:]).permute(0, 2, 3, 1)\n # change root positions for multi-person purpose\n assert pred_joint.shape[0] == 3\n pred_joint[1, :,2,:] *= -1\n pred_joint[1, :,0,:] *= -1\n pred_joint[1, :,2,:] += 2\n pred_joint[2, :,0,:] += 1\n return pred_joint\n \n def group_motion_region_loss(self, pred_joint):\n position = pred_joint[:, 0, [0,2], :]\n cliped_pos = th.clamp(position, -5, 5)\n loss 
= self.mse_loss(position, cliped_pos) * 0.1\n return loss\n\n def compute_triangle_normals(self, triangles):\n # Compute the vectors from the first point to the other two points\n v1 = triangles[:, 1] - triangles[:, 0]\n v2 = triangles[:, 2] - triangles[:, 0]\n\n # Compute the cross product of v1 and v2 to get the normal vectors\n normals = th.cross(v1, v2, dim=1)\n\n # Normalize the normal vectors to unit length\n normals = th.nn.functional.normalize(normals, dim=1)\n return normals\n \n def avoid_crowd_collision_loss(self, joint):\n root = joint[:, 0, [0,2], :]\n diff = th.norm(root[1:,...] - root[:-1,...], dim = -2)\n loss = th.nn.functional.relu(0.5 - diff).mean()\n diff = th.norm(root[0,...] - root[2,...], dim = -2)\n loss += th.nn.functional.relu(0.5 - diff).mean()\n loss += th.nn.functional.relu(joint[:, 0, 1, :] - 1.1).mean()\n return loss\n \n def get_person_direction(self, joint):\n face_joint_indx = [1, 2, 16, 17]\n l_hip, r_hip, l_shoulder, r_shoulder = face_joint_indx\n across_hip = joint[..., r_hip, :] - joint[..., l_hip, :]\n across_hip = across_hip / across_hip.norm(dim=-1, keepdim=True)\n across_shoulder = joint[..., r_shoulder, :] - joint[..., l_shoulder, :]\n across_shoulder = across_shoulder / across_shoulder.norm(dim=-1, keepdim=True)\n across = (across_hip + across_shoulder) / 2\n across = across / across.norm(dim=-1, keepdim=True)\n y_axis = th.zeros_like(across)\n y_axis[..., 1] = 1\n forward = th.cross(y_axis, across, axis=-1)\n forward = forward / forward.norm(dim=-1, keepdim=True)\n return forward\n\n def face_to_face_loss(self, pred_joint, cond_joint, mask):\n \"\"\"\n pred_joint: [bs, njoints, 3, seqlen]\n cond_joint: [bs, njoints, 3, seqlen]\n mask: [bs, njoints, 3, seqlen]\n \"\"\"\n weight={'orientation': 1, 'position': 1, 'hand': 1}\n mask = mask.permute(0, 3, 1, 2).sum(dim=-1).sum(dim=-1).clamp(0,1)\n bs, njoints, ndim, seqlen = pred_joint.shape\n assert ndim == 3, \"joint_dim must be 3, got {}\".format(ndim)\n pred_joint, cond_joint = pred_joint.permute(0, 3, 1, 2), cond_joint.permute(0, 3, 1, 2)\n direction = self.get_person_direction(pred_joint)\n direction_cond = self.get_person_direction(cond_joint)\n inter_direction = self.get_inter_direction(pred_joint, cond_joint)\n cross_product = (th.cross(direction, inter_direction, dim=-1)[..., 2] + th.cross(inter_direction, direction_cond, dim=-1)[..., 2])/2\n threshold = 0.8\n cross_product[cross_product>threshold] = threshold\n mse_loss = th.nn.MSELoss(reduction='mean')\n position_gt = th.ones_like(cross_product, device = cross_product.device) * threshold\n loss_direction = (direction + direction_cond).abs().mean() * weight['orientation'] \n loss_position = mse_loss(cross_product, position_gt) * weight['position']\n \n '''\n # hand\n hand_direction = self.get_hand_direction(pred_joint)\n hand_direction_cond = self.get_hand_direction(cond_joint)\n inner_product = (hand_direction[...,:-1] * inter_direction[...,:-1]).sum(dim=-1)\n inner_product_cond = - (hand_direction_cond[...,:-1] * inter_direction[...,:-1]).sum(dim=-1)\n loss += ((inner_product + inner_product_cond) / 2 * mask).sum() / mask.sum() * weight['hand']\n '''\n return loss_direction + loss_position"
},
{
"identifier": "SpacedDiffusion",
"path": "diffusion/respace.py",
"snippet": "class SpacedDiffusion(GaussianDiffusion):\n \"\"\"\n A diffusion process which can skip steps in a base diffusion process.\n\n :param use_timesteps: a collection (sequence or set) of timesteps from the\n original diffusion process to retain.\n :param kwargs: the kwargs to create the base diffusion process.\n \"\"\"\n\n def __init__(self, use_timesteps, **kwargs):\n self.use_timesteps = set(use_timesteps)\n self.timestep_map = []\n self.original_num_steps = len(kwargs[\"betas\"])\n\n base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa\n last_alpha_cumprod = 1.0\n new_betas = []\n for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):\n if i in self.use_timesteps:\n new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)\n last_alpha_cumprod = alpha_cumprod\n self.timestep_map.append(i)\n kwargs[\"betas\"] = np.array(new_betas)\n super().__init__(**kwargs)\n\n def p_mean_variance(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)\n\n def training_losses(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().training_losses(self._wrap_model(model), *args, **kwargs)\n\n def condition_mean(self, cond_fn, *args, **kwargs):\n return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)\n\n def condition_score(self, cond_fn, *args, **kwargs):\n return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)\n\n def _wrap_model(self, model):\n if isinstance(model, _WrappedModel):\n return model\n return _WrappedModel(\n model, self.timestep_map, self.rescale_timesteps, self.original_num_steps\n )\n\n def _scale_timesteps(self, t):\n # Scaling is done by the wrapped model.\n return t"
},
{
"identifier": "fixseed",
"path": "utils/fixseed.py",
"snippet": "def fixseed(seed):\n torch.backends.cudnn.benchmark = False\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)"
},
{
"identifier": "edit_interactive_control_args",
"path": "utils/parser_util.py",
"snippet": "def edit_interactive_control_args():\n parser = ArgumentParser()\n add_base_options(parser)\n add_sampling_options(parser)\n add_edit_inpainting_options(parser)\n add_interactive_options(parser)\n args = parse_and_load_from_model(parser)\n return args"
},
{
"identifier": "load_controlmdm_and_diffusion",
"path": "utils/model_util.py",
"snippet": "def load_controlmdm_and_diffusion(args, data, device, ModelClass=ControlMDM, DiffusionClass=ControlGaussianDiffusion): \n model, diffusion = create_model_and_diffusion(args, data, ModelClass=ControlMDM, DiffusionClass=DiffusionClass)\n model_path = args.model_path\n print(f\"Loading checkpoints from [{model_path}]...\")\n state_dict = torch.load(model_path, map_location='cpu')\n load_model_wo_clip(model, state_dict)\n model.mean = data.dataset.t2m_dataset.mean\n model.std = data.dataset.t2m_dataset.std\n\n model.to(device)\n model.eval() # disable random masking\n model = wrap_model(model, args)\n return model, diffusion"
},
{
"identifier": "dist_util",
"path": "utils/dist_util.py",
"snippet": "GPUS_PER_NODE = 8\nSETUP_RETRY_COUNT = 3\ndef setup_dist(device=0):\ndef dev():\ndef load_state_dict(path, **kwargs):\ndef sync_params(params):\ndef _find_free_port():"
},
{
"identifier": "wrap_model",
"path": "model/cfg_sampler.py",
"snippet": "def wrap_model(model, args):\n if args.guidance_param not in [0., 1.]:\n return ClassifierFreeSampleModel(model) # wrapping model with the classifier-free sampler\n elif args.guidance_param == 0:\n return UnconditionedModel(model)\n else:\n return model"
},
{
"identifier": "get_dataset_loader",
"path": "data_loaders/get_data.py",
"snippet": "def get_dataset_loader(name, batch_size, num_frames, split='train', load_mode='train', opt=None, short_db=False, cropping_sampler=False, size=None):\n if load_mode == 'text_only':\n load_mode = 'train'\n dataset = get_dataset(name, num_frames, split, load_mode, batch_size, opt, short_db, cropping_sampler, size)\n collate = get_collate_fn(name, load_mode)\n\n n_workers = 1 if load_mode in ['movement_train', 'evaluator_train'] else 8\n loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=True,\n num_workers=n_workers, drop_last=True, collate_fn=collate\n )\n\n return loader"
},
{
"identifier": "recover_from_ric",
"path": "data_loaders/humanml/scripts/motion_process.py",
"snippet": "def recover_from_ric(data, joints_num):\n r_rot_quat, r_pos = recover_root_rot_pos(data)\n positions = data[..., 4:(joints_num - 1) * 3 + 4]\n positions = positions.view(positions.shape[:-1] + (-1, 3))\n\n '''Add Y-axis rotation to local joints'''\n positions = qrot(qinv(r_rot_quat[..., None, :]).expand(positions.shape[:-1] + (4,)), positions)\n\n '''Add root XZ to joints'''\n positions[..., 0] += r_pos[..., 0:1]\n positions[..., 2] += r_pos[..., 2:3]\n\n '''Concate root and joints'''\n positions = torch.cat([r_pos.unsqueeze(-2), positions], dim=-2)\n\n return positions"
},
{
"identifier": "get_more_people_mask",
"path": "data_loaders/humanml_utils.py",
"snippet": "def get_more_people_mask(shape):\n bs = 3\n n_joint, n_xyz, seq_len = shape\n model_kwargs = {'y': {'text':[], 'global_joint_mask':[]}}\n model_kwargs['y']['global_joint_mask']=np.zeros((bs, n_joint, n_xyz, seq_len))\n \n if bs == 3:\n model_kwargs['y']['text'].append(\"A person steps forward slowly, and hold something with his right wrist and left wrist.\")\n model_kwargs['y']['text'].append(\"A person steps forward slowly, and hold something with his right wrist and left wrist.\")\n model_kwargs['y']['global_joint_mask'][0, 21, :,30:50] = 0.05\n model_kwargs['y']['global_joint_mask'][1, 17, :,30:50] = 0.05\n\n model_kwargs['y']['global_joint_mask'][1, 20, :,60:70] = 0.05\n model_kwargs['y']['global_joint_mask'][2, 16, :,60:70] = 0.05\n \n else:\n pass\n model_kwargs['y']['text'].append(\"A person steps forward slowly\")\n\n assert bs == model_kwargs['y']['global_joint_mask'].shape[0], \"bs must be {}, got {}\".format(bs, model_kwargs['y']['global_joint_mask'].shape[0])\n return model_kwargs, bs"
},
{
"identifier": "HML_JOINT_NAMES",
"path": "data_loaders/humanml_utils.py",
"snippet": "HML_JOINT_NAMES = [\n 'pelvis',\n 'left_hip',\n 'right_hip',\n 'spine1',\n 'left_knee',\n 'right_knee',\n 'spine2',\n 'left_ankle',\n 'right_ankle',\n 'spine3',\n 'left_foot',\n 'right_foot',\n 'neck',\n 'left_collar',\n 'right_collar',\n 'head',\n 'left_shoulder',\n 'right_shoulder',\n 'left_elbow',\n 'right_elbow',\n 'left_wrist',\n 'right_wrist',\n]"
},
{
"identifier": "plot_3d_motion",
"path": "data_loaders/humanml/utils/plot_script.py",
"snippet": "def plot_3d_motion(save_path, kinematic_tree, joints, title, dataset, figsize=(8, 8), fps=120, radius=4,\n vis_mode='default', gt_frames=[], handshake_size=0, blend_size=0, step_sizes=[], lengths = [], joints2=None, painting_features=[], guidance=None):\n matplotlib.use('Agg')\n \"\"\"\n A wrapper around explicit_plot_3d_motion that \n uses gt_frames to determine the colors of the frames\n \"\"\"\n data = joints.copy().reshape(len(joints), -1, 3)\n frames_number = data.shape[0]\n frame_colors = ['blue' if index in gt_frames else 'orange' for index in range(frames_number)]\n if vis_mode == 'unfold':\n frame_colors = ['purple'] *handshake_size + ['blue']*blend_size + ['orange'] *(120-handshake_size*2-blend_size*2) +['orange']*blend_size\n frame_colors = ['orange'] *(120-handshake_size-blend_size) + ['orange']*blend_size + frame_colors*1024\n elif vis_mode == 'unfold_arb_len':\n for ii, step_size in enumerate(step_sizes):\n if ii == 0:\n frame_colors = ['orange']*(step_size - handshake_size - blend_size) + ['orange']*blend_size + ['purple'] * (handshake_size//2)\n continue\n if ii == len(step_sizes)-1:\n frame_colors += ['purple'] * (handshake_size//2) + ['orange'] * blend_size + ['orange'] * (lengths[ii] - handshake_size - blend_size)\n continue\n frame_colors += ['purple'] * (handshake_size // 2) + ['orange'] * blend_size + ['orange'] * (\n lengths[ii] - 2 * handshake_size - 2 * blend_size) + ['orange'] * blend_size + \\\n ['purple'] * (handshake_size // 2)\n elif vis_mode == 'gt':\n frame_colors = ['blue'] * frames_number\n explicit_plot_3d_motion(save_path, kinematic_tree, joints, title, dataset, figsize=figsize, fps=fps, radius=radius, \n vis_mode=vis_mode, frame_colors=frame_colors, joints2=joints2, painting_features=painting_features, guidance=guidance)"
},
{
"identifier": "ControlMDM",
"path": "model/ControlMDM.py",
"snippet": "class ControlMDM(MDM):\n\n def __init__(self, modeltype, njoints, nfeats, num_actions, translation, pose_rep, glob, glob_rot,\n latent_dim=256, ff_size=1024, num_layers=8, num_heads=4, dropout=0.1,\n ablation=None, activation=\"gelu\", legacy=False, data_rep='rot6d', dataset='amass', clip_dim=512,\n arch='trans_enc', emb_trans_dec=False, clip_version=None, args=None, **kargs):\n\n super(ControlMDM, self).__init__(modeltype, njoints, nfeats, num_actions, translation, pose_rep, glob, glob_rot,\n latent_dim, ff_size, num_layers, num_heads, dropout,\n ablation, activation, legacy, data_rep, dataset, clip_dim,\n arch, emb_trans_dec, clip_version, **kargs)\n self.args = args\n self.num_layers = num_layers\n self.multi_person = args.multi_person\n self.upper_orientation_index = [0, 16, 17] # root, l_shoulder, r_shoulder\n self.lower_orientation_index = [0, 1, 2] # root, l_hip, r_hip\n\n # linear layers init with zeros\n if self.dataset == 'kit':\n self.first_zero_linear = nn.Linear(21*3*2 + 2*3, self.latent_dim)\n elif self.dataset == 'humanml':\n self.first_zero_linear = nn.Linear(22*3*2 + 2*3, self.latent_dim)\n else:\n raise NotImplementedError('Supporting only kit and humanml dataset, got {}'.format(self.dataset))\n \n nn.init.zeros_(self.first_zero_linear.weight)\n nn.init.zeros_(self.first_zero_linear.bias)\n self.mid_zero_linear = nn.ModuleList(\n [nn.Linear(self.latent_dim, self.latent_dim) for _ in range(self.num_layers)])\n for m in self.mid_zero_linear:\n nn.init.zeros_(m.weight)\n nn.init.zeros_(m.bias)\n\n if self.arch == 'trans_enc':\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=self.num_heads,\n dim_feedforward=self.ff_size,\n dropout=self.dropout,\n activation=self.activation)\n del self.seqTransEncoder\n self.seqTransEncoder_mdm = TransformerEncoder(seqTransEncoderLayer,\n num_layers=self.num_layers)\n self.seqTransEncoder_control = TransformerEncoder(seqTransEncoderLayer,\n num_layers=self.num_layers)\n else:\n raise ValueError('Supporting only trans_enc arch.')\n\n self.freeze_block(self.input_process)\n self.freeze_block(self.sequence_pos_encoder)\n self.freeze_block(self.seqTransEncoder_mdm)\n self.freeze_block(self.embed_timestep)\n if 'text' in self.cond_mode:\n self.freeze_block(self.embed_text)\n self.freeze_block(self.output_process)\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std = torch.tensor(self.std, dtype=data.dtype, device=data.device, requires_grad=False)\n mean = torch.tensor(self.mean, dtype=data.dtype, device=data.device, requires_grad=False)\n output = torch.add(torch.mul(data, std), mean)\n return output\n \n def compute_triangle_normals(self, triangles):\n # Compute the vectors from the first point to the other two points\n v1 = triangles[:,:, 1] - triangles[:, :,0]\n v2 = triangles[:,:, 2] - triangles[:,:,0]\n\n # Compute the cross product of v1 and v2 to get the normal vectors\n normals = torch.cross(v2, v1, dim=-1)\n\n # Normalize the normal vectors to unit length\n normals = nn.functional.normalize(normals, dim=-1)\n return normals\n \n def humanml_to_global_joint(self, x):\n n_joints = 22 if x.shape[1] == 263 else 21\n curr_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n assert curr_joint.shape[1] == 1\n curr_joint = recover_from_ric(curr_joint, n_joints)\n curr_joint = curr_joint.view(-1, *curr_joint.shape[2:]).permute(0, 2, 3, 1)\n # change root positions for multi-person purpose\n if 
self.multi_person:\n curr_joint[1::2, :,2,:] *= -1\n curr_joint[1::2, :,0,:] *= -1\n curr_joint[1::2, :,2,:] += 2\n\n # more than 3 people\n #curr_joint[1, :,2,:] *= -1\n #curr_joint[1, :,0,:] *= -1\n #curr_joint[1, :,2,:] += 2\n #curr_joint[2, :,0,:] += 1\n return curr_joint\n\n def forward(self, x, timesteps, y=None):\n bs, njoints, nfeats, seqlen = x.shape\n control_bs, n_global_joints, xyz_dim, control_frames = y['global_joint'].shape\n assert bs == control_bs and seqlen == control_frames, \"bs {} != {} or seqlen {} != {}\".format(bs, control_bs, seqlen, control_frames)\n assert xyz_dim ==3, \"xyz_dim {} != 3\".format(xyz_dim)\n # prepare global joints for controlmdm\n curr_joint = self.humanml_to_global_joint(x).clone().detach() # [bs, njoints, 3, seqlen]\n curr_joint.requires_grad = False\n\n # Build embedding vector\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.encode_text(y['text'])\n emb += self.embed_text(self.mask_cond(enc_text, force_mask=force_mask))\n if 'action' in self.cond_mode:\n action_emb = self.embed_action(y['action'])\n emb += self.mask_cond(action_emb, force_mask=force_mask)\n\n # Embed motion to latent space (frame by frame)\n x = self.input_process(x) #[seqlen, bs, d]\n\n # adding the timestep embed\n xseq = torch.cat((emb, x), axis=0) # [seqlen+1, bs, d]\n xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d]\n\n # controlmdm\n # orientation\n upper_triangles = curr_joint[:,self.upper_orientation_index,:,:].permute(3,0,1,2) # [seqlen, bs, 3, 3]\n lower_triangles = curr_joint[:,self.lower_orientation_index,:,:].permute(3,0,1,2) # [seqlen, bs, 3, 3]\n upper_orientation = self.compute_triangle_normals(upper_triangles) # [seqlen, bs, 3]\n lower_orientation = self.compute_triangle_normals(lower_triangles) # [seqlen, bs, 3]\n\n # relative position to joint\n '''\n relative_position = torch.zeros_like(curr_joint, device = xseq.device, dtype=torch.float32) # [bs, njoints, 3, seqlen]\n relative_position[1::2,:,:,:] = ((y['global_joint'][::2,:,:,:].unsqueeze(1).float() - \\\n curr_joint[:,1::2,:,:].unsqueeze(2))*y['global_joint_mask'][::2,:,:,:].bool().float()).float().sum(1)\n relative_position[::2,:,:,:] = ((y['global_joint'][1::2,:,:,:].unsqueeze(1).float() - \\\n curr_joint[:,::2,:,:].unsqueeze(2))*y['global_joint_mask'][1::2,:,:,:].bool().float()).float().sum(1)\n '''\n relative_position = ((y['global_joint'].float() - curr_joint)*y['global_joint_mask'].bool().float()).float() # [bs, njoints, 3, seqlen]\n relative_position = relative_position.permute(3, 0, 1, 2).reshape(control_frames, control_bs, -1) # [seqlen, bs, 22*3]\n\n # relative position to root\n relative_root = ((y['global_joint'].float() - curr_joint[:,[0],:,:])*y['global_joint_mask'].bool().float()).float() # [bs, njoints, 3, seqlen]\n relative_root = relative_root.permute(3, 0, 1, 2).reshape(control_frames, control_bs, -1) # [seqlen, bs, 22*3]\n global_joint_feat = torch.cat((relative_position, relative_root, upper_orientation, lower_orientation), axis=-1) # [seqlen, bs, 22*3 *2 +3 +3]\n \n global_joint_feat = self.first_zero_linear(global_joint_feat) # [seqlen, bs, d]\n control_input = xseq + torch.cat((torch.zeros_like(emb, device = xseq.device, dtype=torch.float32), global_joint_feat), axis=0) # [seqlen+1, bs, d]\n control_output_list = self.seqTransEncoder_control.return_all_layers(control_input) # [seqlen+1, bs, d]\n for i in range(self.num_layers):\n control_output_list[i] = 
self.mid_zero_linear[i](control_output_list[i])\n \n output = self.seqTransEncoder_mdm.forward_with_condition(xseq, control_output_list)[1:] # [seqlen, bs, d]\n output = self.output_process(output) # [bs, njoints, nfeats, seqlen]\n return output\n\n def trainable_parameters(self):\n return [p for name, p in self.named_parameters() if p.requires_grad]\n # return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n \n def trainable_parameter_names(self):\n return [name for name, p in self.named_parameters() if p.requires_grad]\n\n def freeze_block(self, block):\n block.eval()\n for p in block.parameters():\n p.requires_grad = False\n\n def unfreeze_block(self, block):\n block.train()\n for p in block.parameters():\n p.requires_grad = True\n \n def forward_without_control(self, x, timesteps, y=None): #\n # Build embedding vector\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.encode_text(y['text'])\n emb += self.embed_text(self.mask_cond(enc_text, force_mask=force_mask))\n if 'action' in self.cond_mode:\n action_emb = self.embed_action(y['action'])\n emb += self.mask_cond(action_emb, force_mask=force_mask)\n\n # Embed motion to latent space (frame by frame)\n x = self.input_process(x) #[seqlen, bs, d]\n # adding the timestep embed\n xseq = torch.cat((emb, x), axis=0) # [seqlen+1, bs, d]\n xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d]\n output = self.seqTransEncoder_mdm(xseq)[1:] # [seqlen, bs, d]\n output = self.output_process(output) # [bs, njoints, nfeats, seqlen]\n return output"
}
] | from diffusion.more_people_control_diffusion import MultiControlGaussianDiffusion
from diffusion.respace import SpacedDiffusion
from utils.fixseed import fixseed
from utils.parser_util import edit_interactive_control_args
from utils.model_util import load_controlmdm_and_diffusion
from utils import dist_util
from model.cfg_sampler import wrap_model
from data_loaders.get_data import get_dataset_loader
from data_loaders.humanml.scripts.motion_process import recover_from_ric
from data_loaders.humanml_utils import get_more_people_mask, HML_JOINT_NAMES
from data_loaders.humanml.utils.plot_script import plot_3d_motion
from model.ControlMDM import ControlMDM
import os
import numpy as np
import torch
import data_loaders.humanml.utils.paramUtil as paramUtil
import shutil | 8,056 | # This code is based on https://github.com/openai/guided-diffusion
"""
Generate a large batch of image samples from a model and save them as a large
numpy array. This can be used to produce samples for FID evaluation.
"""
def main():
args = edit_interactive_control_args()
if args.multi_person == False:
args.multi_person = True
args.dataset = 'humanml' # we only support humanml for now
if args.use_posterior == False:
args.use_posterior = True
fixseed(args.seed)
out_path = args.output_dir
name = os.path.basename(os.path.dirname(args.model_path))
niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '')
max_frames = 100 if args.dataset in ['kit', 'humanml'] else 60
fps = 12.5 if args.dataset == 'kit' else 20
dist_util.setup_dist(args.device)
if out_path == '':
out_path = os.path.join(os.path.dirname(args.model_path),
'more_people_{}_{}_seed{}'.format(niter, args.interaction_json.split('/')[-1], args.seed))
print('Loading dataset...')
data = get_dataset_loader(name=args.dataset,
batch_size=args.batch_size,
num_frames=max_frames,
split='test',
load_mode='train',
size=1) # in train mode, you get both text and motion.
data.fixed_length = max_frames
total_num_samples = args.num_samples * args.num_repetitions
print("Creating model and diffusion...")
DiffusionClass = MultiControlGaussianDiffusion if args.filter_noise else SpacedDiffusion
| # This code is based on https://github.com/openai/guided-diffusion
"""
Generate a large batch of image samples from a model and save them as a large
numpy array. This can be used to produce samples for FID evaluation.
"""
def main():
args = edit_interactive_control_args()
if args.multi_person == False:
args.multi_person = True
args.dataset = 'humanml' # we only support humanml for now
if args.use_posterior == False:
args.use_posterior = True
fixseed(args.seed)
out_path = args.output_dir
name = os.path.basename(os.path.dirname(args.model_path))
niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '')
max_frames = 100 if args.dataset in ['kit', 'humanml'] else 60
fps = 12.5 if args.dataset == 'kit' else 20
dist_util.setup_dist(args.device)
if out_path == '':
out_path = os.path.join(os.path.dirname(args.model_path),
'more_people_{}_{}_seed{}'.format(niter, args.interaction_json.split('/')[-1], args.seed))
print('Loading dataset...')
data = get_dataset_loader(name=args.dataset,
batch_size=args.batch_size,
num_frames=max_frames,
split='test',
load_mode='train',
size=1) # in train mode, you get both text and motion.
data.fixed_length = max_frames
total_num_samples = args.num_samples * args.num_repetitions
print("Creating model and diffusion...")
DiffusionClass = MultiControlGaussianDiffusion if args.filter_noise else SpacedDiffusion | model, diffusion = load_controlmdm_and_diffusion(args, data, dist_util.dev(), ModelClass=ControlMDM, DiffusionClass=DiffusionClass) | 4 | 2023-11-27 05:28:02+00:00 | 12k |
AGI-Collective/Robin | robin/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "robin/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "build_attn_bias",
"path": "robin/model/language_model/mpt/attention.py",
"snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "MPTBlock",
"path": "robin/model/language_model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)"
},
{
"identifier": "SharedEmbedding",
"path": "robin/model/language_model/mpt/custom_embedding.py",
"snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)"
},
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "robin/model/language_model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}"
},
{
"identifier": "MPTConfig",
"path": "robin/model/language_model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. 
Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) 
and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')"
},
{
"identifier": "AutoTokenizerForMOD",
"path": "robin/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "robin/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "robin/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')"
},
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "robin/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. 
' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')"
},
{
"identifier": "init_empty_weights",
"path": "robin/model/language_model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "robin/model/language_model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}"
},
{
"identifier": "generic_param_init_fn_",
"path": "robin/model/language_model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')"
}
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 9,432 | assert isinstance(attn_bias, torch.Tensor)
attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
if attention_mask is not None:
s_k = attention_mask.shape[-1]
if attn_bias is None:
attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
else:
_s_k = max(0, attn_bias.size(-1) - s_k)
attn_bias = attn_bias[:, :, :, _s_k:]
if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
return (attn_bias, None)
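    # Fold the prefix mask into the square attention bias: positions flagged as prefix may
    # attend to one another bidirectionally, while all remaining positions stay causal.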
def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
(s_k, s_q) = attn_bias.shape[-2:]
if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.')
seq_len = prefix_mask.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
prefix = prefix_mask.view(-1, 1, 1, seq_len)
cannot_attend = ~torch.logical_or(causal, prefix.bool())
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
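    # Block attention between tokens that belong to different packed sub-sequences,
    # as indicated by `sequence_id`.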
def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
seq_len = sequence_id.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
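    # Forward pass: embed tokens (plus learned positions unless alibi is used), apply the
    # embedding-fraction gradient trick, build the attention bias, then run the MPTBlock
    # stack (optionally with gradient checkpointing) followed by the final norm.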
def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None):
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
if attention_mask is not None:
attention_mask = attention_mask.bool()
if prefix_mask is not None:
prefix_mask = prefix_mask.bool()
if not return_dict:
raise NotImplementedError('return_dict False is not implemented yet for MPT')
if output_attentions:
if self.attn_impl != 'torch':
raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
raise NotImplementedError('MPT does not support training with left padding.')
if self.prefix_lm and prefix_mask is None:
raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
if self.training:
if self.attn_uses_sequence_id and sequence_id is None:
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
elif self.attn_uses_sequence_id is False and sequence_id is not None:
warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
if input_ids is not None:
S = input_ids.size(1)
assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
tok_emb = self.wte(input_ids)
else:
assert inputs_embeds is not None
assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.'
S = inputs_embeds.size(1)
tok_emb = inputs_embeds
if self.alibi:
x = tok_emb
else:
past_position = 0
if past_key_values is not None:
if len(past_key_values) != self.config.n_layers:
raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
past_position = past_key_values[0][0].size(1)
if self.attn_impl == 'torch':
past_position = past_key_values[0][0].size(3)
if S + past_position > self.config.max_seq_len:
raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
if attention_mask is not None:
pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
pos_emb = self.wpe(pos)
x = tok_emb + pos_emb
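        # embedding_fraction < 1 scales the gradient reaching the embedding weights while
        # leaving the forward activations unchanged (the detached term carries the remainder).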
if self.embedding_fraction == 1:
x = self.emb_drop(x)
else:
x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
assert isinstance(self.emb_drop, nn.Module)
x = self.emb_drop(x_shrunk)
(attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
if use_cache and past_key_values is None:
past_key_values = [() for _ in range(self.config.n_layers)]
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
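        # Run the transformer stack; with gradient checkpointing enabled during training,
        # block activations are recomputed in the backward pass to save memory.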
for (b_idx, block) in enumerate(self.blocks):
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
past_key_value = past_key_values[b_idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
(x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal)
else:
(x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
if past_key_values is not None:
past_key_values[b_idx] = past_key_value
if output_attentions:
assert all_self_attns is not None
all_self_attns = all_self_attns + (attn_weights,)
x = self.norm_f(x)
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
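    # Parameter init hook: looks up the initialization scheme configured under `init_config['name']`.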
def param_init_fn(self, module):
init_fn_name = self.config.init_config['name']
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
    from .flash_attn_triton import flash_attn_func
except:
    pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
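# Decoder-only MPT backbone: shared token embedding, optional learned positional embedding
# (skipped when alibi is enabled), a stack of MPTBlock layers, and a final norm layer.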
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max)
self._attn_bias_initialized = True
if self.attn_impl == 'flash':
return (self.attn_bias, attention_mask)
if self.attn_bias is not None:
self.attn_bias = self.attn_bias.to(dtype=dtype, device=device)
attn_bias = self.attn_bias
if self.prefix_lm:
assert isinstance(attn_bias, torch.Tensor)
assert isinstance(prefix_mask, torch.Tensor)
attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
if self.attn_uses_sequence_id and sequence_id is not None:
assert isinstance(attn_bias, torch.Tensor)
attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
if attention_mask is not None:
s_k = attention_mask.shape[-1]
if attn_bias is None:
attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
else:
_s_k = max(0, attn_bias.size(-1) - s_k)
attn_bias = attn_bias[:, :, :, _s_k:]
if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
return (attn_bias, None)
def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
(s_k, s_q) = attn_bias.shape[-2:]
if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
            raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_seq_len} ' + f'but are {s_k} and {s_q}.')
seq_len = prefix_mask.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
prefix = prefix_mask.view(-1, 1, 1, seq_len)
cannot_attend = ~torch.logical_or(causal, prefix.bool())
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
seq_len = sequence_id.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None):
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
if attention_mask is not None:
attention_mask = attention_mask.bool()
if prefix_mask is not None:
prefix_mask = prefix_mask.bool()
if not return_dict:
raise NotImplementedError('return_dict False is not implemented yet for MPT')
if output_attentions:
if self.attn_impl != 'torch':
raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
raise NotImplementedError('MPT does not support training with left padding.')
if self.prefix_lm and prefix_mask is None:
raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
if self.training:
if self.attn_uses_sequence_id and sequence_id is None:
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
elif self.attn_uses_sequence_id is False and sequence_id is not None:
warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
if input_ids is not None:
S = input_ids.size(1)
assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
tok_emb = self.wte(input_ids)
else:
assert inputs_embeds is not None
assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.'
S = inputs_embeds.size(1)
tok_emb = inputs_embeds
if self.alibi:
x = tok_emb
else:
past_position = 0
if past_key_values is not None:
if len(past_key_values) != self.config.n_layers:
raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
past_position = past_key_values[0][0].size(1)
if self.attn_impl == 'torch':
past_position = past_key_values[0][0].size(3)
if S + past_position > self.config.max_seq_len:
raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
if attention_mask is not None:
pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
pos_emb = self.wpe(pos)
x = tok_emb + pos_emb
if self.embedding_fraction == 1:
x = self.emb_drop(x)
else:
x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
assert isinstance(self.emb_drop, nn.Module)
x = self.emb_drop(x_shrunk)
(attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
if use_cache and past_key_values is None:
past_key_values = [() for _ in range(self.config.n_layers)]
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for (b_idx, block) in enumerate(self.blocks):
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
past_key_value = past_key_values[b_idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
(x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal)
else:
(x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
if past_key_values is not None:
past_key_values[b_idx] = past_key_value
if output_attentions:
assert all_self_attns is not None
all_self_attns = all_self_attns + (attn_weights,)
x = self.norm_f(x)
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
def param_init_fn(self, module):
init_fn_name = self.config.init_config['name'] | MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config) | 11 | 2023-11-28 15:25:55+00:00 | 12k |
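For reference, a minimal standalone sketch (not taken from the row above) of the position-id arithmetic that MPTModel.forward applies when ALiBi is off and a KV cache plus padding mask are present; the helper name and toy inputs are illustrative assumptions.

import torch

def mpt_position_ids(seq_len, past_position, attention_mask):
    # New tokens continue counting after the cached prefix.
    pos = torch.arange(past_position, seq_len + past_position, dtype=torch.long).unsqueeze(0)
    # Subtract the number of padded slots seen so far so that real tokens keep
    # consecutive positions; clamp pins padded slots to position 0.
    pad_counts = torch.cumsum((~attention_mask.bool()).to(torch.int32), dim=1)[:, past_position:]
    return torch.clamp(pos - pad_counts, min=0)

mask = torch.tensor([[0, 0, 1, 1, 1]])  # two padded slots, then three real tokens
print(mpt_position_ids(5, 0, mask))     # tensor([[0, 0, 0, 1, 2]])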
huangb23/VTimeLLM | vtimellm/model/vtimellm_chatglm.py | [
{
"identifier": "ChatGLMConfig",
"path": "vtimellm/model/chatglm/configuration_chatglm.py",
"snippet": "class ChatGLMConfig(PretrainedConfig):\n model_type = \"chatglm\"\n def __init__(\n self,\n num_layers=28,\n padded_vocab_size=65024,\n hidden_size=4096,\n ffn_hidden_size=13696,\n kv_channels=128,\n num_attention_heads=32,\n seq_length=2048,\n hidden_dropout=0.0,\n classifier_dropout=None,\n attention_dropout=0.0,\n layernorm_epsilon=1e-5,\n rmsnorm=True,\n apply_residual_connection_post_layernorm=False,\n post_layer_norm=True,\n add_bias_linear=False,\n add_qkv_bias=False,\n bias_dropout_fusion=True,\n multi_query_attention=False,\n multi_query_group_num=1,\n apply_query_key_layer_scaling=True,\n attention_softmax_in_fp32=True,\n fp32_residual_connection=False,\n quantization_bit=0,\n pre_seq_len=None,\n prefix_projection=False,\n **kwargs\n ):\n self.num_layers = num_layers\n self.vocab_size = padded_vocab_size\n self.padded_vocab_size = padded_vocab_size\n self.hidden_size = hidden_size\n self.ffn_hidden_size = ffn_hidden_size\n self.kv_channels = kv_channels\n self.num_attention_heads = num_attention_heads\n self.seq_length = seq_length\n self.hidden_dropout = hidden_dropout\n self.classifier_dropout = classifier_dropout\n self.attention_dropout = attention_dropout\n self.layernorm_epsilon = layernorm_epsilon\n self.rmsnorm = rmsnorm\n self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm\n self.post_layer_norm = post_layer_norm\n self.add_bias_linear = add_bias_linear\n self.add_qkv_bias = add_qkv_bias\n self.bias_dropout_fusion = bias_dropout_fusion\n self.multi_query_attention = multi_query_attention\n self.multi_query_group_num = multi_query_group_num\n self.apply_query_key_layer_scaling = apply_query_key_layer_scaling\n self.attention_softmax_in_fp32 = attention_softmax_in_fp32\n self.fp32_residual_connection = fp32_residual_connection\n self.quantization_bit = quantization_bit\n self.pre_seq_len = pre_seq_len\n self.prefix_projection = prefix_projection\n super().__init__(**kwargs)"
},
{
"identifier": "ChatGLMModel",
"path": "vtimellm/model/chatglm/modeling_chatglm.py",
"snippet": "class ChatGLMModel(ChatGLMPreTrainedModel):\n def __init__(self, config: ChatGLMConfig, device=None, empty_init=True):\n super().__init__(config)\n if empty_init:\n init_method = skip_init\n else:\n init_method = default_init\n init_kwargs = {}\n if device is not None:\n init_kwargs[\"device\"] = device\n self.embedding = init_method(Embedding, config, **init_kwargs)\n self.num_layers = config.num_layers\n self.multi_query_group_num = config.multi_query_group_num\n self.kv_channels = config.kv_channels\n\n # Rotary positional embeddings\n self.seq_length = config.seq_length\n rotary_dim = (\n config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels\n )\n self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, original_impl=config.original_rope, device=device,\n dtype=config.torch_dtype)\n self.encoder = init_method(GLMTransformer, config, **init_kwargs)\n self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False,\n dtype=config.torch_dtype, **init_kwargs)\n self.pre_seq_len = config.pre_seq_len\n self.prefix_projection = config.prefix_projection\n if self.pre_seq_len is not None:\n for param in self.parameters():\n param.requires_grad = False\n self.prefix_tokens = torch.arange(self.pre_seq_len).long()\n self.prefix_encoder = PrefixEncoder(config)\n self.dropout = torch.nn.Dropout(0.1)\n\n def get_input_embeddings(self):\n return self.embedding.word_embeddings\n\n def get_prompt(self, batch_size, device, dtype=torch.half):\n prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device)\n past_key_values = self.prefix_encoder(prefix_tokens).type(dtype)\n past_key_values = past_key_values.view(\n batch_size,\n self.pre_seq_len,\n self.num_layers * 2,\n self.multi_query_group_num,\n self.kv_channels\n )\n # seq_len, b, nh, hidden_size\n past_key_values = self.dropout(past_key_values)\n past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2)\n return past_key_values\n\n def forward(\n self,\n input_ids,\n position_ids: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.BoolTensor] = None,\n full_attention_mask: Optional[torch.BoolTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ):\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n batch_size, seq_length = input_ids.shape\n\n if inputs_embeds is None:\n inputs_embeds = self.embedding(input_ids)\n\n if self.pre_seq_len is not None:\n if past_key_values is None:\n past_key_values = self.get_prompt(batch_size=batch_size, device=input_ids.device,\n dtype=inputs_embeds.dtype)\n if attention_mask is not None:\n attention_mask = torch.cat([attention_mask.new_ones((batch_size, self.pre_seq_len)),\n attention_mask], dim=-1)\n\n if full_attention_mask is None:\n if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1):\n full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask)\n\n # Rotary positional embeddings\n rotary_pos_emb = self.rotary_pos_emb(self.seq_length)\n if 
position_ids is not None:\n rotary_pos_emb = rotary_pos_emb[position_ids]\n else:\n rotary_pos_emb = rotary_pos_emb[None, :seq_length]\n rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous()\n\n # Run encoder.\n hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(\n inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb,\n kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states\n )\n\n if not return_dict:\n return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)\n\n return BaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n def quantize(self, weight_bit_width: int):\n from .quantization import quantize\n quantize(self.encoder, weight_bit_width)\n return self"
},
{
"identifier": "ChatGLMForConditionalGeneration",
"path": "vtimellm/model/chatglm/modeling_chatglm.py",
"snippet": "class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):\n def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):\n super().__init__(config)\n\n self.max_sequence_length = config.max_length\n self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)\n self.config = config\n self.quantized = False\n\n if self.config.quantization_bit:\n self.quantize(self.config.quantization_bit, empty_init=True)\n\n def _update_model_kwargs_for_generation(\n self,\n outputs: ModelOutput,\n model_kwargs: Dict[str, Any],\n is_encoder_decoder: bool = False,\n standardize_cache_format: bool = False,\n ) -> Dict[str, Any]:\n # update past_key_values\n model_kwargs[\"past_key_values\"] = self._extract_past_from_model_output(\n outputs, standardize_cache_format=standardize_cache_format\n )\n\n # update attention mask\n if \"attention_mask\" in model_kwargs:\n attention_mask = model_kwargs[\"attention_mask\"]\n model_kwargs[\"attention_mask\"] = torch.cat(\n [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1\n )\n\n # update position ids\n if \"position_ids\" in model_kwargs:\n position_ids = model_kwargs[\"position_ids\"]\n new_position_id = position_ids[..., -1:].clone()\n new_position_id += 1\n model_kwargs[\"position_ids\"] = torch.cat(\n [position_ids, new_position_id], dim=-1\n )\n\n model_kwargs[\"is_first_forward\"] = False\n return model_kwargs\n\n def prepare_inputs_for_generation(\n self,\n input_ids: torch.LongTensor,\n past_key_values: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n is_first_forward: bool = True,\n **kwargs\n ) -> dict:\n # only last token for input_ids if past is not None\n if position_ids is None:\n position_ids = self.get_position_ids(input_ids, device=input_ids.device)\n if not is_first_forward:\n if past_key_values is not None:\n position_ids = position_ids[..., -1:]\n input_ids = input_ids[:, -1:]\n return {\n \"input_ids\": input_ids,\n \"past_key_values\": past_key_values,\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"return_last_logit\": True,\n \"use_cache\": use_cache\n }\n\n def forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[Tuple[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n labels: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n return_last_logit: Optional[bool] = False,\n ):\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids=input_ids,\n position_ids=position_ids,\n attention_mask=attention_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = transformer_outputs[0]\n if return_last_logit:\n hidden_states = hidden_states[-1:]\n lm_logits = self.transformer.output_layer(hidden_states)\n lm_logits = lm_logits.transpose(0, 1).contiguous()\n\n loss = None\n if labels is not None:\n lm_logits = lm_logits.to(torch.float32)\n\n # Shift 
so that tokens < n predict n\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss(ignore_index=-100)\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n lm_logits = lm_logits.to(hidden_states.dtype)\n loss = loss.to(hidden_states.dtype)\n\n if not return_dict:\n output = (lm_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=lm_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n @staticmethod\n def _reorder_cache(\n past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor\n ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:\n \"\"\"\n This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or\n [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct\n beam_idx at every generation step.\n\n Output shares the same memory storage as `past`.\n \"\"\"\n return tuple(\n (\n layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),\n layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),\n )\n for layer_past in past\n )\n\n def process_response(self, output, history):\n content = \"\"\n history = deepcopy(history)\n for response in output.split(\"<|assistant|>\"):\n metadata, content = response.split(\"\\n\", maxsplit=1)\n if not metadata.strip():\n content = content.strip()\n history.append({\"role\": \"assistant\", \"metadata\": metadata, \"content\": content})\n content = content.replace(\"[[训练时间]]\", \"2023年\")\n else:\n history.append({\"role\": \"assistant\", \"metadata\": metadata, \"content\": content})\n if history[0][\"role\"] == \"system\" and \"tools\" in history[0]:\n content = \"\\n\".join(content.split(\"\\n\")[1:-1])\n def tool_call(**kwargs):\n return kwargs\n parameters = eval(content)\n content = {\"name\": metadata.strip(), \"parameters\": parameters}\n else:\n content = {\"name\": metadata.strip(), \"content\": content}\n return content, history\n\n @torch.inference_mode()\n def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, role: str = \"user\",\n max_length: int = 8192, num_beams=1, do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None,\n **kwargs):\n if history is None:\n history = []\n if logits_processor is None:\n logits_processor = LogitsProcessorList()\n logits_processor.append(InvalidScoreLogitsProcessor())\n gen_kwargs = {\"max_length\": max_length, \"num_beams\": num_beams, \"do_sample\": do_sample, \"top_p\": top_p,\n \"temperature\": temperature, \"logits_processor\": logits_processor, **kwargs}\n inputs = tokenizer.build_chat_input(query, history=history, role=role)\n inputs = inputs.to(self.device)\n eos_token_id = [tokenizer.eos_token_id, tokenizer.get_command(\"<|user|>\"),\n tokenizer.get_command(\"<|observation|>\")]\n outputs = self.generate(**inputs, **gen_kwargs, eos_token_id=eos_token_id)\n outputs = outputs.tolist()[0][len(inputs[\"input_ids\"][0]):-1]\n response = tokenizer.decode(outputs)\n history.append({\"role\": role, \"content\": query})\n response, history = self.process_response(response, history)\n return response, history\n\n @torch.inference_mode()\n def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] 
= None, role: str = \"user\",\n past_key_values=None,max_length: int = 8192, do_sample=True, top_p=0.8, temperature=0.8,\n logits_processor=None, return_past_key_values=False, **kwargs):\n if history is None:\n history = []\n if logits_processor is None:\n logits_processor = LogitsProcessorList()\n logits_processor.append(InvalidScoreLogitsProcessor())\n eos_token_id = [tokenizer.eos_token_id, tokenizer.get_command(\"<|user|>\"),\n tokenizer.get_command(\"<|observation|>\")]\n gen_kwargs = {\"max_length\": max_length, \"do_sample\": do_sample, \"top_p\": top_p,\n \"temperature\": temperature, \"logits_processor\": logits_processor, **kwargs}\n if past_key_values is None:\n inputs = tokenizer.build_chat_input(query, history=history, role=role)\n else:\n inputs = tokenizer.build_chat_input(query, role=role)\n inputs = inputs.to(self.device)\n if past_key_values is not None:\n past_length = past_key_values[0][0].shape[0]\n if self.transformer.pre_seq_len is not None:\n past_length -= self.transformer.pre_seq_len\n inputs.position_ids += past_length\n attention_mask = inputs.attention_mask\n attention_mask = torch.cat((attention_mask.new_ones(1, past_length), attention_mask), dim=1)\n inputs['attention_mask'] = attention_mask\n history.append({\"role\": role, \"content\": query})\n for outputs in self.stream_generate(**inputs, past_key_values=past_key_values,\n eos_token_id=eos_token_id, return_past_key_values=return_past_key_values,\n **gen_kwargs):\n if return_past_key_values:\n outputs, past_key_values = outputs\n outputs = outputs.tolist()[0][len(inputs[\"input_ids\"][0]):-1]\n response = tokenizer.decode(outputs)\n if response and response[-1] != \"�\":\n response, new_history = self.process_response(response, history)\n if return_past_key_values:\n yield response, new_history, past_key_values\n else:\n yield response, new_history\n\n @torch.inference_mode()\n def stream_generate(\n self,\n input_ids,\n generation_config: Optional[GenerationConfig] = None,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,\n return_past_key_values=False,\n **kwargs,\n ):\n batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]\n\n if generation_config is None:\n generation_config = self.generation_config\n generation_config = copy.deepcopy(generation_config)\n model_kwargs = generation_config.update(**kwargs)\n model_kwargs[\"use_cache\"] = generation_config.use_cache\n bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id\n\n if isinstance(eos_token_id, int):\n eos_token_id = [eos_token_id]\n eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None\n\n has_default_max_length = kwargs.get(\"max_length\") is None and generation_config.max_length is not None\n if has_default_max_length and generation_config.max_new_tokens is None:\n warnings.warn(\n f\"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. 
\"\n \"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we\"\n \" recommend using `max_new_tokens` to control the maximum length of the generation.\",\n UserWarning,\n )\n elif generation_config.max_new_tokens is not None:\n generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length\n if not has_default_max_length:\n logger.warn(\n f\"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=\"\n f\"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. \"\n \"Please refer to the documentation for more information. \"\n \"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)\",\n UserWarning,\n )\n\n if input_ids_seq_length >= generation_config.max_length:\n input_ids_string = \"decoder_input_ids\" if self.config.is_encoder_decoder else \"input_ids\"\n logger.warning(\n f\"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to\"\n f\" {generation_config.max_length}. This can lead to unexpected behavior. You should consider\"\n \" increasing `max_new_tokens`.\"\n )\n\n # 2. Set generation parameters if not already defined\n logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n\n logits_processor = self._get_logits_processor(\n generation_config=generation_config,\n input_ids_seq_length=input_ids_seq_length,\n encoder_input_ids=input_ids,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n logits_processor=logits_processor,\n )\n\n stopping_criteria = self._get_stopping_criteria(\n generation_config=generation_config, stopping_criteria=stopping_criteria\n )\n logits_warper = self._get_logits_warper(generation_config)\n\n unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)\n scores = None\n while True:\n model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)\n # forward pass to get next token\n outputs = self(\n **model_inputs,\n return_dict=True,\n output_attentions=False,\n output_hidden_states=False,\n )\n\n next_token_logits = outputs.logits[:, -1, :]\n\n # pre-process distribution\n next_token_scores = logits_processor(input_ids, next_token_logits)\n next_token_scores = logits_warper(input_ids, next_token_scores)\n\n # sample\n probs = nn.functional.softmax(next_token_scores, dim=-1)\n if generation_config.do_sample:\n next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)\n else:\n next_tokens = torch.argmax(probs, dim=-1)\n # update generated ids, model inputs, and length for next step\n input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)\n model_kwargs = self._update_model_kwargs_for_generation(\n outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder\n )\n unfinished_sequences = unfinished_sequences.mul(\n next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)\n )\n if return_past_key_values:\n yield input_ids, outputs.past_key_values\n else:\n yield input_ids\n # stop when each sentence is finished, or if we exceed the maximum length\n if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):\n break\n\n def quantize(self, bits: int, empty_init=False, device=None, **kwargs):\n if bits == 0:\n return\n\n from .quantization import quantize\n\n if self.quantized:\n logger.info(\"Already quantized.\")\n return self\n\n 
self.quantized = True\n\n self.config.quantization_bit = bits\n\n self.transformer.encoder = quantize(self.transformer.encoder, bits, empty_init=empty_init, device=device,\n **kwargs)\n return self"
},
{
"identifier": "VTimeLLMMetaModel",
"path": "vtimellm/model/vtimellm_arch.py",
"snippet": "class VTimeLLMMetaModel:\n\n def initialize_vision_modules(self, model_args):\n pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter\n\n if not hasattr(self, 'mm_projector'):\n self.mm_projector = nn.Linear(768, self.config.hidden_size)\n\n if pretrain_mm_mlp_adapter is not None:\n mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')\n def get_w(weights, keyword):\n return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}\n\n self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))\n print(\"load mlp:\", pretrain_mm_mlp_adapter)"
},
{
"identifier": "VTimeLLMMetaForCausalLM",
"path": "vtimellm/model/vtimellm_arch.py",
"snippet": "class VTimeLLMMetaForCausalLM(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, position_ids, attention_mask, past_key_values, labels, images\n ):\n # print(position_ids, attention_mask)\n # if past_key_values:\n # print(past_key_values[-1][-1].shape)\n # print(input_ids.shape, position_ids.shape, attention_mask.shape, past_key_values.shape, images)\n if images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and images is not None and input_ids.shape[1] == 1:\n if self.get_model().config.model_type == 'chatglm':\n target_shape = past_key_values[-1][-1].shape[0] + 1\n else:\n target_shape = past_key_values[-1][-1].shape[-2] + 1\n attention_mask = torch.cat((attention_mask, torch.ones(\n (attention_mask.shape[0], target_shape - attention_mask.shape[1]),\n dtype=attention_mask.dtype,\n device=attention_mask.device\n )), dim=1)\n position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1\n return input_ids, position_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.get_model().mm_projector(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n # image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.get_model().mm_projector(images)\n # print([image.shape for image in image_features])\n \n _labels = labels\n _position_ids = position_ids\n _attention_mask = attention_mask\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids, dtype=torch.bool)\n else:\n attention_mask = attention_mask.bool()\n if position_ids is None:\n position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)\n if labels is None:\n labels = torch.full_like(input_ids, IGNORE_INDEX)\n\n # remove the padding using attention_mask -- TODO: double check\n input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]\n labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]\n\n new_input_embeds = []\n new_labels = []\n cur_image_idx = 0\n for batch_idx, cur_input_ids in enumerate(input_ids):\n num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum()\n if num_images == 0:\n cur_image_features = image_features[cur_image_idx]\n cur_input_embeds_1 = self.get_model().get_input_embeddings()(cur_input_ids)\n cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0)\n new_input_embeds.append(cur_input_embeds)\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n\n image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]]\n cur_input_ids_noim = []\n cur_labels = labels[batch_idx]\n cur_labels_noim = []\n for i in range(len(image_token_indices) - 1):\n cur_input_ids_noim.append(cur_input_ids[image_token_indices[i]+1:image_token_indices[i+1]])\n cur_labels_noim.append(cur_labels[image_token_indices[i]+1:image_token_indices[i+1]])\n split_sizes = [x.shape[0] for x in cur_labels_noim]\n cur_input_embeds = self.get_model().get_input_embeddings()(torch.cat(cur_input_ids_noim))\n cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0)\n cur_new_input_embeds = []\n cur_new_labels = []\n\n for i in range(num_images + 1):\n 
cur_new_input_embeds.append(cur_input_embeds_no_im[i])\n cur_new_labels.append(cur_labels_noim[i])\n if i < num_images:\n cur_image_features = image_features[cur_image_idx]\n cur_image_idx += 1\n cur_new_input_embeds.append(cur_image_features)\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))\n\n cur_new_input_embeds = torch.cat(cur_new_input_embeds)\n cur_new_labels = torch.cat(cur_new_labels)\n\n new_input_embeds.append(cur_new_input_embeds)\n new_labels.append(cur_new_labels)\n\n # Truncate sequences to max length as image embeddings can make the sequence longer\n tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None)\n if tokenizer_model_max_length is not None:\n new_input_embeds = [x[:tokenizer_model_max_length] for x in new_input_embeds]\n new_labels = [x[:tokenizer_model_max_length] for x in new_labels]\n\n # Combine them\n max_len = max(x.shape[0] for x in new_input_embeds)\n batch_size = len(new_input_embeds)\n\n new_input_embeds_padded = []\n new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device)\n attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device)\n position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)\n\n for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)):\n cur_len = cur_new_embed.shape[0]\n if getattr(self.config, 'tokenizer_padding_side', 'right') == \"left\":\n new_input_embeds_padded.append(torch.cat((\n torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device),\n cur_new_embed\n ), dim=0))\n if cur_len > 0:\n new_labels_padded[i, -cur_len:] = cur_new_labels\n attention_mask[i, -cur_len:] = True\n position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)\n else:\n new_input_embeds_padded.append(torch.cat((\n cur_new_embed,\n torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)\n ), dim=0))\n if cur_len > 0:\n new_labels_padded[i, :cur_len] = cur_new_labels\n attention_mask[i, :cur_len] = True\n position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)\n\n new_input_embeds = torch.stack(new_input_embeds_padded, dim=0)\n\n if _labels is None:\n new_labels = None\n else:\n new_labels = new_labels_padded\n\n if _attention_mask is None:\n attention_mask = None\n else:\n attention_mask = attention_mask.to(dtype=_attention_mask.dtype)\n\n if _position_ids is None:\n position_ids = None\n\n if self.get_model().config.model_type == 'chatglm':\n fake_input_ids = torch.full((new_input_embeds.shape[0], new_input_embeds.shape[1]), -10000, \n dtype=new_input_embeds.dtype, device=new_input_embeds.device)\n attention_mask = attention_mask.to(torch.int8)\n new_input_embeds = new_input_embeds.transpose(0, 1).contiguous()\n else:\n fake_input_ids = None\n # print(position_ids, attention_mask)\n return fake_input_ids, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels"
}
] | import torch
import torch.nn as nn
from typing import List, Optional, Tuple, Union
from transformers import AutoConfig, AutoModelForCausalLM
from .chatglm import ChatGLMConfig, ChatGLMModel, ChatGLMForConditionalGeneration
from .vtimellm_arch import VTimeLLMMetaModel, VTimeLLMMetaForCausalLM | 8,054 |
class VTimeLLMChatGLMConfig(ChatGLMConfig):
model_type = "VTimeLLM_ChatGLM"
class VTimeLLMChatGLMModel(ChatGLMModel, VTimeLLMMetaModel):
config_class = VTimeLLMChatGLMConfig
def __init__(self, config, empty_init=True, device=None):
super(VTimeLLMChatGLMModel, self).__init__(config, empty_init=empty_init, device=device)
|
class VTimeLLMChatGLMConfig(ChatGLMConfig):
model_type = "VTimeLLM_ChatGLM"
class VTimeLLMChatGLMModel(ChatGLMModel, VTimeLLMMetaModel):
config_class = VTimeLLMChatGLMConfig
def __init__(self, config, empty_init=True, device=None):
super(VTimeLLMChatGLMModel, self).__init__(config, empty_init=empty_init, device=device)
| class VTimeLLMChatGLMForCausalLM(ChatGLMForConditionalGeneration, VTimeLLMMetaForCausalLM): | 2 | 2023-11-28 06:33:42+00:00 | 12k |
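To make the VTimeLLM context above concrete, here is a simplified, self-contained sketch of the feature-splicing step described in prepare_inputs_labels_for_multimodal: each image-placeholder position in a sequence is replaced by the projected video features. The placeholder value and the helper name below are assumptions for illustration only.

import torch

IMAGE_TOKEN_INDEX = -200  # assumed placeholder id; the real constant comes from vtimellm's constants

def splice_image_features(input_ids, text_embeds, image_features):
    # Rebuild the embedding sequence, swapping each placeholder slot for the frame features.
    chunks, cursor = [], 0
    for pos in (input_ids == IMAGE_TOKEN_INDEX).nonzero(as_tuple=True)[0].tolist():
        chunks.append(text_embeds[cursor:pos])  # text tokens before the placeholder
        chunks.append(image_features)           # projected video frames take the slot
        cursor = pos + 1
    chunks.append(text_embeds[cursor:])         # trailing text tokens
    return torch.cat(chunks, dim=0)

ids = torch.tensor([1, 2, IMAGE_TOKEN_INDEX, 3, 4, 5])
embeds = torch.randn(6, 8)  # one (dummy) embedding per token, dim 8
frames = torch.randn(4, 8)  # four projected video frames
print(splice_image_features(ids, embeds, frames).shape)  # torch.Size([9, 8])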
moonbow721/DPoser | run/smplify.py | [
{
"identifier": "sde_lib",
"path": "lib/algorithms/advanced/sde_lib.py",
"snippet": "class SDE(abc.ABC):\n class RSDE(self.__class__):\nclass VPSDE(SDE):\nclass subVPSDE(SDE):\nclass VESDE(SDE):\n def __init__(self, N):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def discretize(self, x, t):\n def return_alpha_sigma(self, t):\n def reverse(self, score_fn, probability_flow=False):\n def __init__(self):\n def T(self):\n def sde(self, x, t, condition=None, mask=None, guide=False):\n def discretize(self, x, t, condition=None, mask=None):\n def __init__(self, beta_min=0.1, beta_max=20, N=1000, T=1):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def discretize(self, x, t):\n def return_alpha_sigma(self, t):\n def __init__(self, beta_min=0.1, beta_max=20, N=1000, T=1):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def return_alpha_sigma(self, t):\n def __init__(self, sigma_min=0.01, sigma_max=50, N=1000, T=1):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def discretize(self, x, t):\n def return_alpha_sigma(self, t):\n G = diffusion * torch.sqrt(torch.tensor(dt, device=t.device))\n N = self.N\n T = self.T\n N = np.prod(shape[1:])\n G = sqrt_beta\n N = np.prod(shape[1:])\n N = np.prod(shape[1:])\n G = torch.sqrt(sigma ** 2 - adjacent_sigma ** 2)"
},
{
"identifier": "sampling",
"path": "lib/algorithms/advanced/sampling.py",
"snippet": "_CORRECTORS = {}\n_PREDICTORS = {}\ndef register_predictor(cls=None, *, name=None):\n def _register(cls):\ndef register_corrector(cls=None, *, name=None):\n def _register(cls):\ndef get_predictor(name):\ndef get_corrector(name):\ndef get_sampling_fn(config, sde, shape, inverse_scaler, eps, device=None):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t, observation, mask):\n def update_fn_guide(self, x_t, t, observation, mask, condition=None, grad_step=1.0):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t):\n def __init__(self, sde, score_fn, probability_flow=False):\n def vesde_update_fn(self, x, t):\n def vpsde_update_fn(self, x, t):\n def update_fn(self, x, t):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\ndef shared_predictor_update_fn(x, t, observation, mask, sde, model, predictor, probability_flow, continuous):\ndef shared_corrector_update_fn(x, t, observation, mask, sde, model, corrector, continuous, snr, n_steps):\ndef get_pc_sampler(sde, shape, predictor, corrector, inverse_scaler, snr,\n n_steps=1, probability_flow=False, continuous=False,\n denoise=True, eps=1e-3, device='cuda'):\n def get_imputation_update_fn(update_fn):\n def imputation_update_fn(x, vec_t, observation, mask, model, args):\n def pc_sampler(model, observation=None, mask=None, z=None, start_step=0, args=None):\ndef get_ode_sampler(sde, shape, inverse_scaler,\n denoise=False, rtol=1e-5, atol=1e-5,\n method='RK45', eps=1e-3, device='cuda'):\n def denoise_update_fn(model, x):\n def drift_fn(model, x, t):\n def ode_sampler(model, z=None):\n def ode_func(t, x):\nclass Predictor(abc.ABC):\nclass Corrector(abc.ABC):\nclass EulerMaruyamaPredictor(Predictor):\nclass ReverseDiffusionPredictor(Predictor):\nclass AncestralSamplingPredictor(Predictor):\nclass NonePredictor(Predictor):\nclass LangevinCorrector(Corrector):\nclass AnnealedLangevinDynamics(Corrector):\nclass NoneCorrector(Corrector):"
},
{
"identifier": "utils",
"path": "lib/algorithms/advanced/utils.py",
"snippet": "_MODELS = {}\ndef register_model(cls=None, *, name=None):\n def _register(cls):\ndef get_model(name):\ndef get_sigmas(config):\ndef get_ddpm_params(config):\ndef create_model(config):\ndef get_model_fn(model, train=False):\n def model_fn(x, labels, condition, mask):\ndef get_score_fn(sde, model, train=False, continuous=False):\n def score_fn(x, t, condition, mask):\n def score_fn(x, t, condition, mask):\ndef to_flattened_numpy(x):\ndef from_flattened_numpy(x, shape):"
},
{
"identifier": "ScoreModelFC",
"path": "lib/algorithms/advanced/model.py",
"snippet": "class ScoreModelFC(nn.Module):\n \"\"\"\n Independent condition feature projection layers for each block\n \"\"\"\n\n def __init__(self, config, n_poses=21, pose_dim=6, hidden_dim=64,\n embed_dim=32, n_blocks=2):\n super(ScoreModelFC, self).__init__()\n\n self.config = config\n self.n_poses = n_poses\n self.joint_dim = pose_dim\n self.n_blocks = n_blocks\n\n self.act = get_act(config)\n\n self.pre_dense = nn.Linear(n_poses * pose_dim, hidden_dim)\n self.pre_dense_t = nn.Linear(embed_dim, hidden_dim)\n self.pre_dense_cond = nn.Linear(hidden_dim, hidden_dim)\n self.pre_gnorm = nn.GroupNorm(32, num_channels=hidden_dim)\n self.dropout = nn.Dropout(p=config.model.dropout)\n\n # time embedding\n self.time_embedding_type = config.model.embedding_type.lower()\n if self.time_embedding_type == 'fourier':\n self.gauss_proj = GaussianFourierProjection(embed_dim=embed_dim, scale=config.model.fourier_scale)\n elif self.time_embedding_type == 'positional':\n self.posit_proj = functools.partial(get_timestep_embedding, embedding_dim=embed_dim)\n else:\n assert 0\n\n self.shared_time_embed = nn.Sequential(\n nn.Linear(embed_dim, embed_dim),\n self.act,\n )\n self.register_buffer('sigmas', torch.tensor(get_sigmas(config), dtype=torch.float))\n\n for idx in range(n_blocks):\n setattr(self, f'b{idx + 1}_dense1', nn.Linear(hidden_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_dense1_t', nn.Linear(embed_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_gnorm1', nn.GroupNorm(32, num_channels=hidden_dim))\n\n setattr(self, f'b{idx + 1}_dense2', nn.Linear(hidden_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_dense2_t', nn.Linear(embed_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_gnorm2', nn.GroupNorm(32, num_channels=hidden_dim))\n\n self.post_dense = nn.Linear(hidden_dim, n_poses * pose_dim)\n\n def forward(self, batch, t, condition=None, mask=None):\n \"\"\"\n batch: [B, j*3] or [B, j*6]\n t: [B]\n Return: [B, j*3] or [B, j*6] same dim as batch\n \"\"\"\n bs = batch.shape[0]\n\n # batch = batch.view(bs, -1) # [B, j*3]\n\n # time embedding\n if self.time_embedding_type == 'fourier':\n # Gaussian Fourier features embeddings.\n used_sigmas = t\n temb = self.gauss_proj(torch.log(used_sigmas))\n elif self.time_embedding_type == 'positional':\n # Sinusoidal positional embeddings.\n timesteps = t\n used_sigmas = self.sigmas[t.long()]\n temb = self.posit_proj(timesteps)\n else:\n raise ValueError(f'time embedding type {self.time_embedding_type} unknown.')\n\n temb = self.shared_time_embed(temb)\n\n h = self.pre_dense(batch)\n h += self.pre_dense_t(temb)\n h = self.pre_gnorm(h)\n h = self.act(h)\n h = self.dropout(h)\n\n for idx in range(self.n_blocks):\n h1 = getattr(self, f'b{idx + 1}_dense1')(h)\n h1 += getattr(self, f'b{idx + 1}_dense1_t')(temb)\n h1 = getattr(self, f'b{idx + 1}_gnorm1')(h1)\n h1 = self.act(h1)\n # dropout, maybe\n h1 = self.dropout(h1)\n\n h2 = getattr(self, f'b{idx + 1}_dense2')(h1)\n h2 += getattr(self, f'b{idx + 1}_dense2_t')(temb)\n h2 = getattr(self, f'b{idx + 1}_gnorm2')(h2)\n h2 = self.act(h2)\n # dropout, maybe\n h2 = self.dropout(h2)\n\n h = h + h2\n\n res = self.post_dense(h) # [B, j*3]\n\n ''' normalize the output '''\n if self.config.model.scale_by_sigma:\n used_sigmas = used_sigmas.reshape((bs, 1))\n res = res / used_sigmas\n\n return res"
},
{
"identifier": "ExponentialMovingAverage",
"path": "lib/algorithms/ema.py",
"snippet": "class ExponentialMovingAverage:\n \"\"\"\n Maintains (exponential) moving average of a set of parameters.\n \"\"\"\n\n def __init__(self, parameters, decay=0.999, use_num_updates=True):\n \"\"\"\n Args:\n parameters: Iterable of `torch.nn.Parameter`; usually the result of\n `model.parameters()`.\n decay: The exponential decay.\n use_num_updates: Whether to use number of updates when computing\n averages.\n \"\"\"\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n self.decay = decay\n self.num_updates = 0 if use_num_updates else None\n self.shadow_params = [p.clone().detach()\n for p in parameters if p.requires_grad]\n self.collected_params = []\n\n def update(self, parameters):\n \"\"\"\n Update currently maintained parameters.\n\n Call this every time the parameters are updated, such as the result of\n the `optimizer.step()` call.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; usually the same set of\n parameters used to initialize this object.\n \"\"\"\n decay = self.decay\n if self.num_updates is not None:\n self.num_updates += 1\n decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))\n one_minus_decay = 1.0 - decay\n with torch.no_grad():\n parameters = [p for p in parameters if p.requires_grad]\n for s_param, param in zip(self.shadow_params, parameters):\n s_param.sub_(one_minus_decay * (s_param - param))\n\n def copy_to(self, parameters):\n \"\"\"\n Copy current parameters into given collection of parameters.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored moving averages.\n \"\"\"\n parameters = [p for p in parameters if p.requires_grad]\n for s_param, param in zip(self.shadow_params, parameters):\n if param.requires_grad:\n param.data.copy_(s_param.data)\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)\n\n def state_dict(self):\n return dict(decay=self.decay, num_updates=self.num_updates,\n shadow_params=self.shadow_params)\n\n def load_state_dict(self, state_dict):\n self.decay = state_dict['decay']\n self.num_updates = state_dict['num_updates']\n self.shadow_params = state_dict['shadow_params']"
},
{
"identifier": "constants",
"path": "lib/body_model/constants.py",
"snippet": "SMPL_MEAN_PATH = join(curr_dir, './smpl_mean_params.npz')\r\nBEND_POSE_PATH = join(curr_dir, '../data/bend_pose.npz')\r\nCROP_IMG_HEIGHT = 256\r\nCROP_IMG_WIDTH = 192\r\nCROP_ASPECT_RATIO = CROP_IMG_HEIGHT / float(CROP_IMG_WIDTH)\r\nIMG_NORM_MEAN = [0.485, 0.456, 0.406]\r\nIMG_NORM_STD = [0.229, 0.224, 0.225]\r\nFOCAL_LENGTH = 5000.\r\nIMG_RES = 224\r\nJOINT_NAMES = [\r\n# 25 OpenPose joints (in the order provided by OpenPose)\r\n'OP Nose',\r\n'OP Neck',\r\n'OP RShoulder',\r\n'OP RElbow',\r\n'OP RWrist',\r\n'OP LShoulder',\r\n'OP LElbow',\r\n'OP LWrist',\r\n'OP MidHip',\r\n'OP RHip',\r\n'OP RKnee',\r\n'OP RAnkle',\r\n'OP LHip',\r\n'OP LKnee',\r\n'OP LAnkle',\r\n'OP REye',\r\n'OP LEye',\r\n'OP REar',\r\n'OP LEar',\r\n'OP LBigToe',\r\n'OP LSmallToe',\r\n'OP LHeel',\r\n'OP RBigToe',\r\n'OP RSmallToe',\r\n'OP RHeel',\r\n# 24 Ground Truth joints (superset of joints from different datasets)\r\n'Right Ankle',\r\n'Right Knee',\r\n'Right Hip',\r\n'Left Hip',\r\n'Left Knee',\r\n'Left Ankle',\r\n'Right Wrist',\r\n'Right Elbow',\r\n'Right Shoulder',\r\n'Left Shoulder',\r\n'Left Elbow',\r\n'Left Wrist',\r\n'Neck (LSP)',\r\n'Top of Head (LSP)',\r\n'Pelvis (MPII)',\r\n'Thorax (MPII)',\r\n'Spine (H36M)',\r\n'Jaw (H36M)',\r\n'Head (H36M)',\r\n'Nose',\r\n'Left Eye',\r\n'Right Eye',\r\n'Left Ear',\r\n'Right Ear'\r\n]\r\nJOINT_IDS = {JOINT_NAMES[i]: i for i in range(len(JOINT_NAMES))}\r\nJOINT_MAP = {\r\n'OP Nose': 24, 'OP Neck': 12, 'OP RShoulder': 17,\r\n'OP RElbow': 19, 'OP RWrist': 21, 'OP LShoulder': 16,\r\n'OP LElbow': 18, 'OP LWrist': 20, 'OP MidHip': 0,\r\n'OP RHip': 2, 'OP RKnee': 5, 'OP RAnkle': 8,\r\n'OP LHip': 1, 'OP LKnee': 4, 'OP LAnkle': 7,\r\n'OP REye': 25, 'OP LEye': 26, 'OP REar': 27,\r\n'OP LEar': 28, 'OP LBigToe': 29, 'OP LSmallToe': 30,\r\n'OP LHeel': 31, 'OP RBigToe': 32, 'OP RSmallToe': 33, 'OP RHeel': 34,\r\n'Right Ankle': 8, 'Right Knee': 5, 'Right Hip': 45,\r\n'Left Hip': 46, 'Left Knee': 4, 'Left Ankle': 7,\r\n'Right Wrist': 21, 'Right Elbow': 19, 'Right Shoulder': 17,\r\n'Left Shoulder': 16, 'Left Elbow': 18, 'Left Wrist': 20,\r\n'Neck (LSP)': 47, 'Top of Head (LSP)': 48,\r\n'Pelvis (MPII)': 49, 'Thorax (MPII)': 50,\r\n'Spine (H36M)': 51, 'Jaw (H36M)': 52,\r\n'Head (H36M)': 53, 'Nose': 24, 'Left Eye': 26,\r\n'Right Eye': 25, 'Left Ear': 28, 'Right Ear': 27\r\n}\r\nH36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]\r\nH36M_TO_J14 = H36M_TO_J17[:14]\r\nJ24_TO_J17 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18, 14, 16, 17]\r\nJ24_TO_J14 = J24_TO_J17[:14]\r\nSMPL_JOINTS_FLIP_PERM = [0, 2, 1, 3, 5, 4, 6, 8, 7, 9, 11, 10, 12, 14, 13, 15, 17, 16, 19, 18, 21, 20, 23, 22]\r\nSMPL_POSE_FLIP_PERM = []\r\nJ24_FLIP_PERM = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 16, 17, 18, 19, 21, 20, 23, 22]\r\nJ49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\\\r\n + [25+i for i in J24_FLIP_PERM]\r"
},
{
"identifier": "camera_fitting_loss",
"path": "lib/body_model/fitting_losses.py",
"snippet": "def camera_fitting_loss(model_joints, camera_t, camera_t_est, camera_center, joints_2d, joints_conf,\r\n focal_length=5000, depth_loss_weight=100):\r\n \"\"\"\r\n Loss function for camera optimization.\r\n \"\"\"\r\n\r\n # Project model joints\r\n batch_size = model_joints.shape[0]\r\n rotation = torch.eye(3, device=model_joints.device).unsqueeze(0).expand(batch_size, -1, -1)\r\n projected_joints = perspective_projection(model_joints, rotation, camera_t,\r\n focal_length, camera_center)\r\n\r\n op_joints = ['OP RHip', 'OP LHip', 'OP RShoulder', 'OP LShoulder']\r\n op_joints_ind = [constants.JOINT_IDS[joint] for joint in op_joints]\r\n gt_joints = ['Right Hip', 'Left Hip', 'Right Shoulder', 'Left Shoulder']\r\n gt_joints_ind = [constants.JOINT_IDS[joint] for joint in gt_joints]\r\n reprojection_error_op = (joints_2d[:, op_joints_ind] -\r\n projected_joints[:, op_joints_ind]) ** 2\r\n reprojection_error_gt = (joints_2d[:, gt_joints_ind] -\r\n projected_joints[:, gt_joints_ind]) ** 2\r\n\r\n # Check if for each example in the batch all 4 OpenPose detections are valid, otherwise use the GT detections\r\n # OpenPose joints are more reliable for this task, so we prefer to use them if possible\r\n is_valid = (joints_conf[:, op_joints_ind].min(dim=-1)[0][:, None, None] > 0).float()\r\n reprojection_loss = (is_valid * reprojection_error_op + (1 - is_valid) * reprojection_error_gt).sum(dim=(1, 2))\r\n\r\n # Loss that penalizes deviation from depth estimate\r\n depth_loss = (depth_loss_weight ** 2) * (camera_t[:, 2] - camera_t_est[:, 2]) ** 2\r\n\r\n total_loss = reprojection_loss + depth_loss\r\n return total_loss.sum()\r"
},
{
"identifier": "body_fitting_loss",
"path": "lib/body_model/fitting_losses.py",
"snippet": "def body_fitting_loss(body_pose, betas, model_joints, camera_t, camera_center,\r\n joints_2d, joints_conf, pose_prior, quan_t,\r\n focal_length=5000, sigma=100, pose_prior_weight=4.78,\r\n shape_prior_weight=5, angle_prior_weight=15.2,\r\n output='mean', verbose=True):\r\n \"\"\"\r\n Loss function for body fitting\r\n \"\"\"\r\n\r\n batch_size = body_pose.shape[0]\r\n rotation = torch.eye(3, device=body_pose.device).unsqueeze(0).expand(batch_size, -1, -1)\r\n projected_joints = perspective_projection(model_joints, rotation, camera_t,\r\n focal_length, camera_center)\r\n\r\n # Weighted robust reprojection error\r\n reprojection_error = gmof(projected_joints - joints_2d, sigma)\r\n reprojection_loss = (joints_conf ** 2) * reprojection_error.sum(dim=-1) # sum along x-y\r\n\r\n # Pose prior loss\r\n if pose_prior is not None:\r\n pose_prior_loss = (pose_prior_weight ** 2) * pose_prior(body_pose, betas, quan_t)\r\n else:\r\n pose_prior_loss = 0.0\r\n\r\n # Angle prior for knees and elbows\r\n angle_prior_loss = (angle_prior_weight ** 2) * angle_prior(body_pose).sum(dim=-1)\r\n\r\n # Regularizer to prevent betas from taking large values\r\n shape_prior_loss = (shape_prior_weight ** 2) * (betas ** 2).sum(dim=-1)\r\n\r\n # sum along different joints\r\n total_loss = reprojection_loss.sum(dim=-1) + pose_prior_loss + angle_prior_loss + shape_prior_loss\r\n if verbose:\r\n print(f\"Reprojection Loss: {reprojection_loss.sum(dim=-1).mean().item():.2f}\")\r\n print(f\"Angle Prior Loss: {angle_prior_loss.mean().item():.2f}\")\r\n print(f\"Shape Prior Loss: {shape_prior_loss.mean().item():.2f}\")\r\n if pose_prior is not None:\r\n print(f\"Pose Prior Loss: {pose_prior_loss.mean().item():.2f}\")\r\n\r\n if output == 'sum':\r\n return total_loss.sum()\r\n elif output == 'reprojection':\r\n return reprojection_loss\r\n else:\r\n return total_loss.mean() # mean along batch\r"
},
{
"identifier": "N_POSES",
"path": "lib/dataset/AMASS.py",
"snippet": "N_POSES = 21\r"
},
{
"identifier": "Posenormalizer",
"path": "lib/dataset/AMASS.py",
"snippet": "class Posenormalizer:\r\n def __init__(self, data_path, device='cuda:0', normalize=True, min_max=True, rot_rep=None):\r\n assert rot_rep in ['rot6d', 'axis']\r\n self.normalize = normalize\r\n self.min_max = min_max\r\n self.rot_rep = rot_rep\r\n normalize_params = torch.load(os.path.join(data_path, '{}_normalize1.pt'.format(rot_rep)))\r\n self.min_poses, self.max_poses = normalize_params['min_poses'].to(device), normalize_params['max_poses'].to(device)\r\n normalize_params = torch.load(os.path.join(data_path, '{}_normalize2.pt'.format(rot_rep)))\r\n self.mean_poses, self.std_poses = normalize_params['mean_poses'].to(device), normalize_params['std_poses'].to(device)\r\n\r\n def offline_normalize(self, poses, from_axis=False):\r\n assert len(poses.shape) == 2 or len(poses.shape) == 3 # [b, data_dim] or [t, b, data_dim]\r\n pose_shape = poses.shape\r\n if from_axis and self.rot_rep == 'rot6d':\r\n poses = axis_angle_to_rot6d(poses.reshape(-1, 3)).reshape(*pose_shape[:-1], -1)\r\n\r\n if not self.normalize:\r\n return poses\r\n\r\n if self.min_max:\r\n min_poses = self.min_poses.view(1, -1)\r\n max_poses = self.max_poses.view(1, -1)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n min_poses = min_poses.unsqueeze(0)\r\n max_poses = max_poses.unsqueeze(0)\r\n\r\n normalized_poses = 2 * (poses - min_poses) / (max_poses - min_poses) - 1\r\n\r\n else:\r\n mean_poses = self.mean_poses.view(1, -1)\r\n std_poses = self.std_poses.view(1, -1)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n mean_poses = mean_poses.unsqueeze(0)\r\n std_poses = std_poses.unsqueeze(0)\r\n\r\n normalized_poses = (poses - mean_poses) / std_poses\r\n\r\n return normalized_poses\r\n\r\n def offline_denormalize(self, poses, to_axis=False):\r\n assert len(poses.shape) == 2 or len(poses.shape) == 3 # [b, data_dim] or [t, b, data_dim]\r\n\r\n if not self.normalize:\r\n denormalized_poses = poses\r\n else:\r\n if self.min_max:\r\n min_poses = self.min_poses.view(1, -1)\r\n max_poses = self.max_poses.view(1, -1)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n min_poses = min_poses.unsqueeze(0)\r\n max_poses = max_poses.unsqueeze(0)\r\n\r\n denormalized_poses = 0.5 * ((poses + 1) * (max_poses - min_poses) + 2 * min_poses)\r\n\r\n else:\r\n mean_poses = self.mean_poses.view(1, -1)\r\n std_poses = self.std_poses.view(1, -1)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n mean_poses = mean_poses.unsqueeze(0)\r\n std_poses = std_poses.unsqueeze(0)\r\n\r\n denormalized_poses = poses * std_poses + mean_poses\r\n\r\n if to_axis and self.rot_rep == 'rot6d':\r\n pose_shape = denormalized_poses.shape\r\n denormalized_poses = rot6d_to_axis_angle(denormalized_poses.reshape(-1, 6)).reshape(*pose_shape[:-1], -1)\r\n\r\n return denormalized_poses\r"
},
{
"identifier": "import_configs",
"path": "lib/utils/generic.py",
"snippet": "def import_configs(config_path):\n module_name, function_name = config_path.rsplit('.', 1)\n config_module = importlib.import_module(module_name)\n get_config = getattr(config_module, function_name)\n config = get_config()\n return config"
},
{
"identifier": "linear_interpolation",
"path": "lib/utils/misc.py",
"snippet": "def linear_interpolation(A, B, frames):\r\n alpha = torch.linspace(0, 1, frames, device=A.device)[:, None]\r\n interpolated = (1 - alpha) * A + alpha * B\r\n return interpolated\r"
}
] | import math
import torch
from torch import nn
from tqdm import tqdm
from lib.algorithms.advanced import sde_lib, sampling
from lib.algorithms.advanced import utils as mutils
from lib.algorithms.advanced.model import ScoreModelFC
from lib.algorithms.ema import ExponentialMovingAverage
from lib.body_model import constants
from lib.body_model.fitting_losses import camera_fitting_loss, body_fitting_loss
from lib.dataset.AMASS import N_POSES, Posenormalizer
from lib.utils.generic import import_configs
from lib.utils.misc import linear_interpolation | 7,548 |
class DPoser(nn.Module):
def __init__(self, batch_size=32, config_path='', args=None):
super().__init__()
self.device = args.device
self.batch_size = batch_size
config = import_configs(config_path)
self.Normalizer = Posenormalizer(
data_path=f'{args.dataset_folder}/{args.version}/train',
min_max=config.data.min_max, rot_rep=config.data.rot_rep, device=args.device)
diffusion_model = self.load_model(config, args)
if config.training.sde.lower() == 'vpsde':
sde = sde_lib.VPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max,
N=config.model.num_scales)
elif config.training.sde.lower() == 'subvpsde':
sde = sde_lib.subVPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max,
N=config.model.num_scales)
elif config.training.sde.lower() == 'vesde':
sde = sde_lib.VESDE(sigma_min=config.model.sigma_min, sigma_max=config.model.sigma_max,
N=config.model.num_scales)
else:
raise NotImplementedError(f"SDE {config.training.sde} unknown.")
sde.N = args.sde_N # fewer sampling steps
self.sde = sde
self.score_fn = mutils.get_score_fn(sde, diffusion_model, train=False, continuous=config.training.continuous)
self.rsde = sde.reverse(self.score_fn, False)
# L2 loss
self.loss_fn = nn.MSELoss(reduction='none')
self.timesteps = torch.linspace(self.sde.T, 1e-3, self.sde.N, device=self.device)
def load_model(self, config, args):
POSE_DIM = 3 if config.data.rot_rep == 'axis' else 6
model = ScoreModelFC(
config,
|
class DPoser(nn.Module):
def __init__(self, batch_size=32, config_path='', args=None):
super().__init__()
self.device = args.device
self.batch_size = batch_size
config = import_configs(config_path)
self.Normalizer = Posenormalizer(
data_path=f'{args.dataset_folder}/{args.version}/train',
min_max=config.data.min_max, rot_rep=config.data.rot_rep, device=args.device)
diffusion_model = self.load_model(config, args)
if config.training.sde.lower() == 'vpsde':
sde = sde_lib.VPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max,
N=config.model.num_scales)
elif config.training.sde.lower() == 'subvpsde':
sde = sde_lib.subVPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max,
N=config.model.num_scales)
elif config.training.sde.lower() == 'vesde':
sde = sde_lib.VESDE(sigma_min=config.model.sigma_min, sigma_max=config.model.sigma_max,
N=config.model.num_scales)
else:
raise NotImplementedError(f"SDE {config.training.sde} unknown.")
sde.N = args.sde_N # fewer sampling steps
self.sde = sde
self.score_fn = mutils.get_score_fn(sde, diffusion_model, train=False, continuous=config.training.continuous)
self.rsde = sde.reverse(self.score_fn, False)
# L2 loss
self.loss_fn = nn.MSELoss(reduction='none')
self.timesteps = torch.linspace(self.sde.T, 1e-3, self.sde.N, device=self.device)
def load_model(self, config, args):
POSE_DIM = 3 if config.data.rot_rep == 'axis' else 6
model = ScoreModelFC(
config, | n_poses=N_POSES, | 8 | 2023-11-29 15:55:50+00:00 | 12k |
raven38/EfficientDynamic3DGaussian | scene/dataset_readers.py | [
{
"identifier": "read_extrinsics_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n image_id = int(elems[0])\n qvec = np.array(tuple(map(float, elems[1:5])))\n tvec = np.array(tuple(map(float, elems[5:8])))\n camera_id = int(elems[8])\n image_name = elems[9]\n elems = fid.readline().split()\n xys = np.column_stack([tuple(map(float, elems[0::3])),\n tuple(map(float, elems[1::3]))])\n point3D_ids = np.array(tuple(map(int, elems[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images"
},
{
"identifier": "read_intrinsics_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_intrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n cameras = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n camera_id = int(elems[0])\n model = elems[1]\n assert model == \"PINHOLE\", \"While the loader support other types, the rest of the code assumes PINHOLE\"\n width = int(elems[2])\n height = int(elems[3])\n params = np.array(tuple(map(float, elems[4:])))\n cameras[camera_id] = Camera(id=camera_id, model=model,\n width=width, height=height,\n params=params)\n return cameras"
},
{
"identifier": "qvec2rotmat",
"path": "scene/colmap_loader.py",
"snippet": "def qvec2rotmat(qvec):\n return np.array([\n [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,\n 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],\n 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],\n 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,\n 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],\n [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],\n 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],\n 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])"
},
{
"identifier": "read_extrinsics_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_extrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(\n fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8,\n format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,\n format_char_sequence=\"ddq\"*num_points2D)\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),\n tuple(map(float, x_y_id_s[1::3]))])\n point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images"
},
{
"identifier": "read_intrinsics_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_intrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(\n fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8*num_params,\n format_char_sequence=\"d\"*num_params)\n cameras[camera_id] = Camera(id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params))\n assert len(cameras) == num_cameras\n return cameras"
},
{
"identifier": "read_points3D_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n\n\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n\n for p_id in range(num_points):\n binary_point_line_properties = read_next_bytes(\n fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(\n fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid, num_bytes=8*track_length,\n format_char_sequence=\"ii\"*track_length)\n xyzs[p_id] = xyz\n rgbs[p_id] = rgb\n errors[p_id] = error\n return xyzs, rgbs, errors"
},
{
"identifier": "read_points3D_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_points3D_text(path):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DText(const std::string& path)\n void Reconstruction::WritePoints3DText(const std::string& path)\n \"\"\"\n xyzs = None\n rgbs = None\n errors = None\n num_points = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n num_points += 1\n\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n count = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n xyz = np.array(tuple(map(float, elems[1:4])))\n rgb = np.array(tuple(map(int, elems[4:7])))\n error = np.array(float(elems[7]))\n xyzs[count] = xyz\n rgbs[count] = rgb\n errors[count] = error\n count += 1\n\n return xyzs, rgbs, errors"
},
{
"identifier": "getWorld2View2",
"path": "utils/graphics_utils.py",
"snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)"
},
{
"identifier": "focal2fov",
"path": "utils/graphics_utils.py",
"snippet": "def focal2fov(focal, pixels):\n return 2*math.atan(pixels/(2*focal))"
},
{
"identifier": "fov2focal",
"path": "utils/graphics_utils.py",
"snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))"
},
{
"identifier": "SH2RGB",
"path": "utils/sh_utils.py",
"snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5"
},
{
"identifier": "BasicPointCloud",
"path": "scene/gaussian_model.py",
"snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):\n def __init__(self, sh_degree : int, L: int):\n def capture(self):\n def restore(self, model_args, training_args):\n def get_scaling(self):\n def get_rotation(self):\n def get_xyz(self):\n def get_features(self):\n def get_opacity(self):\n def get_covariance(self, scaling_modifier = 1):\n def oneupSHdegree(self):\n def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):\n def training_setup(self, training_args):\n def update_learning_rate(self, iteration):\n def construct_list_of_attributes(self):\n def save_ply(self, path):\n def reset_opacity(self):\n def load_ply(self, path):\n def replace_tensor_to_optimizer(self, tensor, name):\n def _prune_optimizer(self, mask):\n def prune_points(self, mask):\n def cat_tensors_to_optimizer(self, tensors_dict):\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)"
},
{
"identifier": "Camera",
"path": "scene/hyper_camera.py",
"snippet": "class Camera:\n \"\"\"Class to handle camera geometry.\"\"\"\n\n def __init__(self,\n orientation: np.ndarray,\n position: np.ndarray,\n focal_length: Union[np.ndarray, float],\n principal_point: np.ndarray,\n image_size: np.ndarray,\n skew: Union[np.ndarray, float] = 0.0,\n pixel_aspect_ratio: Union[np.ndarray, float] = 1.0,\n radial_distortion: Optional[np.ndarray] = None,\n tangential_distortion: Optional[np.ndarray] = None,\n dtype=np.float32):\n \"\"\"Constructor for camera class.\"\"\"\n if radial_distortion is None:\n radial_distortion = np.array([0.0, 0.0, 0.0], dtype)\n if tangential_distortion is None:\n tangential_distortion = np.array([0.0, 0.0], dtype)\n\n self.orientation = np.array(orientation, dtype)\n self.position = np.array(position, dtype)\n self.focal_length = np.array(focal_length, dtype)\n self.principal_point = np.array(principal_point, dtype)\n self.skew = np.array(skew, dtype)\n self.pixel_aspect_ratio = np.array(pixel_aspect_ratio, dtype)\n self.radial_distortion = np.array(radial_distortion, dtype)\n self.tangential_distortion = np.array(tangential_distortion, dtype)\n self.image_size = np.array(image_size, np.uint32)\n self.dtype = dtype\n\n @classmethod\n def from_json(cls, path: PathType):\n \"\"\"Loads a JSON camera into memory.\"\"\"\n with open(path, 'r') as fp:\n camera_json = json.load(fp)\n\n # Fix old camera JSON.\n if 'tangential' in camera_json:\n camera_json['tangential_distortion'] = camera_json['tangential']\n\n return cls(\n orientation=np.asarray(camera_json['orientation']),\n position=np.asarray(camera_json['position']),\n focal_length=camera_json['focal_length'],\n principal_point=np.asarray(camera_json['principal_point']),\n skew=camera_json['skew'],\n pixel_aspect_ratio=camera_json['pixel_aspect_ratio'],\n radial_distortion=np.asarray(camera_json['radial_distortion']),\n tangential_distortion=np.asarray(camera_json['tangential_distortion']),\n image_size=np.asarray(camera_json['image_size']),\n )\n\n def to_json(self):\n return {\n k: (v.tolist() if hasattr(v, 'tolist') else v)\n for k, v in self.get_parameters().items()\n }\n\n def get_parameters(self):\n return {\n 'orientation': self.orientation,\n 'position': self.position,\n 'focal_length': self.focal_length,\n 'principal_point': self.principal_point,\n 'skew': self.skew,\n 'pixel_aspect_ratio': self.pixel_aspect_ratio,\n 'radial_distortion': self.radial_distortion,\n 'tangential_distortion': self.tangential_distortion,\n 'image_size': self.image_size,\n }\n\n @property\n def scale_factor_x(self):\n return self.focal_length\n\n @property\n def scale_factor_y(self):\n return self.focal_length * self.pixel_aspect_ratio\n\n @property\n def principal_point_x(self):\n return self.principal_point[0]\n\n @property\n def principal_point_y(self):\n return self.principal_point[1]\n\n @property\n def has_tangential_distortion(self):\n return any(self.tangential_distortion != 0.0)\n\n @property\n def has_radial_distortion(self):\n return any(self.radial_distortion != 0.0)\n\n @property\n def image_size_y(self):\n return self.image_size[1]\n\n @property\n def image_size_x(self):\n return self.image_size[0]\n\n @property\n def image_shape(self):\n return self.image_size_y, self.image_size_x\n\n @property\n def optical_axis(self):\n return self.orientation[2, :]\n\n @property\n def translation(self):\n return -np.matmul(self.orientation, self.position)\n\n def pixel_to_local_rays(self, pixels: np.ndarray):\n \"\"\"Returns the local ray directions for the provided pixels.\"\"\"\n y = 
((pixels[..., 1] - self.principal_point_y) / self.scale_factor_y)\n x = ((pixels[..., 0] - self.principal_point_x - y * self.skew) /\n self.scale_factor_x)\n\n if self.has_radial_distortion or self.has_tangential_distortion:\n x, y = _radial_and_tangential_undistort(\n x,\n y,\n k1=self.radial_distortion[0],\n k2=self.radial_distortion[1],\n k3=self.radial_distortion[2],\n p1=self.tangential_distortion[0],\n p2=self.tangential_distortion[1])\n\n dirs = np.stack([x, y, np.ones_like(x)], axis=-1)\n return dirs / np.linalg.norm(dirs, axis=-1, keepdims=True)\n\n def pixels_to_rays(self, pixels: np.ndarray) -> np.ndarray:\n \"\"\"Returns the rays for the provided pixels.\n\n Args:\n pixels: [A1, ..., An, 2] tensor or np.array containing 2d pixel positions.\n\n Returns:\n An array containing the normalized ray directions in world coordinates.\n \"\"\"\n if pixels.shape[-1] != 2:\n raise ValueError('The last dimension of pixels must be 2.')\n if pixels.dtype != self.dtype:\n raise ValueError(f'pixels dtype ({pixels.dtype!r}) must match camera '\n f'dtype ({self.dtype!r})')\n\n batch_shape = pixels.shape[:-1]\n pixels = np.reshape(pixels, (-1, 2))\n\n local_rays_dir = self.pixel_to_local_rays(pixels)\n rays_dir = np.matmul(self.orientation.T, local_rays_dir[..., np.newaxis])\n rays_dir = np.squeeze(rays_dir, axis=-1)\n\n # Normalize rays.\n rays_dir /= np.linalg.norm(rays_dir, axis=-1, keepdims=True)\n rays_dir = rays_dir.reshape((*batch_shape, 3))\n return rays_dir\n\n def pixels_to_points(self, pixels: np.ndarray, depth: np.ndarray):\n rays_through_pixels = self.pixels_to_rays(pixels)\n cosa = np.matmul(rays_through_pixels, self.optical_axis)\n points = (\n rays_through_pixels * depth[..., np.newaxis] / cosa[..., np.newaxis] +\n self.position)\n return points\n\n def points_to_local_points(self, points: np.ndarray):\n translated_points = points - self.position\n local_points = (np.matmul(self.orientation, translated_points.T)).T\n return local_points\n\n def project(self, points: np.ndarray):\n \"\"\"Projects a 3D point (x,y,z) to a pixel position (x,y).\"\"\"\n batch_shape = points.shape[:-1]\n points = points.reshape((-1, 3))\n local_points = self.points_to_local_points(points)\n\n # Get normalized local pixel positions.\n x = local_points[..., 0] / local_points[..., 2]\n y = local_points[..., 1] / local_points[..., 2]\n r2 = x**2 + y**2\n\n # Apply radial distortion.\n distortion = 1.0 + r2 * (\n self.radial_distortion[0] + r2 *\n (self.radial_distortion[1] + self.radial_distortion[2] * r2))\n\n # Apply tangential distortion.\n x_times_y = x * y\n x = (\n x * distortion + 2.0 * self.tangential_distortion[0] * x_times_y +\n self.tangential_distortion[1] * (r2 + 2.0 * x**2))\n y = (\n y * distortion + 2.0 * self.tangential_distortion[1] * x_times_y +\n self.tangential_distortion[0] * (r2 + 2.0 * y**2))\n\n # Map the distorted ray to the image plane and return the depth.\n pixel_x = self.focal_length * x + self.skew * y + self.principal_point_x\n pixel_y = (self.focal_length * self.pixel_aspect_ratio * y\n + self.principal_point_y)\n\n pixels = np.stack([pixel_x, pixel_y], axis=-1)\n return pixels.reshape((*batch_shape, 2))\n\n def get_pixel_centers(self):\n \"\"\"Returns the pixel centers.\"\"\"\n xx, yy = np.meshgrid(np.arange(self.image_size_x, dtype=self.dtype),\n np.arange(self.image_size_y, dtype=self.dtype))\n return np.stack([xx, yy], axis=-1) + 0.5\n\n def scale(self, scale: float):\n \"\"\"Scales the camera.\"\"\"\n if scale <= 0:\n raise ValueError('scale needs to be positive.')\n\n 
new_camera = Camera(\n orientation=self.orientation.copy(),\n position=self.position.copy(),\n focal_length=self.focal_length * scale,\n principal_point=self.principal_point.copy() * scale,\n skew=self.skew,\n pixel_aspect_ratio=self.pixel_aspect_ratio,\n radial_distortion=self.radial_distortion.copy(),\n tangential_distortion=self.tangential_distortion.copy(),\n image_size=np.array((int(round(self.image_size[0] * scale)),\n int(round(self.image_size[1] * scale)))),\n )\n return new_camera\n\n def look_at(self, position, look_at, up, eps=1e-6):\n \"\"\"Creates a copy of the camera which looks at a given point.\n\n Copies the provided vision_sfm camera and returns a new camera that is\n positioned at `camera_position` while looking at `look_at_position`.\n Camera intrinsics are copied by this method. A common value for the\n up_vector is (0, 1, 0).\n\n Args:\n position: A (3,) numpy array representing the position of the camera.\n look_at: A (3,) numpy array representing the location the camera\n looks at.\n up: A (3,) numpy array representing the up direction, whose\n projection is parallel to the y-axis of the image plane.\n eps: a small number to prevent divides by zero.\n\n Returns:\n A new camera that is copied from the original but is positioned and\n looks at the provided coordinates.\n\n Raises:\n ValueError: If the camera position and look at position are very close\n to each other or if the up-vector is parallel to the requested optical\n axis.\n \"\"\"\n\n look_at_camera = self.copy()\n optical_axis = look_at - position\n norm = np.linalg.norm(optical_axis)\n if norm < eps:\n raise ValueError('The camera center and look at position are too close.')\n optical_axis /= norm\n\n right_vector = np.cross(optical_axis, up)\n norm = np.linalg.norm(right_vector)\n if norm < eps:\n raise ValueError('The up-vector is parallel to the optical axis.')\n right_vector /= norm\n\n # The three directions here are orthogonal to each other and form a right\n # handed coordinate system.\n camera_rotation = np.identity(3)\n camera_rotation[0, :] = right_vector\n camera_rotation[1, :] = np.cross(optical_axis, right_vector)\n camera_rotation[2, :] = optical_axis\n\n look_at_camera.position = position\n look_at_camera.orientation = camera_rotation\n return look_at_camera\n\n def crop_image_domain(\n self, left: int = 0, right: int = 0, top: int = 0, bottom: int = 0):\n \"\"\"Returns a copy of the camera with adjusted image bounds.\n\n Args:\n left: number of pixels by which to reduce (or augment, if negative) the\n image domain at the associated boundary.\n right: likewise.\n top: likewise.\n bottom: likewise.\n\n The crop parameters may not cause the camera image domain dimensions to\n become non-positive.\n\n Returns:\n A camera with adjusted image dimensions. The focal length is unchanged,\n and the principal point is updated to preserve the original principal\n axis.\n \"\"\"\n\n crop_left_top = np.array([left, top])\n crop_right_bottom = np.array([right, bottom])\n new_resolution = self.image_size - crop_left_top - crop_right_bottom\n new_principal_point = self.principal_point - crop_left_top\n if np.any(new_resolution <= 0):\n raise ValueError('Crop would result in non-positive image dimensions.')\n\n new_camera = self.copy()\n new_camera.image_size = np.array([int(new_resolution[0]),\n int(new_resolution[1])])\n new_camera.principal_point = np.array([new_principal_point[0],\n new_principal_point[1]])\n return new_camera\n\n def copy(self):\n return copy.deepcopy(self)"
}
] | import torch
import os
import sys
import glob
import numpy as np
import json
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
from scene.hyper_camera import Camera as HyperNeRFCamera | 9,697 | cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height, time=time)
test_cam_infos.append(cam_info)
if not eval:
train_cam_infos.extend(test_cam_infos)
test_cam_infos = []
    width, height = train_cam_infos[0].width, train_cam_infos[0].height
# Sample N_views poses for validation - NeRF-like camera trajectory.
N_views = 120
val_poses = get_spiral(poses, near_fars, N_views=N_views)
val_times = torch.linspace(0.0, 1.0, val_poses.shape[0])
vis_cam_infos = []
for idx, (pose, time) in enumerate(zip(val_poses, val_times)):
p = pose
uid = idx
pose = np.eye(4)
pose[:3, :] = p[:3, :]
R = -pose[:3, :3]
R[:, 0] = -R[:, 0]
T = -pose[:3, 3].dot(R)
# R = pose[:3, :3]
# T = pose[:3, 3]
FovY = focal2fov(focal, height)
FovX = focal2fov(focal, width)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=None, image_name=None, width=width, height=height, time=time)
vis_cam_infos.append(cam_info)
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "points3d_ours.ply")
if not os.path.exists(ply_path):
# Since this data set has no colmap data, we start with random points
num_pts = 2_000 # 100_000
print(f"Generating random point cloud ({num_pts})...")
threshold = 3
xyz_max = np.array([1.5*threshold, 1.5*threshold, -0*threshold])
xyz_min = np.array([-1.5*threshold, -1.5*threshold, -1.5*threshold])
xyz = np.concatenate([(np.random.random((num_pts, 1, 3)))* (xyz_max-xyz_min) + xyz_min, np.zeros((num_pts, 16, 3))], axis=1)
# xyz = np.concatenate([np.random.random((num_pts, 1, 3)) * 2.6 - 1.3, np.zeros((num_pts, 2, 3))], axis=1)
shs = np.random.random((num_pts, 3)) / 255.0
pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
storePly(ply_path, xyz, SH2RGB(shs) * 255)
pcd = fetchPly(ply_path)
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
vis_cameras =vis_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path, time_delta=1/300)
return scene_info
def readHypernerfCamera(uid, camera, image_path, time):
height, width = int(camera.image_shape[0]), int(camera.image_shape[1])
image_name = os.path.basename(image_path).split(".")[0]
R = camera.orientation.T
# T = camera.translation.T
T = - camera.position @ R
image = Image.open(image_path)
FovY = focal2fov(camera.focal_length, height)
FovX = focal2fov(camera.focal_length, width)
return CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height, time=time)
def readHypernerfSceneInfo(path, eval):
# borrow code from https://github.com/hustvl/TiNeuVox/blob/main/lib/load_hyper.py
use_bg_points = False
with open(f'{path}/scene.json', 'r') as f:
scene_json = json.load(f)
with open(f'{path}/metadata.json', 'r') as f:
meta_json = json.load(f)
with open(f'{path}/dataset.json', 'r') as f:
dataset_json = json.load(f)
near = scene_json['near']
far = scene_json['far']
coord_scale = scene_json['scale']
scene_center = scene_json['center']
all_imgs = dataset_json['ids']
val_ids = dataset_json['val_ids']
add_cam = False
if len(val_ids) == 0:
i_train = np.array([i for i in np.arange(len(all_imgs)) if (i%4 == 0)])
i_test = i_train+2
i_test = i_test[:-1,]
else:
add_cam = True
train_ids = dataset_json['train_ids']
i_test = []
i_train = []
for i in range(len(all_imgs)):
id = all_imgs[i]
if id in val_ids:
i_test.append(i)
if id in train_ids:
i_train.append(i)
print('i_train',i_train)
print('i_test',i_test)
all_cams = [meta_json[i]['camera_id'] for i in all_imgs]
all_times = [meta_json[i]['time_id'] for i in all_imgs]
max_time = max(all_times)
all_times = [meta_json[i]['time_id']/max_time for i in all_imgs]
selected_time = set(all_times)
ratio = 0.5
all_cam_params = []
for im in all_imgs:
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
time: float
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
vis_cameras: list
nerf_normalization: dict
ply_path: str
time_delta: float
def normalize(v):
"""Normalize a vector."""
return v / np.linalg.norm(v)
def average_poses(poses):
"""
Calculate the average pose, which is then used to center all poses
using @center_poses. Its computation is as follows:
1. Compute the center: the average of pose centers.
2. Compute the z axis: the normalized average z axis.
3. Compute axis y': the average y axis.
4. Compute x' = y' cross product z, then normalize it as the x axis.
5. Compute the y axis: z cross product x.
Note that at step 3, we cannot directly use y' as y axis since it's
not necessarily orthogonal to z axis. We need to pass from x to y.
Inputs:
poses: (N_images, 3, 4)
Outputs:
pose_avg: (3, 4) the average pose
"""
# 1. Compute the center
center = poses[..., 3].mean(0) # (3)
# 2. Compute the z axis
z = normalize(poses[..., 2].mean(0)) # (3)
# 3. Compute axis y' (no need to normalize as it's not the final output)
y_ = poses[..., 1].mean(0) # (3)
# 4. Compute the x axis
x = normalize(np.cross(z, y_)) # (3)
# 5. Compute the y axis (as z and x are normalized, y is already of norm 1)
y = np.cross(x, z) # (3)
pose_avg = np.stack([x, y, z, center], 1) # (3, 4)
return pose_avg
def center_poses(poses, blender2opencv):
"""
Center the poses so that we can use NDC.
See https://github.com/bmild/nerf/issues/34
Inputs:
poses: (N_images, 3, 4)
Outputs:
poses_centered: (N_images, 3, 4) the centered poses
pose_avg: (3, 4) the average pose
"""
poses = poses @ blender2opencv
pose_avg = average_poses(poses) # (3, 4)
pose_avg_homo = np.eye(4)
pose_avg_homo[
:3
] = pose_avg # convert to homogeneous coordinate for faster computation
pose_avg_homo = pose_avg_homo
# by simply adding 0, 0, 0, 1 as the last row
last_row = np.tile(np.array([0, 0, 0, 1]), (len(poses), 1, 1)) # (N_images, 1, 4)
poses_homo = np.concatenate(
[poses, last_row], 1
) # (N_images, 4, 4) homogeneous coordinate
poses_centered = np.linalg.inv(pose_avg_homo) @ poses_homo # (N_images, 4, 4)
# poses_centered = poses_centered @ blender2opencv
poses_centered = poses_centered[:, :3] # (N_images, 3, 4)
return poses_centered, pose_avg_homo
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info:
W2C = getWorld2View2(cam.R, cam.T)
C2W = np.linalg.inv(W2C)
cam_centers.append(C2W[:3, 3:4])
center, diagonal = get_center_and_diag(cam_centers)
radius = diagonal * 1.1
translate = -center
return {"translate": translate, "radius": radius}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
cam_infos = []
for idx, key in enumerate(cam_extrinsics):
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
sys.stdout.flush()
extr = cam_extrinsics[key]
intr = cam_intrinsics[extr.camera_id]
height = intr.height
width = intr.width
uid = intr.id
R = np.transpose(qvec2rotmat(extr.qvec))
T = np.array(extr.tvec)
if intr.model=="SIMPLE_PINHOLE":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model=="PINHOLE":
focal_length_x = intr.params[0]
focal_length_y = intr.params[1]
FovY = focal2fov(focal_length_y, height)
FovX = focal2fov(focal_length_x, width)
else:
assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
image_path = os.path.join(images_folder, os.path.basename(extr.name))
image_name = os.path.basename(image_path).split(".")[0]
image = Image.open(image_path)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height, time=0)
cam_infos.append(cam_info)
sys.stdout.write('\n')
return cam_infos
def fetchPly(path):
plydata = PlyData.read(path)
vertices = plydata['vertex']
colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
x_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("x")]
y_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("y")]
z_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("z")]
x_names = sorted(x_names, key = lambda x: int(x.replace('x', '')))
y_names = sorted(y_names, key = lambda y: int(y.replace('y', '')))
z_names = sorted(z_names, key = lambda z: int(z.replace('z', '')))
assert len(x_names) == len(y_names) == len(z_names)
x = np.zeros((colors.shape[0], len(x_names)))
y = np.zeros((colors.shape[0], len(y_names)))
z = np.zeros((colors.shape[0], len(z_names)))
for idx, attr_name in enumerate(x_names):
x[:, idx] = np.asarray(plydata.elements[0][attr_name])
for idx, attr_name in enumerate(y_names):
y[:, idx] = np.asarray(plydata.elements[0][attr_name])
for idx, attr_name in enumerate(z_names):
z[:, idx] = np.asarray(plydata.elements[0][attr_name])
# positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
positions = np.stack([x, y, z], axis=-1)
assert len(positions.shape) == 3
assert positions.shape[-1] == 3
return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
# Define the dtype for the structured array
dtype = []
for t in range(xyz.shape[1]):
dtype.extend([(f'x{t}', 'f4'), (f'y{t}', 'f4'), (f'z{t}', 'f4')])
dtype = dtype + [('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
normals = np.zeros_like(xyz[:, 0, :])
elements = np.empty(xyz.shape[0], dtype=dtype)
attributes = np.concatenate((xyz.reshape(xyz.shape[0], -1), normals, rgb), axis=1)
elements[:] = list(map(tuple, attributes))
# Create the PlyData object and write to file
vertex_element = PlyElement.describe(elements, 'vertex')
ply_data = PlyData([vertex_element])
ply_data.write(path)
def readColmapSceneInfo(path, images, eval, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
reading_dir = "images" if images == None else images
cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir))
cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name)
if eval:
train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
else:
train_cam_infos = cam_infos
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "sparse/0/points3D_ours.ply")
bin_path = os.path.join(path, "sparse/0/points3D.bin")
txt_path = os.path.join(path, "sparse/0/points3D.txt")
if not os.path.exists(ply_path):
print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
try:
xyz, rgb, _ = read_points3D_binary(bin_path)
except:
xyz, rgb, _ = read_points3D_text(txt_path)
xyz = xyz[:, None, :]
storePly(ply_path, xyz, rgb)
try:
pcd = fetchPly(ply_path)
except:
pcd = None
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info
def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
cam_infos = []
with open(os.path.join(path, transformsfile)) as json_file:
contents = json.load(json_file)
fovx = contents["camera_angle_x"]
frames = contents["frames"]
# if 'time' in frames[0]:
# times = np.array([frame['time'] for idx, frame in enumerate(frames)])
# time_idx = times.argsort()
# else:
# time_idx = [0 for f in frames]
# print(times)
# print(time_idx)
for idx, frame in enumerate(frames):
cam_name = os.path.join(path, frame["file_path"] + extension)
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
image_path = os.path.join(path, cam_name)
image_name = Path(cam_name).stem
image = Image.open(image_path)
im_data = np.array(image.convert("RGBA"))
bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])
norm_data = im_data / 255.0
arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")
fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1])
FovY = fovy
FovX = fovx
time = frame['time'] if 'time' in frame else 0
cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name,
width=image.size[0], height=image.size[1], time=time))
return cam_infos
# https://github.com/albertpumarola/D-NeRF/blob/main/load_blender.py
trans_t = lambda t : torch.Tensor([
[1,0,0,0],
[0,1,0,0],
[0,0,1,t],
[0,0,0,1]]).float()
rot_phi = lambda phi : torch.Tensor([
[1,0,0,0],
[0,np.cos(phi),-np.sin(phi),0],
[0,np.sin(phi), np.cos(phi),0],
[0,0,0,1]]).float()
rot_theta = lambda th : torch.Tensor([
[np.cos(th),0,-np.sin(th),0],
[0,1,0,0],
[np.sin(th),0, np.cos(th),0],
[0,0,0,1]]).float()
def pose_spherical(theta, phi, radius):
# https://github.com/albertpumarola/D-NeRF/blob/main/load_blender.py
c2w = trans_t(radius)
c2w = rot_phi(phi/180.*np.pi) @ c2w
c2w = rot_theta(theta/180.*np.pi) @ c2w
c2w = torch.Tensor(np.array([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]])) @ c2w
return c2w
def generateCamerasFromTransforms(path, transformsfile, extension=".png"):
cam_infos = []
# https://github.com/albertpumarola/D-NeRF/blob/main/load_blender.py
render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)
render_times = torch.linspace(0., 1., render_poses.shape[0])
with open(os.path.join(path, transformsfile)) as json_file:
contents = json.load(json_file)
fovx = contents["camera_angle_x"]
frames = contents["frames"]
cam_name = os.path.join(path, frames[0]["file_path"] + extension)
image_path = os.path.join(path, cam_name)
image_name = Path(cam_name).stem
image = Image.open(image_path)
width = image.size[0]
height = image.size[1]
for idx, (c2w, time) in enumerate(zip(render_poses, render_times)):
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
fovy = focal2fov(fov2focal(fovx, width), height)
FovY = fovy
FovX = fovx
cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX,
image=None, image_path=None, image_name=None,
width=width, height=height, time=time))
return cam_infos
def init_random_points(ply_path):
if not os.path.exists(ply_path):
# Since this data set has no colmap data, we start with random points
num_pts = 100_000
print(f"Generating random point cloud ({num_pts})...")
# We create random points inside the bounds of the synthetic Blender scenes
# time_length = max([c.time for c in train_cam_infos]) + 1
# time_length = 2
# xyz = np.random.random((num_pts, 1, 3)) * 2.6 - 1.3
# xyz = np.tile(xyz, (1, time_length, 1))
# xyz = np.concatenate([np.random.random((num_pts, 1, 3)) * 2.6 - 1.3, np.zeros((num_pts, 2, 3)), np.random.random((num_pts, 1, 3)) * 2.6 - 1.3, np.zeros((num_pts, 2, 3))], axis=1)
xyz = np.concatenate([np.random.random((num_pts, 1, 3)) * 2.6 - 1.3, np.zeros((num_pts, 16, 3))], axis=1)
# xyz = np.concatenate([np.random.random((num_pts, 1, 3)) * 2.6 - 1.3, np.zeros((num_pts, 3, 3)), np.ones((num_pts, 1, 3))], axis=1)
# xyz = np.concatenate([np.random.random((num_pts, 1, 3)) * 2.6 - 1.3, np.zeros((num_pts, 2, 3)), np.random.random((num_pts, 1, 3)) * 2.6 - 1.3, np.zeros((num_pts, 2, 3))], axis=1)
shs = np.random.random((num_pts, 3)) / 255.0
pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
storePly(ply_path, xyz, SH2RGB(shs) * 255)
# try:
pcd = fetchPly(ply_path)
# except:
# pcd = None
return pcd
def readNerfSyntheticInfo(path, white_background, eval, extension=".png"):
print("Reading Training Transforms")
train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension)
print("Reading Test Transforms")
test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension)
vis_cam_infos = generateCamerasFromTransforms(path, "transforms_train.json")
if not eval:
train_cam_infos.extend(test_cam_infos)
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "points3d_ours.ply")
pcd = init_random_points(ply_path)
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
vis_cameras=vis_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path, time_delta=1/len(train_cam_infos))
return scene_info
def viewmatrix(z, up, pos):
vec2 = normalize(z)
vec1_avg = up
vec0 = normalize(np.cross(vec1_avg, vec2))
vec1 = normalize(np.cross(vec2, vec0))
m = np.eye(4)
m[:3] = np.stack([-vec0, vec1, vec2, pos], 1)
return m
def render_path_spiral(c2w, up, rads, focal, zdelta, zrate, N_rots=2, N=120):
render_poses = []
rads = np.array(list(rads) + [1.0])
for theta in np.linspace(0.0, 2.0 * np.pi * N_rots, N + 1)[:-1]:
c = np.dot(
c2w[:3, :4],
np.array([np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.0])
* rads,
)
z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.0])))
render_poses.append(viewmatrix(z, up, c))
return render_poses
def get_spiral(c2ws_all, near_fars, rads_scale=1.0, N_views=120):
"""
Generate a set of poses using NeRF's spiral camera trajectory as validation poses.
"""
# center pose
c2w = average_poses(c2ws_all)
# Get average pose
up = normalize(c2ws_all[:, :3, 1].sum(0))
# Find a reasonable "focus depth" for this dataset
dt = 0.75
close_depth, inf_depth = near_fars.min() * 0.9, near_fars.max() * 5.0
focal = 1.0 / ((1.0 - dt) / close_depth + dt / inf_depth)
# Get radii for spiral path
zdelta = near_fars.min() * 0.2
tt = c2ws_all[:, :3, 3]
rads = np.percentile(np.abs(tt), 90, 0) * rads_scale
render_poses = render_path_spiral(
c2w, up, rads, focal, zdelta, zrate=0.5, N=N_views
)
return np.stack(render_poses)
def readDynerfSceneInfo(path, eval):
blender2opencv = np.eye(4)
downsample = 2
poses_arr = np.load(os.path.join(path, "poses_bounds.npy"))
poses = poses_arr[:, :-2].reshape([-1, 3, 5]) # (N_cams, 3, 5)
near_fars = poses_arr[:, -2:]
videos = glob.glob(os.path.join(path, "cam??"))
videos = sorted(videos)
assert len(videos) == poses_arr.shape[0]
H, W, focal = poses[0, :, -1]
focal = focal / downsample
poses = np.concatenate([poses[..., 1:2], -poses[..., :1], poses[..., 2:4]], -1)
poses, pose_avg = center_poses(
poses, blender2opencv
) # Re-center poses so that the average is near the center.
near_original = near_fars.min()
scale_factor = near_original * 0.75
near_fars /= (
scale_factor # rescale nearest plane so that it is at z = 4/3.
)
# print(scale_factor)
poses[..., 3] /= scale_factor
image_dirs = [video.replace('.mp4', '') for video in videos]
val_index = [0]
images = [sorted(glob.glob(os.path.join(d, "*.png")), key=lambda x:int(os.path.splitext(os.path.basename(x))[0]))[:300] for d in image_dirs]
train_cam_infos = []
for idx, image_paths in enumerate(images):
if idx in val_index:
continue
p = poses[idx]
for image_path in image_paths:
image_name = os.path.basename(image_path).split(".")[0]
time = float(image_name) / 300
image = Image.open(image_path)
uid = idx * 1000 + int(image_name)
pose = np.eye(4)
pose[:3, :] = p[:3, :]
R = -pose[:3, :3]
R[:, 0] = -R[:, 0]
T = -pose[:3, 3].dot(R)
height = image.height
width = image.width
FovY = focal2fov(focal, height)
FovX = focal2fov(focal, width)
# R = pose[:3, :3]
# T = pose[:3, 3]
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height, time=time)
train_cam_infos.append(cam_info)
test_cam_infos = []
for idx, image_paths in enumerate(images):
if idx not in val_index:
continue
p = poses[idx]
for image_path in image_paths:
image_name = os.path.basename(image_path).split(".")[0]
time = float(image_name) / 300
image = Image.open(image_path)
uid = idx * 1000 + int(image_name)
pose = np.eye(4)
pose[:3, :] = p[:3, :]
R = -pose[:3, :3]
R[:, 0] = -R[:, 0]
T = -pose[:3, 3].dot(R)
# R = pose[:3, :3]
# T = pose[:3, 3]
height = image.height
width = image.width
FovY = focal2fov(focal, height)
FovX = focal2fov(focal, width)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height, time=time)
test_cam_infos.append(cam_info)
if not eval:
train_cam_infos.extend(test_cam_infos)
test_cam_infos = []
    width, height = train_cam_infos[0].width, train_cam_infos[0].height
# Sample N_views poses for validation - NeRF-like camera trajectory.
N_views = 120
val_poses = get_spiral(poses, near_fars, N_views=N_views)
val_times = torch.linspace(0.0, 1.0, val_poses.shape[0])
vis_cam_infos = []
for idx, (pose, time) in enumerate(zip(val_poses, val_times)):
p = pose
uid = idx
pose = np.eye(4)
pose[:3, :] = p[:3, :]
R = -pose[:3, :3]
R[:, 0] = -R[:, 0]
T = -pose[:3, 3].dot(R)
# R = pose[:3, :3]
# T = pose[:3, 3]
FovY = focal2fov(focal, height)
FovX = focal2fov(focal, width)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=None, image_name=None, width=width, height=height, time=time)
vis_cam_infos.append(cam_info)
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "points3d_ours.ply")
if not os.path.exists(ply_path):
# Since this data set has no colmap data, we start with random points
num_pts = 2_000 # 100_000
print(f"Generating random point cloud ({num_pts})...")
threshold = 3
xyz_max = np.array([1.5*threshold, 1.5*threshold, -0*threshold])
xyz_min = np.array([-1.5*threshold, -1.5*threshold, -1.5*threshold])
xyz = np.concatenate([(np.random.random((num_pts, 1, 3)))* (xyz_max-xyz_min) + xyz_min, np.zeros((num_pts, 16, 3))], axis=1)
# xyz = np.concatenate([np.random.random((num_pts, 1, 3)) * 2.6 - 1.3, np.zeros((num_pts, 2, 3))], axis=1)
shs = np.random.random((num_pts, 3)) / 255.0
pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
storePly(ply_path, xyz, SH2RGB(shs) * 255)
pcd = fetchPly(ply_path)
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
vis_cameras =vis_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path, time_delta=1/300)
return scene_info
def readHypernerfCamera(uid, camera, image_path, time):
height, width = int(camera.image_shape[0]), int(camera.image_shape[1])
image_name = os.path.basename(image_path).split(".")[0]
R = camera.orientation.T
# T = camera.translation.T
T = - camera.position @ R
image = Image.open(image_path)
FovY = focal2fov(camera.focal_length, height)
FovX = focal2fov(camera.focal_length, width)
return CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height, time=time)
def readHypernerfSceneInfo(path, eval):
# borrow code from https://github.com/hustvl/TiNeuVox/blob/main/lib/load_hyper.py
use_bg_points = False
with open(f'{path}/scene.json', 'r') as f:
scene_json = json.load(f)
with open(f'{path}/metadata.json', 'r') as f:
meta_json = json.load(f)
with open(f'{path}/dataset.json', 'r') as f:
dataset_json = json.load(f)
near = scene_json['near']
far = scene_json['far']
coord_scale = scene_json['scale']
scene_center = scene_json['center']
all_imgs = dataset_json['ids']
val_ids = dataset_json['val_ids']
add_cam = False
if len(val_ids) == 0:
i_train = np.array([i for i in np.arange(len(all_imgs)) if (i%4 == 0)])
i_test = i_train+2
i_test = i_test[:-1,]
else:
add_cam = True
train_ids = dataset_json['train_ids']
i_test = []
i_train = []
for i in range(len(all_imgs)):
id = all_imgs[i]
if id in val_ids:
i_test.append(i)
if id in train_ids:
i_train.append(i)
print('i_train',i_train)
print('i_test',i_test)
all_cams = [meta_json[i]['camera_id'] for i in all_imgs]
all_times = [meta_json[i]['time_id'] for i in all_imgs]
max_time = max(all_times)
all_times = [meta_json[i]['time_id']/max_time for i in all_imgs]
selected_time = set(all_times)
ratio = 0.5
all_cam_params = []
for im in all_imgs: | camera = HyperNeRFCamera.from_json(f'{path}/camera/{im}.json') | 11 | 2023-11-30 02:22:56+00:00 | 12k |
zd11024/NaviLLM | train.py | [
{
"identifier": "all_gather",
"path": "tools/common_utils.py",
"snippet": "def all_gather(data):\n \"\"\"\n Run all_gather on arbitrary picklable data (not necessarily tensors)\n Args:\n data: any picklable object\n Returns:\n list[data]: list of data gathered from each rank\n \"\"\"\n world_size = get_world_size()\n if world_size == 1:\n return [data]\n\n # serialized to a Tensor\n origin_size = None\n if not isinstance(data, torch.Tensor):\n buffer = pickle.dumps(data)\n storage = torch.ByteStorage.from_buffer(buffer)\n tensor = torch.ByteTensor(storage).to(\"cuda\")\n else:\n origin_size = data.size()\n tensor = data.reshape(-1)\n\n tensor_type = tensor.dtype\n\n # obtain Tensor size of each rank\n local_size = torch.LongTensor([tensor.numel()]).to(\"cuda\")\n size_list = [torch.LongTensor([0]).to(\"cuda\") for _ in range(world_size)]\n dist.all_gather(size_list, local_size)\n size_list = [int(size.item()) for size in size_list]\n max_size = max(size_list)\n\n # receiving Tensor from all ranks\n # we pad the tensor because torch all_gather does not support\n # gathering tensors of different shapes\n tensor_list = []\n for _ in size_list:\n tensor_list.append(torch.FloatTensor(size=(max_size,)).cuda().to(tensor_type))\n if local_size != max_size:\n padding = torch.FloatTensor(size=(max_size - local_size,)).cuda().to(tensor_type)\n tensor = torch.cat((tensor, padding), dim=0)\n dist.all_gather(tensor_list, tensor)\n\n data_list = []\n for size, tensor in zip(size_list, tensor_list):\n if origin_size is None:\n buffer = tensor.cpu().numpy().tobytes()[:size]\n data_list.append(pickle.loads(buffer))\n else:\n buffer = tensor[:size]\n data_list.append(buffer)\n\n if origin_size is not None:\n new_shape = [-1] + list(origin_size[1:])\n resized_list = []\n for data in data_list:\n # suppose the difference of tensor size exist in first dimension\n data = data.reshape(new_shape)\n resized_list.append(data)\n\n return resized_list\n else:\n return data_list"
},
{
"identifier": "read_args",
"path": "tools/parser.py",
"snippet": "def read_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--data_dir', type=str, default='data', help=\"dataset root path\")\n parser.add_argument('--cfg_file', type=str, default=None, help='dataset configs', required=True)\n parser.add_argument('--pretrained_model_name_or_path', default=None, type=str, required=True, help=\"path to tokenizer\")\n\n # local fusion\n parser.add_argument('--off_batch_task', action='store_true', default=False, help=\"whether all process is training same task\")\n parser.add_argument('--debug', action=\"store_true\", help=\"debug mode\")\n parser.add_argument('--seed', type=int, default=0)\n\n parser.add_argument(\"--num_epochs\", type=int, default=30)\n parser.add_argument(\"--resume_from_checkpoint\", type=str, default=None, help=\"path to ckpt to resume from\")\n parser.add_argument(\"--from_scratch\", action=\"store_true\")\n parser.add_argument(\"--batch_size\", type=int, default=1)\n parser.add_argument(\"--val_batch_size\", type=int, default=2)\n parser.add_argument(\"--lr\", default=1e-5, type=float)\n parser.add_argument(\"--feat_dropout\", type=float, default=0.4)\n parser.add_argument(\"--num_warmup_steps\", type=int, default=0)\n parser.add_argument(\"--num_steps_per_epoch\", type=int, default=-1)\n parser.add_argument(\"--gradient_accumulation_step\", type=int, default=2)\n parser.add_argument(\n \"--precision\",\n choices=[\"amp_bf16\", \"amp_bfloat16\", \"bf16\", \"fp16\", \"fp32\"],\n default=\"fp32\",\n help=\"Floating point precision.\",\n )\n parser.add_argument(\"--workers\", type=int, default=0)\n\n # distributed training args\n parser.add_argument('--world_size', type=int, default=0, help='number of gpus')\n parser.add_argument('--local_rank', type=int, default=-1)\n parser.add_argument(\n \"--dist-url\",\n default=\"env://\",\n type=str,\n help=\"url used to set up distributed training\",\n )\n parser.add_argument(\n \"--dist-backend\", default=\"nccl\", type=str, help=\"distributed backend\"\n )\n parser.add_argument(\n \"--horovod\",\n default=False,\n action=\"store_true\",\n help=\"Use horovod for distributed training.\",\n )\n parser.add_argument(\n \"--no-set-device-rank\",\n default=False,\n action=\"store_true\",\n help=\"Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).\",\n )\n\n # Save checkpoints\n parser.add_argument('--output_dir', type=str, default=None, required=True, help=\"output logs and ckpts\")\n parser.add_argument(\"--max_saved_checkpoints\", type=int, default=0)\n parser.add_argument(\"--save_ckpt_per_epochs\", type=int, default=10)\n parser.add_argument(\"--save_latest_states\", action='store_true')\n parser.add_argument(\"--save_pred_results\", action=\"store_true\")\n parser.add_argument(\"--save_detail_results\", action=\"store_true\")\n\n # training\n parser.add_argument('--mode', type=str, default=\"train\", choices=[\"train\", \"test\"])\n parser.add_argument(\"--stage\", type=str, required=True, choices=[\"pretrain\", \"multi\"])\n parser.add_argument('--ignoreid', default=-100, type=int, help=\"criterion: ignore label\")\n parser.add_argument('--enable_og', action='store_true', default=False, help=\"object grounding task\")\n parser.add_argument(\"--enable_summarize\", action=\"store_true\", help=\"perform EQA or generate instructions\")\n parser.add_argument(\"--enable_fgr2r\", action=\"store_true\", help=\"perform fgr2r for R2R\")\n parser.add_argument(\"--gen_loss_coef\", type=float, default=1.)\n 
parser.add_argument(\"--obj_loss_coef\", type=float, default=1.)\n parser.add_argument(\"--teacher_forcing_coef\", type=float, default=1.)\n parser.add_argument(\"--fuse_obj\", action=\"store_true\", help=\"whether fuse object features for REVERIE and SOON\")\n\n # datasets\n parser.add_argument(\"--multi_endpoints\", type=int, default=1)\n parser.add_argument(\"--path_type\", type=str, default=\"trusted_path\", choices=[\"planner_path\", \"trusted_path\"])\n\n # evaluation\n parser.add_argument('--test_datasets', type=str, default=None, nargs='+')\n parser.add_argument('--validation_split', type=str, default=\"val_unseen\", help=\"validation split: val_seen, val_unseen, test\")\n parser.add_argument(\"--do_sample\", action=\"store_true\", help=\"do_sample in evaluation\")\n parser.add_argument(\"--temperature\", type=float, default=1.)\n\n\n # others\n parser.add_argument(\n \"--max_datapoints\",\n default=None,\n type=int,\n help=\"The number of datapoints used for debug.\"\n )\n\n args = parser.parse_args()\n\n args.local_rank, args.rank, args.world_size = world_info_from_env()\n\n ###################### configurations #########################\n # single-gpu or multi-gpu\n device_id = init_distributed_device(args)\n global_cfg = EasyDict(yaml.safe_load(open(str(Path(args.cfg_file).resolve()))))\n\n args.data_dir = Path(args.data_dir).resolve()\n\n # off-line image features from Matterport3D\n args.image_feat_size = global_cfg.Feature.image_feat_size\n args.obj_feat_size = global_cfg.Feature.obj_feat_size\n\n ############# Configurations ###############\n args.angle_feat_size = global_cfg.Feature.angle_feat_size\n args.enc_full_graph = global_cfg.Model.enc_full_graph\n args.expert_policy = global_cfg.Model.expert_policy\n args.num_pano_layers = global_cfg.Model.num_pano_layers\n\n os.makedirs(args.output_dir, exist_ok=True)\n log_file = Path(args.output_dir) / 'log.txt'\n\n logger = create_logger(log_file, rank=args.rank)\n logger.info('**********************Start logging**********************')\n gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'\n logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)\n for key, val in vars(args).items():\n logger.info('{:16} {}'.format(key, val))\n log_config_to_file(global_cfg, logger=logger)\n\n print(\" + rank: {}, + device_id: {}\".format(args.local_rank, device_id))\n print(f\"Start running training on rank {args.rank}.\")\n\n if os.path.exists(os.path.join(args.output_dir, \"latest_states.pt\")):\n state_path = os.path.join(args.output_dir, \"latest_states.pt\")\n logger.info(\"Resume checkponit from {}\".format(state_path))\n args.resume_from_checkpoint = state_path\n\n return args, global_cfg, logger, device_id"
},
{
"identifier": "random_seed",
"path": "tools/parser.py",
"snippet": "def random_seed(seed=0, rank=0):\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n random.seed(seed)\n np.random.seed(seed)"
},
{
"identifier": "create_dataloaders",
"path": "tasks/loaders.py",
"snippet": "def create_dataloaders(args, config, logger, training, device, feat_db=None, obj_feat_db=None, stage=\"multi\"):\n if training==False and stage=='pretrain':\n return None, None\n\n dataset_cfg = copy.deepcopy(config.Dataset)\n dataset_cfg.update(\n config.Pretrain if stage==\"pretrain\" else config.Multi\n )\n dataset_cfg.update(config.Feature)\n\n dataloaders = {}\n agents = {}\n if args.test_datasets is not None and not training:\n dataset_list = args.test_datasets\n else:\n dataset_list = copy.deepcopy(dataset_cfg.SOURCE)\n for k, task_name in enumerate(dataset_list):\n # load dataset by names\n dataset = load_dataset(task_name.lower(), args, dataset_cfg, training=training, logger=logger, source=task_name)\n\n # assign feature database\n if task_name in [\"R2R\", \"REVERIE\", \"CVDN\", \"SOON\", \"EQA\", \"R2R_AUG\", \"REVERIE_AUG\"]:\n task_feat_db = feat_db['mp3d']\n elif task_name in [\"ScanQA\"]:\n task_feat_db = feat_db['scan_qa']\n elif task_name in [\"LLaVA\"]:\n task_feat_db = feat_db[\"coco\"]\n else:\n raise NotImplementedError\n \n # assign object database\n if args.enable_og:\n if task_name in [\"REVERIE\", \"REVERIE_AUG\"]:\n task_obj_feat_db = obj_feat_db['reverie']\n elif task_name == \"SOON\":\n task_obj_feat_db = obj_feat_db['soon']\n else:\n task_obj_feat_db = None\n else:\n task_obj_feat_db = None\n\n dataset.init_feat_db(feat_db=task_feat_db, obj_feat_db=task_obj_feat_db)\n\n\n logger.info(f\"{task_name}: {len(dataset)} samples loaded\")\n\n task_loader, pre_epoch = build_dataloader(\n dataset, distributed=args.distributed,\n training=training, batch_size=args.batch_size if training else args.val_batch_size, num_workers=args.workers\n )\n\n if training:\n ratio = dataset_cfg.Ratio[k]\n dataloaders[task_name] = (task_loader, ratio, pre_epoch)\n else:\n dataloaders[task_name] = PrefetchLoader(task_loader, device=device)\n\n # load agents\n agents[task_name] = load_agent(task_name.lower(), args, getattr(dataset, \"shortest_distances\", None), getattr(dataset, \"shortest_paths\", None))\n\n\n if training:\n meta_loader = MetaLoader(\n dataloaders,\n accum_steps=args.gradient_accumulation_step,\n distributed=args.distributed,\n device=device,\n off_batch_task=args.off_batch_task\n )\n meta_loader = PrefetchLoader(meta_loader, device)\n\n if args.num_steps_per_epoch!=-1:\n meta_loader.num_batches = args.num_steps_per_epoch\n else:\n return dataloaders, agents\n return meta_loader, agents"
},
{
"identifier": "create_feature_db",
"path": "tasks/feature_db.py",
"snippet": "def create_feature_db(config: Dict, image_feat_size: int, args) -> Dict[str, ImageFeaturesDB]:\n ret = {}\n for source in config:\n path = config[source] if config[source].startswith(\"/\") else os.path.join(args.data_dir, config[source])\n ret[source] = ImageFeaturesDB(\n path, \n image_feat_size\n )\n return ret"
},
{
"identifier": "create_object_feature_db",
"path": "tasks/feature_db.py",
"snippet": "def create_object_feature_db(config: Dict, obj_feat_size: int, args):\n ret = {}\n for source in config:\n path = config[source] if config[source].startswith(\"/\") else os.path.join(args.data_dir, config[source])\n if source == 'reverie':\n ret[source] = REVERIEObjectFeatureDB(\n path, \n obj_feat_size\n )\n elif source == 'soon':\n ret[source] = SOONObjectFeatureDB(\n path,\n obj_feat_size\n )\n return ret"
},
{
"identifier": "NavModel",
"path": "models/nav_model.py",
"snippet": "class NavModel(nn.Module):\n def __init__(self, args, logger, model_config):\n super().__init__()\n self.args = args\n config = init_vis_config(args, model_config)\n self.config = config\n\n # Large Language Model\n if args.resume_from_checkpoint is not None or args.from_scratch:\n logger.info(\"Initialize the model from config.\")\n model_config = AutoConfig.from_pretrained(config.pretrained_model_name_or_path)\n self.lang_model = ModifiedOPTForCasualLM(model_config, config) if 'opt' in config.pretrained_model_name_or_path \\\n else ModifiedLlamaForCausalLM(model_config, config)\n else:\n self.lang_model = ModifiedOPTForCasualLM.from_pretrained(config.pretrained_model_name_or_path, config) if \"opt\" in config.pretrained_model_name_or_path \\\n else ModifiedLlamaForCausalLM.from_pretrained(config.pretrained_model_name_or_path, config)\n \n self.lang_model.init_tokenizer(config.pretrained_model_name_or_path)\n\n self.hidden_size = self.lang_model.hidden_size\n self.model_type = self.lang_model.model_type\n\n # Panorama Encoding\n config.output_size = self.hidden_size\n self.img_embeddings = ImageEmbeddings(config, use_obj=args.enable_og, fuse_obj=args.fuse_obj)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.hidden_size)\n\n # global encoding\n self.gmap_pos_embeddings = nn.Sequential(\n nn.Linear(config.angle_feat_size + 3, self.hidden_size),\n nn.LayerNorm(self.hidden_size, eps=1e-12)\n )\n self.gmap_step_embeddings = nn.Embedding(config.max_action_steps, self.hidden_size)\n\n # local encoding\n self.vp_pos_embeddings = nn.Sequential(\n nn.Linear(config.angle_feat_size * 2 + 6, self.hidden_size),\n nn.LayerNorm(self.hidden_size, eps=1e-12)\n )\n\n self.obj_pos_embeddings = nn.Sequential(\n nn.Linear(config.angle_feat_size + 3, self.hidden_size),\n nn.LayerNorm(self.hidden_size, eps=1e-12)\n )\n\n if self.config.obj_feat_size > 0:\n self.og_head = nn.Sequential(\n nn.Linear(self.hidden_size, 100)\n ).to(self.lang_model.model_type) \n\n # Classfification from candidates\n self.out_head = nn.Sequential(\n nn.Linear(self.hidden_size, 100)\n ).to(self.lang_model.model_type)\n\n self.instruction = None\n self.history = None\n self.hist_vis = None\n\n self.drop_env = nn.Dropout(p=args.feat_dropout)\n\n logger.info(\"model type: {}\".format(self.model_type))\n\n\n def forward(self, mode: str, batch: Dict[str, Any], **kwargs) -> Dict[str, Any]:\n batch = collections.defaultdict(lambda: None, batch)\n\n if mode == 'panorama': # batch['view_img_fts'] [B, 36, D=768] --> dropout\n batch['view_img_fts'] = self.drop_env(batch['view_img_fts'])\n if 'obj_img_fts' in batch:\n batch['obj_img_fts'] = self.drop_env(batch['obj_img_fts'])\n return self.img_embeddings.forward_panorama_per_step(\n batch['view_img_fts'],\n batch['view_lens'],\n batch['loc_fts'],\n batch['nav_types'],\n batch['obj_img_fts'],\n batch['obj_lens'],\n batch['obj_loc_fts'],\n )\n\n elif mode == 'navigation':\n return self.forward_navigation(mode, batch, **kwargs)\n\n elif mode == \"summarization\" or mode == 'embodied_qa':\n return self.forward_summarization(mode, batch, **kwargs)\n\n elif mode == \"3dqa\":\n return self.forward_3dqa(mode, batch, **kwargs)\n \n elif mode == 'object_grounding':\n return self.forward_object_grounding(mode, batch, **kwargs)\n\n else:\n raise NotImplementedError('wrong mode: %s' % mode)\n \n\n def forward_navigation(\n self, \n mode, \n batch: Dict[str, Any], \n training: bool=True, \n **kwargs\n ) -> Dict[str, Any]:\n\n data_type = batch['data_type']\n vp_img_embeds = 
batch['vp_img_embeds']\n batch_size = vp_img_embeds.size(0)\n gmap_img_embeds, gmap_step_ids, gmap_pos_fts, \\\n gmap_masks, gmap_pair_dists, gmap_visited_masks, gmap_vpids \\\n = batch['gmap_img_embeds'], batch['gmap_step_ids'], batch['gmap_pos_fts'], \\\n batch['gmap_masks'], batch['gmap_pair_dists'], batch['gmap_visited_masks'], batch['gmap_vpids'],\n\n # global branch [B, Nums, D=768]\n gmap_embeds = torch.zeros_like(gmap_img_embeds)\n for b_ix in range(len(data_type)):\n gmap_embeds[b_ix:b_ix + 1] = gmap_img_embeds[b_ix:b_ix + 1] + \\\n self.gmap_step_embeddings(gmap_step_ids[b_ix:b_ix + 1]) + \\\n self.gmap_pos_embeddings(gmap_pos_fts[b_ix:b_ix + 1])\n\n\n ##### local branch #####\n vp_img_embeds, vp_pos_fts, vp_nav_masks, vp_cand_vpids = \\\n batch['vp_img_embeds'], batch['vp_pos_fts'], batch['vp_nav_masks'], batch['vp_cand_vpids']\n\n pano_masks = batch['pano_masks']\n\n vp_embeds = torch.zeros_like(vp_img_embeds)\n for b_ix in range(len(data_type)):\n vp_embeds[b_ix:b_ix + 1] = vp_img_embeds[b_ix:b_ix + 1] \\\n + self.vp_pos_embeddings(vp_pos_fts[b_ix:b_ix + 1])\n\n ##### fuse embeds #####\n gmap_embeds.masked_fill_(gmap_visited_masks.unsqueeze(-1), 0.)\n gmap_embeds.masked_fill_(gmap_masks.logical_not().unsqueeze(-1), 0.)\n cand_token_type_ids = torch.zeros((gmap_embeds.shape[0], gmap_embeds.shape[1])).int().to(gmap_embeds.device)\n\n local_vp_embeds = vp_embeds\n local_vp_embeds.masked_fill_(pano_masks.logical_not().unsqueeze(-1), 0.)\n\n fuse_embeds = torch.clone(gmap_embeds)\n\n for i in range(batch_size):\n visited_nodes = set([vp for vp, mask in zip(gmap_vpids[i], gmap_visited_masks[i]) if mask])\n tmp = {}\n bw_logits = 0\n for j, cand_vpid in enumerate(vp_cand_vpids[i]):\n if j > 0:\n if cand_vpid in visited_nodes:\n bw_logits += local_vp_embeds[i, j]\n else:\n tmp[cand_vpid] = local_vp_embeds[i, j]\n for j, vp in enumerate(gmap_vpids[i]):\n if j > 0 and vp not in visited_nodes:\n if vp in tmp:\n fuse_embeds[i, j] += tmp[vp]\n else:\n # fuse_embeds[i, j] += bw_logits\n cand_token_type_ids[i, j] = 1\n\n fuse_embeds += self.token_type_embeddings(cand_token_type_ids).to(fuse_embeds.device)\n fuse_embeds.masked_fill_(gmap_visited_masks.unsqueeze(-1), 0.)\n fuse_embeds.masked_fill_(gmap_masks.logical_not().unsqueeze(-1), 0.)\n\n cand_masks = torch.clone(gmap_masks & gmap_visited_masks.logical_not())\n cand_nums = cand_masks.sum(dim=-1)\n instruction = batch['instruction']\n history = batch['history']\n hist_vis = batch['hist_vis']\n hist_vis_input = []\n for vis in hist_vis:\n hist_vis_input.extend(vis)\n if hist_vis_input != []:\n hist_vis_input = torch.stack(hist_vis_input, dim=0)\n else:\n hist_vis_input = None\n\n hist_nums = [len(his) for his in history]\n\n text_input = self.lang_model.tokenize(batch[\"prompts\"]).to(fuse_embeds.device)\n\n # cand_embeds = fuse_embeds[cand_masks] # .to(self.model_type)\n cand_embeds = []\n inv_perms = []\n for bn in range(batch_size):\n # random permute\n cand_embed = fuse_embeds[bn][cand_masks[bn]][1:]\n rand_perm = torch.randperm(cand_embed.shape[0])\n inv_perm = torch.arange(cand_embed.shape[0])\n inv_perm[rand_perm] = torch.arange(cand_embed.shape[0])\n inv_perms.append(inv_perm)\n cand_embeds.append(cand_embed[rand_perm]) # remove stop features\n cand_embeds = torch.cat(cand_embeds, dim=0)\n\n output = self.lang_model(\n input_ids=text_input['input_ids'],\n attention_mask=text_input['attention_mask'],\n cand_vis=cand_embeds,\n hist_vis=hist_vis_input,\n )\n loss, hidden_states = output.loss, output.hidden_states\n\n fuse_logits = 
torch.zeros((fuse_embeds.shape[0], fuse_embeds.shape[1])).to(\n fuse_embeds.device).to(self.model_type)\n \n predictions = self.out_head(hidden_states[text_input['input_ids']==self.lang_model.cls_token_id[0]])\n \n for i in range(batch_size):\n fuse_logits[i][cand_masks[i]] = torch.cat([predictions[i, 0:1],predictions[i, 1:cand_nums[i]][inv_perms[i]]],dim=0)\n \n fuse_logits.masked_fill_(cand_masks.logical_not(), -float('inf'))\n\n return {\n 'fuse_embeds': fuse_embeds.detach(),\n 'fuse_logits': fuse_logits,\n }\n\n \n\n def forward_summarization(\n self, \n mode, \n batch: Dict[str, Any], \n training: bool=True, \n **kwargs\n ) -> Dict[str, Any]:\n\n vp_img_embeds = batch['vp_img_embeds']\n batch_size = vp_img_embeds.size(0)\n vp_img_embeds, vp_pos_fts, \\\n vp_nav_masks, vp_cand_vpids = \\\n batch['vp_img_embeds'], batch['vp_pos_fts'], \\\n batch['vp_nav_masks'], batch['vp_cand_vpids']\n \n # remove `stop`\n vp_img_embeds = vp_img_embeds[:, 1:, :]\n vp_nav_masks = vp_nav_masks[:, 1:]\n\n vp_pos_fts = torch.zeros(vp_img_embeds.shape[:2]+(14,), dtype=torch.float).to(vp_img_embeds.device)\n token_type_ids = torch.zeros(vp_img_embeds.shape[:2], dtype=torch.int).to(vp_img_embeds.device)\n vp_img_embeds += self.vp_pos_embeddings(vp_pos_fts)\n vp_img_embeds += self.token_type_embeddings(token_type_ids)\n\n instruction = batch['instruction']\n labels = batch['answer']\n history = batch['history']\n hist_vis = batch['hist_vis']\n data_type = batch['data_type']\n hist_vis_input = []\n\n for vis in hist_vis:\n hist_vis_input.extend(vis)\n if hist_vis_input != []:\n hist_vis_input = torch.stack(hist_vis_input, dim=0)\n else:\n hist_vis_input = None\n\n hist_nums = [len(his) for his in history]\n cand_nums = vp_nav_masks.sum(1)\n \n all_text = []\n\n for bn in range(batch_size):\n prompt = batch[\"prompts\"][bn]\n if data_type[0] == 'eqa' or data_type[0] == 'fgr2r':\n label = labels[bn] + f\"{self.lang_model.tokenizer.eos_token}\"\n else:\n label = batch[\"instruction\"][bn] + f\"{self.lang_model.tokenizer.eos_token}\"\n if training:\n all_text.append([prompt, label])\n else:\n all_text.append(prompt)\n\n text_input = self.lang_model.tokenize(all_text).to(vp_img_embeds.device)\n if training:\n labels = text_input['input_ids'].clone()\n labels[text_input['token_type_ids'][:, -labels.shape[-1]:] == 0] = -100\n outputs = self.lang_model(\n input_ids=text_input['input_ids'],\n attention_mask=text_input['attention_mask'],\n labels=labels,\n cand_vis=vp_img_embeds[vp_nav_masks],\n hist_vis=hist_vis_input,\n )\n loss, logits, hidden_states = outputs.loss, outputs.logits, outputs.hidden_states\n outputs = {\n \"loss\": loss\n }\n else:\n trie = kwargs.get('trie', None)\n logits_processor = [TrieLogitsProcessor(trie)] if trie is not None else []\n\n generate_ids = self.lang_model.generate(\n input_ids=text_input['input_ids'],\n attention_mask=text_input['attention_mask'],\n cand_vis=vp_img_embeds[vp_nav_masks],\n hist_vis=hist_vis_input,\n eos_token_id=self.lang_model.tokenizer.eos_token_id,\n max_new_tokens=50,\n do_sample=False,\n logits_processor=logits_processor\n ).tolist()\n\n generate_ids = [s[text_input[\"input_ids\"].shape[1]:] for i, s in enumerate(generate_ids)]\n generated_sentences = self.lang_model.tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)\n outputs = {\n \"generated_sentences\": generated_sentences\n }\n\n return outputs\n \n\n def forward_3dqa(\n self, \n mode, \n batch: Dict[str, Any], \n training: bool=True, \n **kwargs\n ) -> 
Dict[str, Any]:\n batch_size = len(batch['question'])\n data_type = batch['data_type']\n all_text = []\n for bn in range(batch_size):\n prompt = batch[\"prompts\"][bn]\n if training:\n ans = batch[\"answers\"][bn][0]+ f\"{self.lang_model.tokenizer.eos_token}\"\n all_text.append([prompt, ans])\n else:\n all_text.append(prompt)\n \n view_img_fts = pad_tensors_wgrad([batch[\"features\"][bn] for bn in range(batch_size)])\n view_lens = torch.tensor([batch[\"features\"][bn].shape[0] for bn in range(batch_size)]).to(view_img_fts.device)\n pano_outputs = self.img_embeddings.forward_panorama_per_step(\n view_img_fts=view_img_fts,\n view_lens=view_lens,\n )\n pano_embeds, pano_masks = pano_outputs[\"pano_embeds\"], pano_outputs[\"pano_masks\"]\n vp_pos_fts = torch.zeros(pano_embeds.shape[:2]+(14,), dtype=torch.float).to(pano_embeds.device)\n token_type_ids = torch.zeros(pano_embeds.shape[:2], dtype=torch.int).to(pano_embeds.device)\n pano_embeds += self.vp_pos_embeddings(vp_pos_fts)\n pano_embeds += self.token_type_embeddings(token_type_ids)\n\n text_input = self.lang_model.tokenize(all_text).to(pano_embeds.device)\n if training:\n labels = text_input['input_ids'].clone()\n labels[text_input['token_type_ids'][:, -labels.shape[-1]:] == 0] = -100\n outputs = self.lang_model(\n input_ids=text_input['input_ids'],\n attention_mask=text_input['attention_mask'],\n labels=labels,\n cand_vis=pano_embeds[pano_masks],\n )\n else:\n\n generate_ids = self.lang_model.generate(\n input_ids=text_input['input_ids'],\n attention_mask=text_input['attention_mask'],\n cand_vis=pano_embeds[pano_masks],\n eos_token_id=self.lang_model.tokenizer.eos_token_id,\n **kwargs\n ).tolist()\n\n generate_ids = [s[text_input[\"input_ids\"].shape[1]:] for i, s in enumerate(generate_ids)]\n generated_sentences = self.lang_model.tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)\n outputs = {\n \"generated_sentences\": generated_sentences\n }\n\n return outputs\n\n\n def forward_object_grounding(\n self, \n mode, \n batch: Dict[str, Any], \n training: bool=True, \n **kwargs\n ) -> Dict[str, Any]:\n\n data_type = batch['data_type']\n obj_embeds, obj_masks, obj_loc_fts = batch['obj_embeds'], batch['obj_masks'], batch['obj_loc_fts']\n\n batch_size = obj_embeds.size(0)\n obj_embeds = obj_embeds + self.obj_pos_embeddings(obj_loc_fts)\n\n cand_nums = obj_masks.sum(dim=1) + 1 # add not exist\n\n instruction = batch['instruction']\n history = batch['history']\n hist_vis = batch['hist_vis']\n hist_vis_input = []\n for vis in hist_vis:\n hist_vis_input.extend(vis)\n if hist_vis_input != []:\n hist_vis_input = torch.stack(hist_vis_input, dim=0)\n else:\n hist_vis_input = None\n\n hist_nums = [len(his) for his in history]\n\n text_input = self.lang_model.tokenize(batch[\"prompts\"]).to(obj_embeds.device)\n output = self.lang_model(\n input_ids=text_input['input_ids'],\n attention_mask=text_input['attention_mask'],\n cand_vis=obj_embeds[obj_masks],\n hist_vis=hist_vis_input,\n )\n loss, hidden_states = output.loss, output.hidden_states\n\n predictions = self.out_head(hidden_states[text_input['input_ids']==self.lang_model.cls_token_id[0]])\n for i in range(batch_size):\n predictions[i, cand_nums[i]:] = float('-inf')\n\n return {\n 'obj_logits': predictions\n }"
},
{
"identifier": "dist_models",
"path": "tools/optims.py",
"snippet": "def dist_models(args, model, logger):\n logger.info(\"*************** init model *************** \")\n # args.rank: global rank.\n total_gpus = torch.cuda.device_count()\n device_id = args.rank % total_gpus\n\n model.to(device_id)\n \n optimizer = torch.optim.AdamW([p for n, p in model.named_parameters() if p.requires_grad], lr=args.lr)\n\n lr_scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=args.num_warmup_steps)\n\n resume_from_epoch = check_checkpoint(\n args, model, optimizer, lr_scheduler, logger,\n )\n param_sums = sum(p.numel() for p in model.parameters() if p.requires_grad)\n logger.info(\"model initialized with {:.2f} M trainable parameters\".format(param_sums/1000**2))\n if args.distributed:\n from torch.nn.parallel import DistributedDataParallel as DDP\n model = DDP(model, device_ids=[device_id], find_unused_parameters=True)\n\n # args.batch_size: BATCH_SIZE_PER_GPU\n logger.info('Training in distributed mode : total_batch_size: %d' % (total_gpus * args.batch_size))\n else:\n total_gpus = 1\n logger.info('Training with a single process')\n\n return model, optimizer, resume_from_epoch, lr_scheduler"
},
{
"identifier": "save_checkpoint",
"path": "tools/optims.py",
"snippet": "def save_checkpoint(model, model_path, optimizer=None, epoch: int=0, save_states: bool=False):\n if hasattr(model, 'module'):\n model = model.module\n \n state_dict = {\n \"model_state_dict\": model.state_dict()\n }\n if save_states:\n state_dict.update({\n \"optimizer\": optimizer.state_dict(),\n \"epoch\": epoch,\n })\n\n torch.save(state_dict, model_path)"
},
{
"identifier": "Trie",
"path": "tools/trie.py",
"snippet": "class Trie:\n\n def __init__(self, bos, eos):\n self.root = TreeNode()\n self.bos = bos\n self.eos = eos\n\n def insert(self, word: List[int]):\n cur = self.root\n for c in word:\n cur = cur.child[c]\n\n def get_child_index(self, cur: TreeNode) -> List[int]:\n if len(cur.child)==0:\n return [self.eos]\n return list(cur.child.keys())\n \n def get_next_node(self, cur: TreeNode, w: int) -> TreeNode:\n if len(cur.child)==0:\n return cur\n return cur.child[w]"
}
] | import os
import json
import torch
import random
import torch.nn as nn
from tqdm import tqdm
from pathlib import Path
from typing import Dict
from tools.common_utils import all_gather
from tools.parser import read_args, random_seed
from tasks.loaders import create_dataloaders
from tasks.feature_db import create_feature_db, create_object_feature_db
from models.nav_model import NavModel
from tools.optims import dist_models, save_checkpoint
from tools.trie import Trie | 9,557 |
if args.rank == 0:
verbose_dict = dict(
step=step,
name=name,
# index=batch['sample_idx'],
loss=loss_metric.average,
entropy=entropy_metric.average,
instr_pred_metric=instr_pred_metric.average,
lr=lr_scheduler.get_last_lr()[0],
)
for k in dataset_cfg.SOURCE:
verbose_dict[k] = loss_stats[k].average
pbar.set_postfix(verbose_dict)
pbar.update()
if step == num_batches_per_epoch-1:
logger.info("***** train [{}] epoch *****".format(epoch))
train_stat_str = 'Loss: %.2f\n' % loss_metric.average
train_stat_str += "Instr_pred: %.2f\n" % instr_pred_metric.average
for task in dataset_cfg.SOURCE:
train_stat_str += "%s: %.2f\n" % (task, loss_stats[task].average)
logger.info(train_stat_str)
break
@torch.no_grad()
def val_one_epoch(
args,
global_cfg,
model,
optimizer,
criterion,
dataloaders,
agents,
epoch,
logger,
) -> Dict[str, Dict[str, float]]:
model.eval()
entropy_metric = Metrics()
loss_str = "\n[Eval] {} epoch {}\n".format(args.validation_split, epoch)
task_results = {}
for name, loader in dataloaders.items():
logger.info("***** validate {} split on {} task *****".format(args.validation_split, name))
dataset = dataloaders[name].get_dataset()
agent = agents[name]
preds = agent.validate(
name,
args,
global_cfg,
model,
loader,
entropy_metric=entropy_metric
)
all_preds = all_gather(preds)
all_preds = merge_dist_results(all_preds)
if args.rank == 0 and not args.validation_split.startswith('test'):
score_summary, item_metrics = dataset.eval_metrics(all_preds, logger=logger, name=name)
task_results[name] = score_summary
loss_str += "\n [Eval] dataset=[{}] \n".format(name)
for metric, val in score_summary.items():
if metric == 'sr':
loss_str += '\n[Eval] ||| %s: %.2f' % (metric, val)
else:
loss_str += ', %s: %.2f' % (metric, val)
        if args.rank == 0 and args.save_pred_results:
dataset.save_json(
all_preds,
os.path.join(args.output_dir, f"{name}_{args.validation_split}.json"),
item_metrics=item_metrics if args.save_detail_results else None
)
logger.info(loss_str)
return task_results
def merge_dist_results(results):
outs = []
for res in results:
outs.extend(res)
return outs
def calc_overall_score(results, cfg):
score = 0.
for task in results:
if task not in cfg.Multi.SOURCE:
continue
if task == 'R2R':
score += results[task]['spl'] / 60
elif task == 'REVERIE':
score += results[task]['spl'] / 36.63
elif task == 'CVDN':
pass
elif task == 'SOON':
score += results[task]['spl'] / 26.58
elif task == 'EQA':
pass
elif task == "ScanQA":
pass
else:
            raise NotImplementedError(f"The method for calculating the score of {task} is not implemented.")
return score
def main():
args, global_cfg, logger, device_id = read_args()
random_seed(args.seed + args.rank)
##################### DATASET #####################
|
class Metrics(object):
def __init__(self):
self.num = 0
self.total = 0
def accumulate(self, x):
self.num += 1
self.total += x
@property
def average(self):
if self.num == 0:
return 0
return self.total / self.num
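# Metrics is a minimal running-average accumulator: accumulate(x) adds one sample
# and .average returns total / num (0 before anything is accumulated), e.g.
#   m = Metrics(); m.accumulate(1.0); m.accumulate(3.0)   # m.average == 2.0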
def train_one_epoch(
args,
global_cfg,
model,
optimizer,
lr_scheduler,
criterion,
dataloaders,
agents,
epoch,
logger,
stage='multi'
):
model.train()
entropy_metric = Metrics()
loss_metric = Metrics()
instr_pred_metric = Metrics()
num_batches_per_epoch = dataloaders.num_batches
total_training_steps = num_batches_per_epoch * args.num_epochs
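    # The progress bar spans the full run (num_epochs * num_batches_per_epoch),
    # resumes from the current epoch's offset, and is only shown on rank 0.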
pbar = tqdm(
range(dataloaders.num_batches),
disable=args.rank!=0,
total=total_training_steps,
initial=(epoch * num_batches_per_epoch)
)
dataset_cfg = global_cfg.Pretrain if stage=='pretrain' else global_cfg.Multi
loss_stats = {k: Metrics() for k in dataset_cfg.SOURCE}
for step, (name, batch) in enumerate(dataloaders):
loss_coef = dataset_cfg.LOSS_COEF.get(name, 1.)
# perform embodied tasks
        # the effective batch size equals args.batch_size * world_size * args.gradient_accumulation_step
dataset = dataloaders.loader.get_dataset(name)
agent = agents.get(name)
loss = agent.train(
name,
batch,
args,
global_cfg,
model=model,
criterion=criterion,
dataset=dataset,
step=step,
entropy_metric=entropy_metric,
instr_pred_metric=instr_pred_metric
)
loss_metric.accumulate(loss.item())
loss_stats[name].accumulate(loss.item())
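        # Gradient accumulation: the optimizer, zero_grad and LR scheduler only
        # step once every `gradient_accumulation_step` batches, with gradients
        # clipped to a max norm of 40 beforehand.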
if (step+1) % args.gradient_accumulation_step==0:
torch.nn.utils.clip_grad_norm_(model.parameters(), 40.)
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
if args.rank == 0:
verbose_dict = dict(
step=step,
name=name,
# index=batch['sample_idx'],
loss=loss_metric.average,
entropy=entropy_metric.average,
instr_pred_metric=instr_pred_metric.average,
lr=lr_scheduler.get_last_lr()[0],
)
for k in dataset_cfg.SOURCE:
verbose_dict[k] = loss_stats[k].average
pbar.set_postfix(verbose_dict)
pbar.update()
if step == num_batches_per_epoch-1:
logger.info("***** train [{}] epoch *****".format(epoch))
train_stat_str = 'Loss: %.2f\n' % loss_metric.average
train_stat_str += "Instr_pred: %.2f\n" % instr_pred_metric.average
for task in dataset_cfg.SOURCE:
train_stat_str += "%s: %.2f\n" % (task, loss_stats[task].average)
logger.info(train_stat_str)
break
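# val_one_epoch: each task's agent runs inference on the chosen split, the
# per-rank predictions are all-gathered and merged, metrics are computed on
# rank 0, and predictions can optionally be dumped to JSON.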
@torch.no_grad()
def val_one_epoch(
args,
global_cfg,
model,
optimizer,
criterion,
dataloaders,
agents,
epoch,
logger,
) -> Dict[str, Dict[str, float]]:
model.eval()
entropy_metric = Metrics()
loss_str = "\n[Eval] {} epoch {}\n".format(args.validation_split, epoch)
task_results = {}
for name, loader in dataloaders.items():
logger.info("***** validate {} split on {} task *****".format(args.validation_split, name))
dataset = dataloaders[name].get_dataset()
agent = agents[name]
preds = agent.validate(
name,
args,
global_cfg,
model,
loader,
entropy_metric=entropy_metric
)
all_preds = all_gather(preds)
all_preds = merge_dist_results(all_preds)
if args.rank == 0 and not args.validation_split.startswith('test'):
score_summary, item_metrics = dataset.eval_metrics(all_preds, logger=logger, name=name)
task_results[name] = score_summary
loss_str += "\n [Eval] dataset=[{}] \n".format(name)
for metric, val in score_summary.items():
if metric == 'sr':
loss_str += '\n[Eval] ||| %s: %.2f' % (metric, val)
else:
loss_str += ', %s: %.2f' % (metric, val)
        if args.rank == 0 and args.save_pred_results:
dataset.save_json(
all_preds,
os.path.join(args.output_dir, f"{name}_{args.validation_split}.json"),
item_metrics=item_metrics if args.save_detail_results else None
)
logger.info(loss_str)
return task_results
def merge_dist_results(results):
outs = []
for res in results:
outs.extend(res)
return outs
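# calc_overall_score sums the per-task 'spl' metrics, each divided by a fixed
# task-specific constant (60 for R2R, 36.63 for REVERIE, 26.58 for SOON); tasks
# outside cfg.Multi.SOURCE are skipped, and CVDN/EQA/ScanQA add nothing here.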
def calc_overall_score(results, cfg):
score = 0.
for task in results:
if task not in cfg.Multi.SOURCE:
continue
if task == 'R2R':
score += results[task]['spl'] / 60
elif task == 'REVERIE':
score += results[task]['spl'] / 36.63
elif task == 'CVDN':
pass
elif task == 'SOON':
score += results[task]['spl'] / 26.58
elif task == 'EQA':
pass
elif task == "ScanQA":
pass
else:
            raise NotImplementedError(f"The method for calculating the score of {task} is not implemented.")
return score
def main():
args, global_cfg, logger, device_id = read_args()
random_seed(args.seed + args.rank)
##################### DATASET ##################### | feat_db = create_feature_db(global_cfg.Feature.feature_database, global_cfg.Feature.image_feat_size, args) | 4 | 2023-11-28 06:59:37+00:00 | 12k |
KylinYee/R2-Talker-code | test.py | [
{
"identifier": "NeRFDataset_Test",
"path": "nerf/provider.py",
"snippet": "class NeRFDataset_Test:\n def __init__(self, opt, device, downscale=1):\n super().__init__()\n \n self.opt = opt\n self.device = device\n self.downscale = downscale\n self.scale = opt.scale # camera radius scale to make sure camera are inside the bounding box.\n self.offset = opt.offset # camera offset\n self.bound = opt.bound # bounding box half length, also used as the radius to random sample poses.\n self.fp16 = opt.fp16\n\n self.start_index = opt.data_range[0]\n self.end_index = opt.data_range[1]\n\n self.training = False\n self.num_rays = -1\n\n # load nerf-compatible format data.\n \n with open(opt.pose, 'r') as f:\n transform = json.load(f)\n\n # load image size\n self.H = int(transform['cy']) * 2 // downscale\n self.W = int(transform['cx']) * 2 // downscale\n \n # read images\n frames = transform[\"frames\"]\n\n # use a slice of the dataset\n if self.end_index == -1: # abuse...\n self.end_index = len(frames)\n\n frames = frames[self.start_index:self.end_index]\n\n print(f'[INFO] load {len(frames)} frames.')\n\n # only load pre-calculated aud features when not live-streaming\n if not self.opt.asr:\n\n aud_features = np.load(self.opt.aud)\n\n if self.opt.cond_type == 'idexp':\n aud_features = aud_features.reshape(-1, 68, 3)\n aud_features = torch.from_numpy(aud_features)\n\n idexp_lm3d_mean = aud_features.mean(axis=0).reshape([1,68,3])\n idexp_lm3d_std = aud_features.std(axis=0).reshape([1,68,3])\n idexp_lm3d_normalized = (aud_features.reshape([-1,68,3]) - idexp_lm3d_mean)/idexp_lm3d_std\n\n # step1. clamp the lm3d, to regularize apparent outliers\n lm3d_clamp_std = 2.3 # typically 1.~5., reduce it when blurry or bad cases occurs\n idexp_lm3d_normalized[:,0:17] = torch.clamp(idexp_lm3d_normalized[:,0:17], -lm3d_clamp_std, lm3d_clamp_std) # yaw_x_y_z\n idexp_lm3d_normalized[:,17:27,0:2] = torch.clamp(idexp_lm3d_normalized[:,17:27,0:2], -lm3d_clamp_std/2, lm3d_clamp_std/2) # brow_x_y\n idexp_lm3d_normalized[:,17:27,2] = torch.clamp(idexp_lm3d_normalized[:,17:27,2], -lm3d_clamp_std, lm3d_clamp_std) # brow_z\n idexp_lm3d_normalized[:,27:36] = torch.clamp(idexp_lm3d_normalized[:,27:36], -lm3d_clamp_std, lm3d_clamp_std) # nose\n idexp_lm3d_normalized[:,36:48,0:2] = torch.clamp(idexp_lm3d_normalized[:,36:48,0:2], -lm3d_clamp_std/2, lm3d_clamp_std/2) # eye_x_y\n idexp_lm3d_normalized[:,36:48,2] = torch.clamp(idexp_lm3d_normalized[:,36:48,2], -lm3d_clamp_std, lm3d_clamp_std) # eye_z\n idexp_lm3d_normalized[:,48:68] = torch.clamp(idexp_lm3d_normalized[:,48:68], -lm3d_clamp_std, lm3d_clamp_std) # mouth\n\n aud_features = idexp_lm3d_normalized*idexp_lm3d_std + idexp_lm3d_mean\n\n\n # _lambda_other = 0.4\n # _lambda_lip = 0.2\n # moving_lm = aud_features[0].clone()\n # print(aud_features[0,:48].shape)\n # for i in range(aud_features.size()[0]):\n # aud_features[i,0:17] = 2.0*_lambda_other * moving_lm[0:17] + (1 - 2.0*_lambda_other) * aud_features[i,0:17] # yaw\n # aud_features[i,17:27] = 2.0*_lambda_other * moving_lm[17:27] + (1 - 2.0*_lambda_other) * aud_features[i,17:27] # brow\n # aud_features[i,27:36] = 2.0*_lambda_other * moving_lm[27:36] + (1 - 2.0*_lambda_other) * aud_features[i,27:36] # nose\n # aud_features[i,36:48] = _lambda_other * moving_lm[36:48] + (1 - _lambda_other) * aud_features[i,36:48] # eye\n # aud_features[i,:48] = moving_lm[:48]\n # aud_features[i,48:68] = _lambda_lip * moving_lm[48:68] + (1 - _lambda_lip) * aud_features[i,48:68]\n else:\n aud_features = torch.from_numpy(aud_features)\n\n aud_features = aud_features.reshape(-1, 68, 3)\n\n if self.opt.method == 
'genefaceDagger':\n video_idexp_lm3d_mean = aud_features.mean(axis=0).reshape([1,68,3])\n video_idexp_lm3d_std = aud_features.std(axis=0).reshape([1,68,3])\n aud_features = (aud_features - video_idexp_lm3d_mean) / video_idexp_lm3d_std\n\n # support both [N, 16] labels and [N, 16, K] logits\n if len(aud_features.shape) == 3:\n # if self.opt.cond_type in ['eo','ds']:\n # aud_features = aud_features.float().permute(0, 2, 1) # [N, 16, 29] --> [N, 29, 16] \n \n\n if self.opt.emb:\n print(f'[INFO] argmax to aud features {aud_features.shape} for --emb mode')\n aud_features = aud_features.argmax(1) # [N, 16]\n \n else:\n assert self.opt.emb, \"aud only provide labels, must use --emb\"\n aud_features = aud_features.long()\n\n print(f'[INFO] load {self.opt.aud} aud_features: {aud_features.shape}')\n\n self.poses = []\n self.auds = []\n self.eye_area = []\n\n for f in tqdm.tqdm(frames, desc=f'Loading data'):\n \n pose = np.array(f['transform_matrix'], dtype=np.float32) # [4, 4]\n pose = nerf_matrix_to_ngp(pose, scale=self.scale, offset=self.offset)\n self.poses.append(pose)\n\n # find the corresponding audio to the image frame\n if not self.opt.asr and self.opt.aud == '':\n aud = aud_features[min(f['aud_id'], aud_features.shape[0] - 1)] # careful for the last frame...\n self.auds.append(aud)\n\n if self.opt.exp_eye:\n \n if 'eye_ratio' in f:\n area = f['eye_ratio']\n else:\n area = 0.25 # default value for opened eye\n \n self.eye_area.append(area)\n \n # load pre-extracted background image (should be the same size as training image...)\n\n if self.opt.bg_img == 'white': # special\n bg_img = np.ones((self.H, self.W, 3), dtype=np.float32)\n elif self.opt.bg_img == 'black': # special\n bg_img = np.zeros((self.H, self.W, 3), dtype=np.float32)\n else: # load from file\n bg_img = cv2.imread(self.opt.bg_img, cv2.IMREAD_UNCHANGED) # [H, W, 3]\n if bg_img.shape[0] != self.H or bg_img.shape[1] != self.W:\n bg_img = cv2.resize(bg_img, (self.W, self.H), interpolation=cv2.INTER_AREA)\n bg_img = cv2.cvtColor(bg_img, cv2.COLOR_BGR2RGB)\n bg_img = bg_img.astype(np.float32) / 255 # [H, W, 3/4]\n\n self.bg_img = bg_img\n\n self.poses = np.stack(self.poses, axis=0)\n\n # smooth camera path...\n if self.opt.smooth_path:\n self.poses = smooth_camera_path(self.poses, self.opt.smooth_path_window)\n \n self.poses = torch.from_numpy(self.poses) # [N, 4, 4]\n \n if self.opt.asr:\n # live streaming, no pre-calculated auds\n self.auds = None\n else:\n # auds corresponding to images\n if self.opt.aud == '':\n self.auds = torch.stack(self.auds, dim=0) # eo: [N, 32, 16], idexp_lm3ds: [N, 68, 3]\n # auds is novel, may have a different length with images\n else:\n self.auds = aud_features\n \n self.bg_img = torch.from_numpy(self.bg_img)\n\n if self.opt.exp_eye:\n self.eye_area = np.array(self.eye_area, dtype=np.float32) # [N]\n print(f'[INFO] eye_area: {self.eye_area.min()} - {self.eye_area.max()}')\n\n if self.opt.smooth_eye:\n\n # naive 5 window average\n ori_eye = self.eye_area.copy()\n for i in range(ori_eye.shape[0]):\n start = max(0, i - 1)\n end = min(ori_eye.shape[0], i + 2)\n self.eye_area[i] = ori_eye[start:end].mean()\n\n self.eye_area = torch.from_numpy(self.eye_area).view(-1, 1) # [N, 1]\n\n # always preload\n self.poses = self.poses.to(self.device)\n\n if self.auds is not None:\n self.auds = self.auds.to(self.device)\n\n self.bg_img = self.bg_img.to(torch.half).to(self.device)\n \n if self.opt.exp_eye:\n self.eye_area = self.eye_area.to(self.device)\n\n # load intrinsics\n \n fl_x = fl_y = transform['focal_len']\n\n 
cx = (transform['cx'] / downscale)\n cy = (transform['cy'] / downscale)\n\n self.intrinsics = np.array([fl_x, fl_y, cx, cy])\n\n # directly build the coordinate meshgrid in [-1, 1]^2\n self.bg_coords = get_bg_coords(self.H, self.W, self.device) # [1, H*W, 2] in [-1, 1]\n \n def mirror_index(self, index):\n size = self.poses.shape[0]\n turn = index // size\n res = index % size\n if turn % 2 == 0:\n return res\n else:\n return size - res - 1\n\n def collate(self, index):\n\n B = len(index) # a list of length 1\n # assert B == 1\n\n results = {}\n\n # audio use the original index\n if self.auds is not None:\n if self.opt.cond_type == 'idexp':\n auds = get_audio_features(self.auds, self.opt.att, index[0], smooth_win_size=5).to(self.device)\n else:\n auds = get_audio_features(self.auds, self.opt.att, index[0]).to(self.device)\n \n results['auds'] = auds\n\n # head pose and bg image may mirror (replay --> <-- --> <--).\n index[0] = self.mirror_index(index[0])\n\n poses = self.poses[index].to(self.device) # [B, 4, 4]\n \n rays = get_rays(poses, self.intrinsics, self.H, self.W, self.num_rays, self.opt.patch_size)\n\n results['index'] = index # for ind. code\n results['H'] = self.H\n results['W'] = self.W\n results['rays_o'] = rays['rays_o']\n results['rays_d'] = rays['rays_d']\n\n if self.opt.exp_eye:\n results['eye'] = self.eye_area[index].to(self.device) # [1]\n else:\n results['eye'] = None\n\n bg_img = self.bg_img.view(1, -1, 3).repeat(B, 1, 1).to(self.device)\n\n results['bg_color'] = bg_img\n\n bg_coords = self.bg_coords # [1, N, 2]\n results['bg_coords'] = bg_coords\n\n results['poses'] = convert_poses(poses) # [B, 6]\n results['poses_matrix'] = poses # [B, 4, 4]\n \n return results\n\n def dataloader(self):\n\n \n # test with novel auds, then use its length\n if self.auds is not None:\n size = self.auds.shape[0]\n # live stream test, use 2 * len(poses), so it naturally mirrors.\n else:\n size = 2 * self.poses.shape[0]\n\n loader = DataLoader(list(range(size)), batch_size=1, collate_fn=self.collate, shuffle=False, num_workers=0)\n loader._data = self # an ugly fix... we need poses in trainer.\n\n # do evaluate if has gt images and use self-driven setting\n loader.has_gt = False\n\n return loader"
},
{
"identifier": "NeRFGUI",
"path": "nerf/gui.py",
"snippet": "class NeRFGUI:\n def __init__(self, opt, trainer, data_loader, debug=True):\n self.opt = opt # shared with the trainer's opt to support in-place modification of rendering parameters.\n self.W = opt.W\n self.H = opt.H\n self.cam = OrbitCamera(opt.W, opt.H, r=opt.radius, fovy=opt.fovy)\n self.debug = debug\n self.training = False\n self.step = 0 # training step \n\n self.trainer = trainer\n self.data_loader = data_loader\n\n # override with dataloader's intrinsics\n self.W = data_loader._data.W\n self.H = data_loader._data.H\n self.cam.update_intrinsics(data_loader._data.intrinsics)\n\n # use dataloader's pose\n pose_init = data_loader._data.poses[0]\n self.cam.update_pose(pose_init.detach().cpu().numpy())\n\n # use dataloader's bg\n bg_img = data_loader._data.bg_img #.view(1, -1, 3)\n if self.H != bg_img.shape[0] or self.W != bg_img.shape[1]:\n bg_img = F.interpolate(bg_img.permute(2, 0, 1).unsqueeze(0).contiguous(), (self.H, self.W), mode='bilinear').squeeze(0).permute(1, 2, 0).contiguous()\n self.bg_color = bg_img.view(1, -1, 3)\n\n # audio features (from dataloader, only used in non-playing mode)\n self.audio_features = data_loader._data.auds # [N, 29, 16]\n self.audio_idx = 0\n\n # control eye\n self.eye_area = None if not self.opt.exp_eye else data_loader._data.eye_area.mean().item()\n\n # playing seq from dataloader, or pause.\n self.playing = False\n self.loader = iter(data_loader)\n\n self.render_buffer = np.zeros((self.W, self.H, 3), dtype=np.float32)\n self.need_update = True # camera moved, should reset accumulation\n self.spp = 1 # sample per pixel\n self.mode = 'image' # choose from ['image', 'depth']\n\n self.dynamic_resolution = False # assert False!\n self.downscale = 1\n self.train_steps = 16\n\n self.ind_index = 0\n self.ind_num = trainer.model.individual_codes.shape[0]\n\n # build asr\n if self.opt.asr:\n self.asr = ASR(opt)\n \n dpg.create_context()\n self.register_dpg()\n self.test_step()\n \n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n if self.opt.asr:\n self.asr.stop() \n dpg.destroy_context()\n\n def train_step(self):\n\n starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)\n starter.record()\n\n outputs = self.trainer.train_gui(self.data_loader, step=self.train_steps)\n\n ender.record()\n torch.cuda.synchronize()\n t = starter.elapsed_time(ender)\n\n self.step += self.train_steps\n self.need_update = True\n\n dpg.set_value(\"_log_train_time\", f'{t:.4f}ms ({int(1000/t)} FPS)')\n dpg.set_value(\"_log_train_log\", f'step = {self.step: 5d} (+{self.train_steps: 2d}), loss = {outputs[\"loss\"]:.4f}, lr = {outputs[\"lr\"]:.5f}')\n\n # dynamic train steps\n # max allowed train time per-frame is 500 ms\n full_t = t / self.train_steps * 16\n train_steps = min(16, max(4, int(16 * 500 / full_t)))\n if train_steps > self.train_steps * 1.2 or train_steps < self.train_steps * 0.8:\n self.train_steps = train_steps\n\n def prepare_buffer(self, outputs):\n if self.mode == 'image':\n return outputs['image']\n else:\n return np.expand_dims(outputs['depth'], -1).repeat(3, -1)\n\n def test_step(self):\n\n if self.need_update or self.spp < self.opt.max_spp:\n \n starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)\n starter.record()\n\n if self.playing:\n try:\n data = next(self.loader)\n except StopIteration:\n self.loader = iter(self.data_loader)\n data = next(self.loader)\n \n if self.opt.asr:\n # use the live audio stream\n data['auds'] = 
self.asr.get_next_feat()\n\n outputs = self.trainer.test_gui_with_data(data, self.W, self.H)\n\n # sync local camera pose\n self.cam.update_pose(data['poses_matrix'][0].detach().cpu().numpy())\n \n else:\n if self.audio_features is not None:\n auds = get_audio_features(self.audio_features, self.opt.att, self.audio_idx)\n else:\n auds = None\n outputs = self.trainer.test_gui(self.cam.pose, self.cam.intrinsics, self.W, self.H, auds, self.eye_area, self.ind_index, self.bg_color, self.spp, self.downscale)\n\n ender.record()\n torch.cuda.synchronize()\n t = starter.elapsed_time(ender)\n\n # update dynamic resolution\n if self.dynamic_resolution:\n # max allowed infer time per-frame is 200 ms\n full_t = t / (self.downscale ** 2)\n downscale = min(1, max(1/4, math.sqrt(200 / full_t)))\n if downscale > self.downscale * 1.2 or downscale < self.downscale * 0.8:\n self.downscale = downscale\n\n if self.need_update:\n self.render_buffer = self.prepare_buffer(outputs)\n self.spp = 1\n self.need_update = False\n else:\n self.render_buffer = (self.render_buffer * self.spp + self.prepare_buffer(outputs)) / (self.spp + 1)\n self.spp += 1\n \n if self.playing:\n self.need_update = True\n\n dpg.set_value(\"_log_infer_time\", f'{t:.4f}ms ({int(1000/t)} FPS)')\n dpg.set_value(\"_log_resolution\", f'{int(self.downscale * self.W)}x{int(self.downscale * self.H)}')\n dpg.set_value(\"_log_spp\", self.spp)\n dpg.set_value(\"_texture\", self.render_buffer)\n\n \n def register_dpg(self):\n\n ### register texture \n\n with dpg.texture_registry(show=False):\n dpg.add_raw_texture(self.W, self.H, self.render_buffer, format=dpg.mvFormat_Float_rgb, tag=\"_texture\")\n\n ### register window\n\n # the rendered image, as the primary window\n with dpg.window(tag=\"_primary_window\", width=self.W, height=self.H):\n\n # add the texture\n dpg.add_image(\"_texture\")\n\n # dpg.set_primary_window(\"_primary_window\", True)\n\n dpg.show_tool(dpg.mvTool_Metrics)\n\n # control window\n with dpg.window(label=\"Control\", tag=\"_control_window\", width=400, height=300):\n\n # button theme\n with dpg.theme() as theme_button:\n with dpg.theme_component(dpg.mvButton):\n dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18))\n dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47))\n dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83))\n dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)\n dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)\n\n # time\n if not self.opt.test:\n with dpg.group(horizontal=True):\n dpg.add_text(\"Train time: \")\n dpg.add_text(\"no data\", tag=\"_log_train_time\") \n\n with dpg.group(horizontal=True):\n dpg.add_text(\"Infer time: \")\n dpg.add_text(\"no data\", tag=\"_log_infer_time\")\n \n with dpg.group(horizontal=True):\n dpg.add_text(\"SPP: \")\n dpg.add_text(\"1\", tag=\"_log_spp\")\n\n # train button\n if not self.opt.test:\n with dpg.collapsing_header(label=\"Train\", default_open=True):\n\n # train / stop\n with dpg.group(horizontal=True):\n dpg.add_text(\"Train: \")\n\n def callback_train(sender, app_data):\n if self.training:\n self.training = False\n dpg.configure_item(\"_button_train\", label=\"start\")\n else:\n self.training = True\n dpg.configure_item(\"_button_train\", label=\"stop\")\n\n dpg.add_button(label=\"start\", tag=\"_button_train\", callback=callback_train)\n dpg.bind_item_theme(\"_button_train\", theme_button)\n\n def callback_reset(sender, app_data):\n @torch.no_grad()\n def weight_reset(m: nn.Module):\n reset_parameters = getattr(m, \"reset_parameters\", 
None)\n if callable(reset_parameters):\n m.reset_parameters()\n self.trainer.model.apply(fn=weight_reset)\n self.trainer.model.reset_extra_state() # for cuda_ray density_grid and step_counter\n self.need_update = True\n\n dpg.add_button(label=\"reset\", tag=\"_button_reset\", callback=callback_reset)\n dpg.bind_item_theme(\"_button_reset\", theme_button)\n\n # save ckpt\n with dpg.group(horizontal=True):\n dpg.add_text(\"Checkpoint: \")\n\n def callback_save(sender, app_data):\n self.trainer.save_checkpoint(full=True, best=False)\n dpg.set_value(\"_log_ckpt\", \"saved \" + os.path.basename(self.trainer.stats[\"checkpoints\"][-1]))\n self.trainer.epoch += 1 # use epoch to indicate different calls.\n\n dpg.add_button(label=\"save\", tag=\"_button_save\", callback=callback_save)\n dpg.bind_item_theme(\"_button_save\", theme_button)\n\n dpg.add_text(\"\", tag=\"_log_ckpt\")\n \n # save mesh\n with dpg.group(horizontal=True):\n dpg.add_text(\"Marching Cubes: \")\n\n def callback_mesh(sender, app_data):\n self.trainer.save_mesh(resolution=256, threshold=10)\n dpg.set_value(\"_log_mesh\", \"saved \" + f'{self.trainer.name}_{self.trainer.epoch}.ply')\n self.trainer.epoch += 1 # use epoch to indicate different calls.\n\n dpg.add_button(label=\"mesh\", tag=\"_button_mesh\", callback=callback_mesh)\n dpg.bind_item_theme(\"_button_mesh\", theme_button)\n\n dpg.add_text(\"\", tag=\"_log_mesh\")\n\n with dpg.group(horizontal=True):\n dpg.add_text(\"\", tag=\"_log_train_log\")\n\n \n # rendering options\n with dpg.collapsing_header(label=\"Options\", default_open=True):\n \n # playing\n with dpg.group(horizontal=True):\n dpg.add_text(\"Play: \")\n\n def callback_play(sender, app_data):\n \n if self.playing:\n self.playing = False\n dpg.configure_item(\"_button_play\", label=\"start\")\n else:\n self.playing = True\n dpg.configure_item(\"_button_play\", label=\"stop\")\n if self.opt.asr:\n self.asr.warm_up()\n self.need_update = True\n\n dpg.add_button(label=\"start\", tag=\"_button_play\", callback=callback_play)\n dpg.bind_item_theme(\"_button_play\", theme_button)\n\n # set asr\n if self.opt.asr:\n\n # clear queue button\n def callback_clear_queue(sender, app_data):\n \n self.asr.clear_queue()\n self.need_update = True\n\n dpg.add_button(label=\"clear\", tag=\"_button_clear_queue\", callback=callback_clear_queue)\n dpg.bind_item_theme(\"_button_clear_queue\", theme_button)\n\n # dynamic rendering resolution\n with dpg.group(horizontal=True):\n\n def callback_set_dynamic_resolution(sender, app_data):\n if self.dynamic_resolution:\n self.dynamic_resolution = False\n self.downscale = 1\n else:\n self.dynamic_resolution = True\n self.need_update = True\n\n # Disable dynamic resolution for face.\n # dpg.add_checkbox(label=\"dynamic resolution\", default_value=self.dynamic_resolution, callback=callback_set_dynamic_resolution)\n dpg.add_text(f\"{self.W}x{self.H}\", tag=\"_log_resolution\")\n\n # mode combo\n def callback_change_mode(sender, app_data):\n self.mode = app_data\n self.need_update = True\n \n dpg.add_combo(('image', 'depth'), label='mode', default_value=self.mode, callback=callback_change_mode)\n\n\n # bg_color picker\n def callback_change_bg(sender, app_data):\n self.bg_color = torch.tensor(app_data[:3], dtype=torch.float32) # only need RGB in [0, 1]\n self.need_update = True\n\n dpg.add_color_edit((255, 255, 255), label=\"Background Color\", width=200, tag=\"_color_editor\", no_alpha=True, callback=callback_change_bg)\n\n # audio index slider\n if not self.opt.asr:\n def 
callback_set_audio_index(sender, app_data):\n self.audio_idx = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"Audio\", min_value=0, max_value=self.audio_features.shape[0] - 1, format=\"%d\", default_value=self.audio_idx, callback=callback_set_audio_index)\n\n # ind code index slider\n if self.opt.ind_dim > 0:\n def callback_set_individual_code(sender, app_data):\n self.ind_index = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"Individual\", min_value=0, max_value=self.ind_num - 1, format=\"%d\", default_value=self.ind_index, callback=callback_set_individual_code)\n\n # eye area slider\n if self.opt.exp_eye:\n def callback_set_eye(sender, app_data):\n self.eye_area = app_data\n self.need_update = True\n\n dpg.add_slider_float(label=\"eye area\", min_value=0, max_value=0.5, format=\"%.2f percent\", default_value=self.eye_area, callback=callback_set_eye)\n\n # fov slider\n def callback_set_fovy(sender, app_data):\n self.cam.fovy = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"FoV (vertical)\", min_value=1, max_value=120, format=\"%d deg\", default_value=self.cam.fovy, callback=callback_set_fovy)\n\n # dt_gamma slider\n def callback_set_dt_gamma(sender, app_data):\n self.opt.dt_gamma = app_data\n self.need_update = True\n\n dpg.add_slider_float(label=\"dt_gamma\", min_value=0, max_value=0.1, format=\"%.5f\", default_value=self.opt.dt_gamma, callback=callback_set_dt_gamma)\n\n # max_steps slider\n def callback_set_max_steps(sender, app_data):\n self.opt.max_steps = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"max steps\", min_value=1, max_value=1024, format=\"%d\", default_value=self.opt.max_steps, callback=callback_set_max_steps)\n\n # aabb slider\n def callback_set_aabb(sender, app_data, user_data):\n # user_data is the dimension for aabb (xmin, ymin, zmin, xmax, ymax, zmax)\n self.trainer.model.aabb_infer[user_data] = app_data\n\n # also change train aabb ? 
[better not...]\n #self.trainer.model.aabb_train[user_data] = app_data\n\n self.need_update = True\n\n dpg.add_separator()\n dpg.add_text(\"Axis-aligned bounding box:\")\n\n with dpg.group(horizontal=True):\n dpg.add_slider_float(label=\"x\", width=150, min_value=-self.opt.bound, max_value=0, format=\"%.2f\", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=0)\n dpg.add_slider_float(label=\"\", width=150, min_value=0, max_value=self.opt.bound, format=\"%.2f\", default_value=self.opt.bound, callback=callback_set_aabb, user_data=3)\n\n with dpg.group(horizontal=True):\n dpg.add_slider_float(label=\"y\", width=150, min_value=-self.opt.bound, max_value=0, format=\"%.2f\", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=1)\n dpg.add_slider_float(label=\"\", width=150, min_value=0, max_value=self.opt.bound, format=\"%.2f\", default_value=self.opt.bound, callback=callback_set_aabb, user_data=4)\n\n with dpg.group(horizontal=True):\n dpg.add_slider_float(label=\"z\", width=150, min_value=-self.opt.bound, max_value=0, format=\"%.2f\", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=2)\n dpg.add_slider_float(label=\"\", width=150, min_value=0, max_value=self.opt.bound, format=\"%.2f\", default_value=self.opt.bound, callback=callback_set_aabb, user_data=5)\n \n\n # debug info\n if self.debug:\n with dpg.collapsing_header(label=\"Debug\"):\n # pose\n dpg.add_separator()\n dpg.add_text(\"Camera Pose:\")\n dpg.add_text(str(self.cam.pose), tag=\"_log_pose\")\n\n\n ### register camera handler\n\n def callback_camera_drag_rotate(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n dx = app_data[1]\n dy = app_data[2]\n\n self.cam.orbit(dx, dy)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n def callback_camera_wheel_scale(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n delta = app_data\n\n self.cam.scale(delta)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n def callback_camera_drag_pan(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n dx = app_data[1]\n dy = app_data[2]\n\n self.cam.pan(dx, dy)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n with dpg.handler_registry():\n dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Left, callback=callback_camera_drag_rotate)\n dpg.add_mouse_wheel_handler(callback=callback_camera_wheel_scale)\n dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Middle, callback=callback_camera_drag_pan)\n\n \n dpg.create_viewport(title='RAD-NeRF', width=1080, height=720, resizable=True)\n\n ### global theme\n with dpg.theme() as theme_no_padding:\n with dpg.theme_component(dpg.mvAll):\n # set all padding to 0 to avoid scroll bar\n dpg.add_theme_style(dpg.mvStyleVar_WindowPadding, 0, 0, category=dpg.mvThemeCat_Core)\n dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 0, 0, category=dpg.mvThemeCat_Core)\n dpg.add_theme_style(dpg.mvStyleVar_CellPadding, 0, 0, category=dpg.mvThemeCat_Core)\n \n dpg.bind_item_theme(\"_primary_window\", theme_no_padding)\n\n dpg.setup_dearpygui()\n\n #dpg.show_metrics()\n\n dpg.show_viewport()\n\n\n def render(self):\n\n while dpg.is_dearpygui_running():\n # update texture every frame\n if self.training:\n self.train_step()\n # audio stream thread...\n if self.opt.asr and self.playing:\n # run 2 ASR steps (audio is at 50FPS, video is 
at 25FPS)\n for _ in range(2):\n self.asr.run_step()\n self.test_step()\n dpg.render_dearpygui_frame()"
}
] | import torch
import argparse
from nerf.provider import NeRFDataset_Test
from nerf.gui import NeRFGUI
from nerf.utils import *
from nerf.network import NeRFNetwork, R2TalkerNeRF, GeneNeRFNetwork | 10,752 |
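    # The options below configure audio-driven talking-head inference; they are read
    # by NeRFDataset_Test (pose/audio/background loading) and, in GUI mode, by NeRFGUI.
    # Minimal sketch of the typical wiring (illustrative only; `device` and `trainer`
    # are assumed to be constructed elsewhere):
    #   opt = parser.parse_args()
    #   test_loader = NeRFDataset_Test(opt, device=device).dataloader()
    #   NeRFGUI(opt, trainer, test_loader).render()   # only when --gui is passed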
### network backbone options
parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training")
parser.add_argument('--lambda_amb', type=float, default=0.1, help="lambda for ambient loss")
parser.add_argument('--fbg', action='store_true', help="frame-wise bg")
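    # The next three flags control the explicit eye conditioning: --exp_eye enables it,
    # --fix_eye pins the eye area to a constant (negative disables the override), and
    # --smooth_eye averages the per-frame eye-area sequence.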
parser.add_argument('--exp_eye', action='store_true', help="explicitly control the eyes")
parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye")
parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence")
parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform")
### dataset options
parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)")
# parser.add_argument('--preload', action='store_true', help="preload all data into GPU, accelerate training but use more GPU memory")
# (the default value is for the fox dataset)
    parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3; if > 1, adaptive ray marching will be invoked.")
parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3")
parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location")
parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)")
parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera")
parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)")
parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)")
parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable")
parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region")
    parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in an exponential decay way...")
parser.add_argument('--torso', action='store_true', help="fix head and train torso")
parser.add_argument('--head_ckpt', type=str, default='', help="head model")
### GUI options
parser.add_argument('--gui', action='store_true', help="start a GUI")
parser.add_argument('--W', type=int, default=450, help="GUI width")
parser.add_argument('--H', type=int, default=450, help="GUI height")
parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center")
parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy")
parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel")
### else
parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)")
parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits")
parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off")
parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size")
parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off")
parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension")
parser.add_argument('--part', action='store_true', help="use partial training data (1/10)")
parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)")
parser.add_argument('--train_camera', action='store_true', help="optimize camera pose")
parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size")
parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size")
# asr
parser.add_argument('--asr', action='store_true', help="load asr for real-time app")
parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input")
parser.add_argument('--asr_play', action='store_true', help="play out the audio")
parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto')
# parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self')
parser.add_argument('--asr_save_feats', action='store_true')
# audio FPS
parser.add_argument('--fps', type=int, default=50)
# sliding window left-middle-right length (unit: 20ms)
parser.add_argument('-l', type=int, default=10)
parser.add_argument('-m', type=int, default=50)
parser.add_argument('-r', type=int, default=10)
opt = parser.parse_args()
if opt.method == 'r2talker':
opt.cond_type = 'idexp'
elif opt.method == 'genefaceDagger':
opt.cond_type = 'idexp'
elif opt.method == 'rad-nerf':
opt.cond_type = 'eo'
# assert test mode
opt.test = True
opt.test_train = False
# explicit smoothing
opt.smooth_path = True
opt.smooth_eye = True
opt.smooth_lips = True
assert opt.pose != '', 'Must provide a pose source'
assert opt.aud != '', 'Must provide an audio source'
if opt.O:
opt.fp16 = True
opt.exp_eye = True
opt.cuda_ray = True
# assert opt.cuda_ray, "Only support CUDA ray mode."
print(opt)
seed_everything(opt.seed)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if opt.method == 'r2talker':
model = R2TalkerNeRF(opt)
elif opt.method == 'genefaceDagger':
model = GeneNeRFNetwork(opt)
elif opt.method == 'rad-nerf':
model = NeRFNetwork(opt)
# print(model)
trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, fp16=opt.fp16, metrics=[], use_checkpoint=opt.ckpt)
|
# torch.autograd.set_detect_anomaly(True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--pose', type=str, help="transforms.json, pose source")
parser.add_argument('--aud', type=str, default=None, help="aud.npy, audio source")
parser.add_argument('--cond_type', type=str, default=None, help="type of driving condition: eo, ds, idexp")
parser.add_argument('--method', type=str, default='r2talker', help="r2talker, genefaceDagger, rad-nerf")
parser.add_argument('--bg_img', type=str, default='white', help="bg.jpg, background image source")
parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray --exp_eye")
# parser.add_argument('--test', action='store_true', help="test mode (load model and test dataset)")
# parser.add_argument('--test_train', action='store_true', help="test mode (load model and train dataset)")
parser.add_argument('--data_range', type=int, nargs='*', default=[0, -1], help="data range to use")
parser.add_argument('--workspace', type=str, default='workspace')
parser.add_argument('--seed', type=int, default=0)
### training options
# parser.add_argument('--iters', type=int, default=200000, help="training iters")
# parser.add_argument('--lr', type=float, default=5e-3, help="initial learning rate")
# parser.add_argument('--lr_net', type=float, default=5e-4, help="initial learning rate")
parser.add_argument('--ckpt', type=str, default='latest')
parser.add_argument('--num_rays', type=int, default=4096 * 16, help="num rays sampled per image for each training step")
parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch")
parser.add_argument('--max_steps', type=int, default=16, help="max num steps sampled per ray (only valid when using --cuda_ray)")
parser.add_argument('--num_steps', type=int, default=16, help="num steps sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)")
parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)")
### network backbone options
parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training")
parser.add_argument('--lambda_amb', type=float, default=0.1, help="lambda for ambient loss")
parser.add_argument('--fbg', action='store_true', help="frame-wise bg")
parser.add_argument('--exp_eye', action='store_true', help="explicitly control the eyes")
parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye")
parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence")
parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform")
### dataset options
parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)")
# parser.add_argument('--preload', action='store_true', help="preload all data into GPU, accelerate training but use more GPU memory")
# (the default value is for the fox dataset)
    parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3; if > 1, adaptive ray marching will be invoked.")
parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3")
parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location")
parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)")
parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera")
parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)")
parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)")
parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable")
parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region")
    parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in an exponential decay way...")
parser.add_argument('--torso', action='store_true', help="fix head and train torso")
parser.add_argument('--head_ckpt', type=str, default='', help="head model")
### GUI options
parser.add_argument('--gui', action='store_true', help="start a GUI")
parser.add_argument('--W', type=int, default=450, help="GUI width")
parser.add_argument('--H', type=int, default=450, help="GUI height")
parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center")
parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy")
parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel")
### else
parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)")
parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits")
parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off")
parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size")
parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off")
parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension")
parser.add_argument('--part', action='store_true', help="use partial training data (1/10)")
parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)")
parser.add_argument('--train_camera', action='store_true', help="optimize camera pose")
parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size")
parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size")
# asr
parser.add_argument('--asr', action='store_true', help="load asr for real-time app")
parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input")
parser.add_argument('--asr_play', action='store_true', help="play out the audio")
parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto')
# parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self')
parser.add_argument('--asr_save_feats', action='store_true')
# audio FPS
parser.add_argument('--fps', type=int, default=50)
# sliding window left-middle-right length (unit: 20ms)
parser.add_argument('-l', type=int, default=10)
parser.add_argument('-m', type=int, default=50)
parser.add_argument('-r', type=int, default=10)
opt = parser.parse_args()
if opt.method == 'r2talker':
opt.cond_type = 'idexp'
elif opt.method == 'genefaceDagger':
opt.cond_type = 'idexp'
elif opt.method == 'rad-nerf':
opt.cond_type = 'eo'
# assert test mode
opt.test = True
opt.test_train = False
# explicit smoothing
opt.smooth_path = True
opt.smooth_eye = True
opt.smooth_lips = True
assert opt.pose != '', 'Must provide a pose source'
assert opt.aud != '', 'Must provide an audio source'
if opt.O:
opt.fp16 = True
opt.exp_eye = True
opt.cuda_ray = True
# assert opt.cuda_ray, "Only support CUDA ray mode."
print(opt)
seed_everything(opt.seed)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if opt.method == 'r2talker':
model = R2TalkerNeRF(opt)
elif opt.method == 'genefaceDagger':
model = GeneNeRFNetwork(opt)
elif opt.method == 'rad-nerf':
model = NeRFNetwork(opt)
# print(model)
trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, fp16=opt.fp16, metrics=[], use_checkpoint=opt.ckpt)
| test_loader = NeRFDataset_Test(opt, device=device).dataloader() | 0 | 2023-12-04 12:51:59+00:00 | 12k |
DSaurus/threestudio-4dfy | data/uncond_multiview_time.py | [
{
"identifier": "RandomCameraDataModuleConfig",
"path": "data/uncond_time.py",
"snippet": "class RandomCameraDataModuleConfig:\n # height, width, and batch_size should be Union[int, List[int]]\n # but OmegaConf does not support Union of containers\n height: Any = 64\n width: Any = 64\n batch_size: Any = 1\n resolution_milestones: List[int] = field(default_factory=lambda: [])\n eval_height: int = 512\n eval_width: int = 512\n eval_batch_size: int = 1\n n_val_views: int = 1\n n_test_views: int = 120\n elevation_range: Tuple[float, float] = (-10, 90)\n azimuth_range: Tuple[float, float] = (-180, 180)\n camera_distance_range: Tuple[float, float] = (1, 1.5)\n fovy_range: Tuple[float, float] = (\n 40,\n 70,\n ) # in degrees, in vertical direction (along height)\n camera_perturb: float = 0.1\n center_perturb: float = 0.2\n up_perturb: float = 0.02\n light_position_perturb: float = 1.0\n light_distance_range: Tuple[float, float] = (0.8, 1.5)\n eval_elevation_deg: float = 15.0\n eval_camera_distance: float = 1.5\n eval_fovy_deg: float = 70.0\n light_sample_strategy: str = \"dreamfusion\"\n batch_uniform_azimuth: bool = True\n progressive_until: int = 0 # progressive ranges for elevation, azimuth, r, fovy\n\n rays_d_normalize: bool = True\n\n # Dynamic\n static: bool = True\n num_frames: int = 1\n sample_rand_frames: Optional[str] = None\n # Simultaneous training\n simultan: bool = False\n prob_single_view_video: Optional[float] = None\n width_vid: int = 64\n height_vid: int = 64\n num_frames_factor: int = 1\n train_dynamic_camera: Optional[bool] = False\n num_test_loop_factor: int = 1\n num_test_loop_static: int = 4\n test_traj: Optional[str] = None\n\n update_from_json: bool = True"
},
{
"identifier": "RandomCameraDataset",
"path": "data/uncond_time.py",
"snippet": "class RandomCameraDataset(Dataset):\n def __init__(self, cfg: Any, split: str) -> None:\n super().__init__()\n self.cfg: RandomCameraDataModuleConfig = cfg\n self.split = split\n\n if split == \"val\":\n self.n_views = self.cfg.n_val_views\n else:\n self.n_views = self.cfg.n_test_views\n\n num_frames = self.cfg.num_frames\n if self.cfg.static:\n n_views_azimuth = self.n_views\n else:\n if split in [\"val\", \"test\"] and not self.cfg.static:\n num_frames = num_frames * self.cfg.num_frames_factor\n if self.split == \"test\":\n if self.cfg.test_traj in [\"motion_smooth\", \"motion_smooth_full\"]:\n self.n_views = num_frames * 2\n n_views_azimuth = self.n_views // 8\n if self.cfg.test_traj == \"motion_smooth\":\n self.n_views = self.n_views - 4\n num_frames_static = n_views_azimuth\n else:\n n_views_azimuth = num_frames * self.cfg.num_test_loop_factor\n self.n_views = 2 * n_views_azimuth * self.cfg.num_test_loop_static\n num_frames_static = num_frames\n elif self.split == \"val\":\n n_views_azimuth = self.n_views\n\n azimuth_deg: Float[Tensor, \"B\"]\n if self.split == \"val\":\n # make sure the first and last view are not the same\n azimuth_deg = torch.linspace(0, 360.0, n_views_azimuth + 1)[\n :n_views_azimuth\n ]\n # else:\n # azimuth_deg = torch.linspace(0, 360.0, n_views_azimuth\n # )\n elif self.split == \"test\":\n if self.cfg.static:\n azimuth_deg = torch.linspace(0, 360.0, n_views_azimuth)\n else:\n assert n_views_azimuth % self.cfg.num_test_loop_static == 0\n azimuth_deg = []\n for i in range(self.cfg.num_test_loop_static):\n if self.cfg.test_traj in [\"motion_smooth\", \"motion_smooth_full\"]:\n azimuth_start = (self.cfg.num_test_loop_static - i) * 90.0\n azimuth_end = (self.cfg.num_test_loop_static - i - 1) * 90.0\n else:\n azimuth_start = i * 90.0\n azimuth_end = (i + 1) * 90.0\n azimuth_static_deg_i = torch.full(\n (num_frames_static,), azimuth_start\n )\n azimuth_deg.append(azimuth_static_deg_i)\n azimuth_dynamic_deg_i = torch.linspace(\n azimuth_start, azimuth_end, n_views_azimuth\n )\n if self.cfg.test_traj == \"motion_smooth\":\n azimuth_dynamic_deg_i = azimuth_dynamic_deg_i[1:]\n azimuth_deg.append(azimuth_dynamic_deg_i)\n azimuth_deg = torch.cat(azimuth_deg)\n elevation_deg: Float[Tensor, \"B\"] = torch.full_like(\n azimuth_deg, self.cfg.eval_elevation_deg\n )\n camera_distances: Float[Tensor, \"B\"] = torch.full_like(\n elevation_deg, self.cfg.eval_camera_distance\n )\n\n elevation = elevation_deg * math.pi / 180\n azimuth = azimuth_deg * math.pi / 180\n\n # convert spherical coordinates to cartesian coordinates\n # right hand coordinate system, x back, y right, z up\n # elevation in (-90, 90), azimuth from +x to +y in (-180, 180)\n camera_positions: Float[Tensor, \"B 3\"] = torch.stack(\n [\n camera_distances * torch.cos(elevation) * torch.cos(azimuth),\n camera_distances * torch.cos(elevation) * torch.sin(azimuth),\n camera_distances * torch.sin(elevation),\n ],\n dim=-1,\n )\n\n # default scene center at origin\n center: Float[Tensor, \"B 3\"] = torch.zeros_like(camera_positions)\n # default camera up direction as +z\n up: Float[Tensor, \"B 3\"] = torch.as_tensor([0, 0, 1], dtype=torch.float32)[\n None, :\n ].repeat(self.cfg.eval_batch_size, 1)\n\n fovy_deg: Float[Tensor, \"B\"] = torch.full_like(\n elevation_deg, self.cfg.eval_fovy_deg\n )\n fovy = fovy_deg * math.pi / 180\n light_positions: Float[Tensor, \"B 3\"] = camera_positions\n\n lookat: Float[Tensor, \"B 3\"] = F.normalize(center - camera_positions, dim=-1)\n right: Float[Tensor, \"B 3\"] = 
F.normalize(torch.cross(lookat, up), dim=-1)\n up = F.normalize(torch.cross(right, lookat), dim=-1)\n c2w3x4: Float[Tensor, \"B 3 4\"] = torch.cat(\n [torch.stack([right, up, -lookat], dim=-1), camera_positions[:, :, None]],\n dim=-1,\n )\n c2w: Float[Tensor, \"B 4 4\"] = torch.cat(\n [c2w3x4, torch.zeros_like(c2w3x4[:, :1])], dim=1\n )\n c2w[:, 3, 3] = 1.0\n\n # get directions by dividing directions_unit_focal by focal length\n focal_length: Float[Tensor, \"B\"] = (\n 0.5 * self.cfg.eval_height / torch.tan(0.5 * fovy)\n )\n directions_unit_focal = get_ray_directions(\n H=self.cfg.eval_height, W=self.cfg.eval_width, focal=1.0\n )\n directions: Float[Tensor, \"B H W 3\"] = directions_unit_focal[\n None, :, :, :\n ].repeat(self.n_views, 1, 1, 1)\n directions[:, :, :, :2] = (\n directions[:, :, :, :2] / focal_length[:, None, None, None]\n )\n\n rays_o, rays_d = get_rays(\n directions, c2w, keepdim=True, normalize=self.cfg.rays_d_normalize\n )\n proj_mtx: Float[Tensor, \"B 4 4\"] = get_projection_matrix(\n fovy, self.cfg.eval_width / self.cfg.eval_height, 0.01, 100.0\n ) # FIXME: hard-coded near and far\n mvp_mtx: Float[Tensor, \"B 4 4\"] = get_mvp_matrix(c2w, proj_mtx)\n\n self.rays_o, self.rays_d = rays_o, rays_d\n self.mvp_mtx = mvp_mtx\n self.c2w = c2w\n self.camera_positions = camera_positions\n self.light_positions = light_positions\n self.elevation, self.azimuth = elevation, azimuth\n self.elevation_deg, self.azimuth_deg = elevation_deg, azimuth_deg\n self.camera_distances = camera_distances\n self.fovy = fovy\n self.proj_mtx = proj_mtx\n\n if self.cfg.test_traj in [\"motion_smooth\", \"motion_smooth_full\"]:\n frame_times = torch.linspace(0, 1.0, num_frames)\n if self.cfg.test_traj == \"motion_smooth\":\n frame_times = frame_times[: num_frames - 2]\n else:\n frame_times = frame_times[:num_frames]\n frame_times = frame_times.repeat(math.ceil(self.n_views / num_frames))\n else:\n frame_times = torch.linspace(0, 1.0, num_frames).repeat(\n math.ceil(self.n_views / num_frames)\n )\n frame_times = frame_times[: self.n_views]\n frame_times_video = torch.linspace(0, 1.0, num_frames)\n self.frame_times = frame_times\n self.frame_times_video = frame_times_video\n\n def __len__(self):\n return self.n_views\n\n def __getitem__(self, index):\n return {\n \"index\": index,\n \"rays_o\": self.rays_o[index],\n \"rays_d\": self.rays_d[index],\n \"mvp_mtx\": self.mvp_mtx[index],\n \"c2w\": self.c2w[index],\n \"camera_positions\": self.camera_positions[index],\n \"light_positions\": self.light_positions[index],\n \"elevation\": self.elevation_deg[index],\n \"azimuth\": self.azimuth_deg[index],\n \"camera_distances\": self.camera_distances[index],\n \"height\": self.cfg.eval_height,\n \"width\": self.cfg.eval_width,\n \"fovy\": self.fovy[index],\n \"proj_mtx\": self.proj_mtx[index],\n \"frame_times\": self.frame_times[[index]],\n \"frame_times_video\": self.frame_times_video,\n \"train_dynamic_camera\": False,\n }\n\n def collate(self, batch):\n batch = torch.utils.data.default_collate(batch)\n batch.update({\"height\": self.cfg.eval_height, \"width\": self.cfg.eval_width})\n return batch"
},
{
"identifier": "RandomCameraIterableDataset",
"path": "data/uncond_time.py",
"snippet": "class RandomCameraIterableDataset(IterableDataset, Updateable):\n def __init__(self, cfg: Any) -> None:\n super().__init__()\n self.cfg: RandomCameraDataModuleConfig = cfg\n self.heights: List[int] = (\n [self.cfg.height] if isinstance(self.cfg.height, int) else self.cfg.height\n )\n self.widths: List[int] = (\n [self.cfg.width] if isinstance(self.cfg.width, int) else self.cfg.width\n )\n self.batch_sizes: List[int] = (\n [self.cfg.batch_size]\n if isinstance(self.cfg.batch_size, int)\n else self.cfg.batch_size\n )\n assert len(self.heights) == len(self.widths) == len(self.batch_sizes)\n self.resolution_milestones: List[int]\n if (\n len(self.heights) == 1\n and len(self.widths) == 1\n and len(self.batch_sizes) == 1\n ):\n if len(self.cfg.resolution_milestones) > 0:\n threestudio.warn(\n \"Ignoring resolution_milestones since height and width are not changing\"\n )\n self.resolution_milestones = [-1]\n else:\n assert len(self.heights) == len(self.cfg.resolution_milestones) + 1\n self.resolution_milestones = [-1] + self.cfg.resolution_milestones\n\n self.directions_unit_focals = [\n get_ray_directions(H=height, W=width, focal=1.0)\n for (height, width) in zip(self.heights, self.widths)\n ]\n self.height: int = self.heights[0]\n self.width: int = self.widths[0]\n self.batch_size: int = self.batch_sizes[0]\n self.directions_unit_focal = self.directions_unit_focals[0]\n self.elevation_range = self.cfg.elevation_range\n self.azimuth_range = self.cfg.azimuth_range\n self.camera_distance_range = self.cfg.camera_distance_range\n self.fovy_range = self.cfg.fovy_range\n self.simultan_idx = 0\n if self.cfg.simultan:\n self.directions_unit_focals_vid = get_ray_directions(\n H=self.cfg.height_vid, W=self.cfg.width_vid, focal=1.0\n )\n self.height_vid = self.cfg.height_vid\n self.width_vid = self.cfg.width_vid\n else:\n self.directions_unit_focals_vid = None\n self.height_vid = None\n self.width_vid = None\n if self.cfg.train_dynamic_camera:\n self.elevation_range_delta = (\n -180.0 / (16 * self.cfg.num_frames),\n 180.0 / (16 * self.cfg.num_frames),\n )\n self.azimuth_range_delta = (\n -180.0 / (2 * self.cfg.num_frames),\n 180.0 / (2 * self.cfg.num_frames),\n )\n self.camera_distance_range_delta = lambda x: (\n (self.camera_distance_range[0] - x) / self.cfg.num_frames,\n (self.camera_distance_range[1] - x) / self.cfg.num_frames,\n )\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n size_ind = bisect.bisect_right(self.resolution_milestones, global_step) - 1\n self.height = self.heights[size_ind]\n self.width = self.widths[size_ind]\n self.batch_size = self.batch_sizes[size_ind]\n self.directions_unit_focal = self.directions_unit_focals[size_ind]\n threestudio.debug(\n f\"Training height: {self.height}, width: {self.width}, batch_size: {self.batch_size}\"\n )\n # progressive view\n self.progressive_view(global_step)\n\n def __iter__(self):\n while True:\n yield {}\n\n def progressive_view(self, global_step):\n r = min(1.0, global_step / (self.cfg.progressive_until + 1))\n self.elevation_range = [\n (1 - r) * self.cfg.eval_elevation_deg + r * self.cfg.elevation_range[0],\n (1 - r) * self.cfg.eval_elevation_deg + r * self.cfg.elevation_range[1],\n ]\n self.azimuth_range = [\n (1 - r) * 0.0 + r * self.cfg.azimuth_range[0],\n (1 - r) * 0.0 + r * self.cfg.azimuth_range[1],\n ]\n # self.camera_distance_range = [\n # (1 - r) * self.cfg.eval_camera_distance\n # + r * self.cfg.camera_distance_range[0],\n # (1 - r) * self.cfg.eval_camera_distance\n # + r * 
self.cfg.camera_distance_range[1],\n # ]\n # self.fovy_range = [\n # (1 - r) * self.cfg.eval_fovy_deg + r * self.cfg.fovy_range[0],\n # (1 - r) * self.cfg.eval_fovy_deg + r * self.cfg.fovy_range[1],\n # ]\n\n def collate(self, batch) -> Dict[str, Any]:\n # Simultaneous training\n if self.cfg.simultan:\n if self.cfg.prob_single_view_video is not None:\n is_video = random.random() < self.cfg.prob_single_view_video\n else:\n is_video = False\n else:\n is_video = False\n if is_video:\n height = self.height_vid\n width = self.width_vid\n is_video = True\n directions_unit_focal = self.directions_unit_focals_vid\n else:\n height = self.height\n width = self.width\n is_video = False\n directions_unit_focal = self.directions_unit_focal\n if self.cfg.simultan:\n self.simultan_idx += 1\n train_dynamic_camera = self.cfg.train_dynamic_camera and is_video\n batch_size = self.batch_size\n num_frames = self.cfg.num_frames\n if train_dynamic_camera:\n batch_factor = num_frames\n else:\n batch_factor = 1\n batch_size = batch_size * batch_factor\n # sample elevation angles\n elevation_deg: Float[Tensor, \"B\"]\n elevation: Float[Tensor, \"B\"]\n if random.random() < 0.5:\n # sample elevation angles uniformly with a probability 0.5 (biased towards poles)\n elevation_deg = (\n torch.rand(self.batch_size).repeat(batch_factor)\n * (self.cfg.elevation_range[1] - self.cfg.elevation_range[0])\n + self.cfg.elevation_range[0]\n )\n elevation = elevation_deg * math.pi / 180\n else:\n # otherwise sample uniformly on sphere\n elevation_range_percent = [\n self.elevation_range[0] / 180.0 * math.pi,\n self.elevation_range[1] / 180.0 * math.pi,\n ]\n # inverse transform sampling\n elevation = torch.asin(\n (\n torch.rand(self.batch_size).repeat(batch_factor)\n * (\n math.sin(elevation_range_percent[1])\n - math.sin(elevation_range_percent[0])\n )\n + math.sin(elevation_range_percent[0])\n )\n )\n elevation_deg = elevation / math.pi * 180.0\n if train_dynamic_camera:\n elevation_delta_deg = (\n torch.rand(self.batch_size)\n * (self.elevation_range_delta[1] - self.elevation_range_delta[0])\n + self.elevation_range_delta[0]\n ) * torch.arange(num_frames)\n elevation_deg = elevation_deg + elevation_delta_deg\n elevation = elevation + elevation_delta_deg * math.pi / 180\n # sample azimuth angles from a uniform distribution bounded by azimuth_range\n azimuth_deg: Float[Tensor, \"B\"]\n if self.cfg.batch_uniform_azimuth:\n assert batch_factor == 1\n # ensures sampled azimuth angles in a batch cover the whole range\n azimuth_deg = (\n torch.rand(self.batch_size) + torch.arange(self.batch_size)\n ) / self.batch_size * (\n self.azimuth_range[1] - self.azimuth_range[0]\n ) + self.azimuth_range[\n 0\n ]\n else:\n # simple random sampling\n azimuth_deg = (\n torch.rand(self.batch_size).repeat(batch_factor)\n * (self.azimuth_range[1] - self.azimuth_range[0])\n + self.azimuth_range[0]\n )\n azimuth = azimuth_deg * math.pi / 180\n if train_dynamic_camera:\n azimuth_delta_deg = (\n torch.rand(self.batch_size)\n * (self.azimuth_range_delta[1] - self.azimuth_range_delta[0])\n + self.azimuth_range_delta[0]\n ) * torch.arange(num_frames)\n azimuth_deg = azimuth_deg + azimuth_delta_deg\n azimuth = azimuth + elevation_delta_deg * math.pi / 180\n\n # sample distances from a uniform distribution bounded by distance_range\n camera_distances: Float[Tensor, \"B\"] = (\n torch.rand(self.batch_size).repeat(batch_factor)\n * (self.camera_distance_range[1] - self.camera_distance_range[0])\n + self.camera_distance_range[0]\n )\n if train_dynamic_camera:\n 
camera_distance_range_delta = self.camera_distance_range_delta(\n camera_distances[0]\n )\n camera_distance_delta = (\n torch.rand(self.batch_size)\n * (camera_distance_range_delta[1] - camera_distance_range_delta[0])\n + camera_distance_range_delta[0]\n ) * torch.arange(num_frames)\n camera_distances = camera_distances + camera_distance_delta\n\n # convert spherical coordinates to cartesian coordinates\n # right hand coordinate system, x back, y right, z up\n # elevation in (-90, 90), azimuth from +x to +y in (-180, 180)\n camera_positions: Float[Tensor, \"B 3\"] = torch.stack(\n [\n camera_distances * torch.cos(elevation) * torch.cos(azimuth),\n camera_distances * torch.cos(elevation) * torch.sin(azimuth),\n camera_distances * torch.sin(elevation),\n ],\n dim=-1,\n )\n\n # default scene center at origin\n center: Float[Tensor, \"B 3\"] = torch.zeros_like(camera_positions)\n # default camera up direction as +z\n up: Float[Tensor, \"B 3\"] = torch.as_tensor([0, 0, 1], dtype=torch.float32)[\n None, :\n ].repeat(batch_size, 1)\n\n # sample camera perturbations from a uniform distribution [-camera_perturb, camera_perturb]\n camera_perturb: Float[Tensor, \"B 3\"] = (\n torch.rand(self.batch_size, 3) * 2 * self.cfg.camera_perturb\n - self.cfg.camera_perturb\n ).repeat(batch_factor, 1)\n camera_positions = camera_positions + camera_perturb\n # sample center perturbations from a normal distribution with mean 0 and std center_perturb\n center_perturb: Float[Tensor, \"B 3\"] = (\n torch.randn(self.batch_size, 3) * self.cfg.center_perturb\n ).repeat(batch_factor, 1)\n center = center + center_perturb\n # sample up perturbations from a normal distribution with mean 0 and std up_perturb\n up_perturb: Float[Tensor, \"B 3\"] = (\n torch.randn(self.batch_size, 3) * self.cfg.up_perturb\n ).repeat(batch_factor, 1)\n up = up + up_perturb\n\n # sample fovs from a uniform distribution bounded by fov_range\n fovy_deg: Float[Tensor, \"B\"] = (\n torch.rand(self.batch_size) * (self.fovy_range[1] - self.fovy_range[0])\n + self.fovy_range[0]\n ).repeat(batch_factor)\n fovy = fovy_deg * math.pi / 180\n\n # sample light distance from a uniform distribution bounded by light_distance_range\n light_distances: Float[Tensor, \"B\"] = (\n torch.rand(self.batch_size)\n * (self.cfg.light_distance_range[1] - self.cfg.light_distance_range[0])\n + self.cfg.light_distance_range[0]\n ).repeat(batch_factor)\n\n if self.cfg.light_sample_strategy == \"dreamfusion\":\n # sample light direction from a normal distribution with mean camera_position and std light_position_perturb\n light_direction: Float[Tensor, \"B 3\"] = F.normalize(\n camera_positions\n + torch.randn(self.batch_size, 3).repeat(batch_factor, 1)\n * self.cfg.light_position_perturb,\n dim=-1,\n )\n # get light position by scaling light direction by light distance\n light_positions: Float[Tensor, \"B 3\"] = (\n light_direction * light_distances[:, None]\n )\n elif self.cfg.light_sample_strategy == \"magic3d\":\n # sample light direction within restricted angle range (pi/3)\n local_z = F.normalize(camera_positions, dim=-1)\n local_x = F.normalize(\n torch.stack(\n [local_z[:, 1], -local_z[:, 0], torch.zeros_like(local_z[:, 0])],\n dim=-1,\n ),\n dim=-1,\n )\n local_y = F.normalize(torch.cross(local_z, local_x, dim=-1), dim=-1)\n rot = torch.stack([local_x, local_y, local_z], dim=-1)\n light_azimuth = (\n torch.rand(self.batch_size) * math.pi - 2 * math.pi\n ).repeat(\n batch_factor\n ) # [-pi, pi]\n light_elevation = (\n torch.rand(self.batch_size) * math.pi / 3 + math.pi 
/ 6\n ).repeat(\n batch_factor\n ) # [pi/6, pi/2]\n light_positions_local = torch.stack(\n [\n light_distances\n * torch.cos(light_elevation)\n * torch.cos(light_azimuth),\n light_distances\n * torch.cos(light_elevation)\n * torch.sin(light_azimuth),\n light_distances * torch.sin(light_elevation),\n ],\n dim=-1,\n )\n light_positions = (rot @ light_positions_local[:, :, None])[:, :, 0]\n else:\n raise ValueError(\n f\"Unknown light sample strategy: {self.cfg.light_sample_strategy}\"\n )\n\n lookat: Float[Tensor, \"B 3\"] = F.normalize(center - camera_positions, dim=-1)\n right: Float[Tensor, \"B 3\"] = F.normalize(torch.cross(lookat, up), dim=-1)\n up = F.normalize(torch.cross(right, lookat), dim=-1)\n c2w3x4: Float[Tensor, \"B 3 4\"] = torch.cat(\n [torch.stack([right, up, -lookat], dim=-1), camera_positions[:, :, None]],\n dim=-1,\n )\n c2w: Float[Tensor, \"B 4 4\"] = torch.cat(\n [c2w3x4, torch.zeros_like(c2w3x4[:, :1])], dim=1\n )\n c2w[:, 3, 3] = 1.0\n\n # get directions by dividing directions_unit_focal by focal length\n focal_length: Float[Tensor, \"B\"] = 0.5 * height / torch.tan(0.5 * fovy)\n directions: Float[Tensor, \"B H W 3\"] = directions_unit_focal[\n None, :, :, :\n ].repeat(batch_size, 1, 1, 1)\n directions[:, :, :, :2] = (\n directions[:, :, :, :2] / focal_length[:, None, None, None]\n )\n\n # Importance note: the returned rays_d MUST be normalized!\n rays_o, rays_d = get_rays(\n directions, c2w, keepdim=True, normalize=self.cfg.rays_d_normalize\n )\n proj_mtx: Float[Tensor, \"B 4 4\"] = get_projection_matrix(\n fovy, width / height, 0.01, 100.0\n ) # FIXME: hard-coded near and far\n mvp_mtx: Float[Tensor, \"B 4 4\"] = get_mvp_matrix(c2w, proj_mtx)\n\n # Dynamic\n if self.cfg.sample_rand_frames == \"t0\":\n t0 = torch.FloatTensor(1).uniform_(0, 1 / num_frames).item()\n frame_times = torch.linspace(\n t0, t0 + (num_frames - 1) / num_frames, num_frames\n )\n else:\n frame_times = torch.linspace(0.0, 1.0, num_frames)\n return {\n \"rays_o\": rays_o,\n \"rays_d\": rays_d,\n \"mvp_mtx\": mvp_mtx,\n \"camera_positions\": camera_positions,\n \"c2w\": c2w,\n \"light_positions\": light_positions,\n \"elevation\": elevation_deg,\n \"azimuth\": azimuth_deg,\n \"camera_distances\": camera_distances,\n \"height\": height,\n \"width\": width,\n \"fovy\": fovy,\n \"proj_mtx\": proj_mtx,\n \"frame_times\": frame_times,\n \"frame_times_video\": frame_times,\n \"is_video\": is_video,\n \"train_dynamic_camera\": train_dynamic_camera,\n }"
}
] | import math
import os
import random
import cv2
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from dataclasses import dataclass, field
from threestudio import register
from threestudio.utils.base import Updateable
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import get_rank
from threestudio.utils.ops import (
get_mvp_matrix,
get_projection_matrix,
get_ray_directions,
get_rays,
)
from threestudio.utils.typing import *
from torch.utils.data import DataLoader, Dataset, IterableDataset
from .uncond_time import (
RandomCameraDataModuleConfig,
RandomCameraDataset,
RandomCameraIterableDataset,
) | 9,819 | self.cfg.n_view, dim=0
)
* self.cfg.light_position_perturb,
dim=-1,
)
# get light position by scaling light direction by light distance
light_positions: Float[Tensor, "B 3"] = (
light_direction * light_distances[:, None]
)
elif self.cfg.light_sample_strategy == "magic3d":
# sample light direction within restricted angle range (pi/3)
local_z = F.normalize(camera_positions, dim=-1)
local_x = F.normalize(
torch.stack(
[local_z[:, 1], -local_z[:, 0], torch.zeros_like(local_z[:, 0])],
dim=-1,
),
dim=-1,
)
local_y = F.normalize(torch.cross(local_z, local_x, dim=-1), dim=-1)
rot = torch.stack([local_x, local_y, local_z], dim=-1)
light_azimuth = (
torch.rand(real_batch_size) * math.pi - 2 * math.pi
).repeat_interleave(
self.cfg.n_view, dim=0
) # [-pi, pi]
light_elevation = (
torch.rand(real_batch_size) * math.pi / 3 + math.pi / 6
).repeat_interleave(
self.cfg.n_view, dim=0
) # [pi/6, pi/2]
light_positions_local = torch.stack(
[
light_distances
* torch.cos(light_elevation)
* torch.cos(light_azimuth),
light_distances
* torch.cos(light_elevation)
* torch.sin(light_azimuth),
light_distances * torch.sin(light_elevation),
],
dim=-1,
)
light_positions = (rot @ light_positions_local[:, :, None])[:, :, 0]
else:
raise ValueError(
f"Unknown light sample strategy: {self.cfg.light_sample_strategy}"
)
lookat: Float[Tensor, "B 3"] = F.normalize(center - camera_positions, dim=-1)
right: Float[Tensor, "B 3"] = F.normalize(torch.cross(lookat, up), dim=-1)
up = F.normalize(torch.cross(right, lookat), dim=-1)
c2w3x4: Float[Tensor, "B 3 4"] = torch.cat(
[torch.stack([right, up, -lookat], dim=-1), camera_positions[:, :, None]],
dim=-1,
)
c2w: Float[Tensor, "B 4 4"] = torch.cat(
[c2w3x4, torch.zeros_like(c2w3x4[:, :1])], dim=1
)
c2w[:, 3, 3] = 1.0
# get directions by dividing directions_unit_focal by focal length
focal_length: Float[Tensor, "B"] = 0.5 * self.height / torch.tan(0.5 * fovy)
directions: Float[Tensor, "B H W 3"] = self.directions_unit_focal[
None, :, :, :
].repeat(self.batch_size, 1, 1, 1)
directions[:, :, :, :2] = (
directions[:, :, :, :2] / focal_length[:, None, None, None]
)
        # Important note: the returned rays_d MUST be normalized!
rays_o, rays_d = get_rays(
directions, c2w, keepdim=True, normalize=self.cfg.rays_d_normalize
)
proj_mtx: Float[Tensor, "B 4 4"] = get_projection_matrix(
fovy, self.width / self.height, 0.1, 1000.0
) # FIXME: hard-coded near and far
mvp_mtx: Float[Tensor, "B 4 4"] = get_mvp_matrix(c2w, proj_mtx)
if self.cfg.sample_rand_frames == "t0":
t0 = torch.FloatTensor(1).uniform_(0, 1 / self.cfg.num_frames).item()
frame_times = torch.linspace(
t0,
t0 + (self.cfg.num_frames - 1) / self.cfg.num_frames,
self.cfg.num_frames,
)
else:
frame_times = torch.linspace(0.0, 1.0, self.cfg.num_frames)
return {
"rays_o": rays_o,
"rays_d": rays_d,
"mvp_mtx": mvp_mtx,
"camera_positions": camera_positions,
"c2w": c2w,
"light_positions": light_positions,
"elevation": elevation_deg,
"azimuth": azimuth_deg,
"camera_distances": camera_distances,
"height": self.height,
"width": self.width,
"fovy": fovy,
"frame_times": frame_times,
"train_dynamic_camera": False,
}
@register("4dfy-random-multiview-camera-datamodule")
class RandomMultiviewCameraDataModule(pl.LightningDataModule):
cfg: RandomMultiviewCameraDataModuleConfig
def __init__(self, cfg: Optional[Union[dict, DictConfig]] = None) -> None:
super().__init__()
self.cfg = parse_structured(RandomMultiviewCameraDataModuleConfig, cfg)
def setup(self, stage=None) -> None:
if stage in [None, "fit"]:
self.train_dataset = RandomMultiviewCameraIterableDataset(self.cfg)
if stage in [None, "fit", "validate"]:
|
@dataclass
class RandomMultiviewCameraDataModuleConfig(RandomCameraDataModuleConfig):
relative_radius: bool = True
n_view: int = 1
zoom_range: Tuple[float, float] = (1.0, 1.0)
rays_d_normalize: bool = True
class RandomMultiviewCameraIterableDataset(RandomCameraIterableDataset):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.zoom_range = self.cfg.zoom_range
def collate(self, batch) -> Dict[str, Any]:
assert (
self.batch_size % self.cfg.n_view == 0
), f"batch_size ({self.batch_size}) must be dividable by n_view ({self.cfg.n_view})!"
real_batch_size = self.batch_size // self.cfg.n_view
# sample elevation angles
elevation_deg: Float[Tensor, "B"]
elevation: Float[Tensor, "B"]
if random.random() < 0.5:
# sample elevation angles uniformly with a probability 0.5 (biased towards poles)
elevation_deg = (
torch.rand(real_batch_size)
* (self.elevation_range[1] - self.elevation_range[0])
+ self.elevation_range[0]
).repeat_interleave(self.cfg.n_view, dim=0)
elevation = elevation_deg * math.pi / 180
else:
# otherwise sample uniformly on sphere
elevation_range_percent = [
(self.elevation_range[0] + 90.0) / 180.0,
(self.elevation_range[1] + 90.0) / 180.0,
]
# inverse transform sampling
elevation = torch.asin(
2
* (
torch.rand(real_batch_size)
* (elevation_range_percent[1] - elevation_range_percent[0])
+ elevation_range_percent[0]
)
- 1.0
).repeat_interleave(self.cfg.n_view, dim=0)
elevation_deg = elevation / math.pi * 180.0
# sample azimuth angles from a uniform distribution bounded by azimuth_range
azimuth_deg: Float[Tensor, "B"]
# ensures sampled azimuth angles in a batch cover the whole range
azimuth_deg = (
torch.rand(real_batch_size).reshape(-1, 1)
+ torch.arange(self.cfg.n_view).reshape(1, -1)
).reshape(-1) / self.cfg.n_view * (
self.azimuth_range[1] - self.azimuth_range[0]
) + self.azimuth_range[
0
]
azimuth = azimuth_deg * math.pi / 180
######## Different from original ########
# sample fovs from a uniform distribution bounded by fov_range
fovy_deg: Float[Tensor, "B"] = (
torch.rand(real_batch_size) * (self.fovy_range[1] - self.fovy_range[0])
+ self.fovy_range[0]
).repeat_interleave(self.cfg.n_view, dim=0)
fovy = fovy_deg * math.pi / 180
# sample distances from a uniform distribution bounded by distance_range
camera_distances: Float[Tensor, "B"] = (
torch.rand(real_batch_size)
* (self.camera_distance_range[1] - self.camera_distance_range[0])
+ self.camera_distance_range[0]
).repeat_interleave(self.cfg.n_view, dim=0)
if self.cfg.relative_radius:
scale = 1 / torch.tan(0.5 * fovy)
camera_distances = scale * camera_distances
# zoom in by decreasing fov after camera distance is fixed
zoom: Float[Tensor, "B"] = (
torch.rand(real_batch_size) * (self.zoom_range[1] - self.zoom_range[0])
+ self.zoom_range[0]
).repeat_interleave(self.cfg.n_view, dim=0)
fovy = fovy * zoom
fovy_deg = fovy_deg * zoom
###########################################
# convert spherical coordinates to cartesian coordinates
# right hand coordinate system, x back, y right, z up
# elevation in (-90, 90), azimuth from +x to +y in (-180, 180)
camera_positions: Float[Tensor, "B 3"] = torch.stack(
[
camera_distances * torch.cos(elevation) * torch.cos(azimuth),
camera_distances * torch.cos(elevation) * torch.sin(azimuth),
camera_distances * torch.sin(elevation),
],
dim=-1,
)
# default scene center at origin
center: Float[Tensor, "B 3"] = torch.zeros_like(camera_positions)
# default camera up direction as +z
up: Float[Tensor, "B 3"] = torch.as_tensor([0, 0, 1], dtype=torch.float32)[
None, :
].repeat(self.batch_size, 1)
# sample camera perturbations from a uniform distribution [-camera_perturb, camera_perturb]
camera_perturb: Float[Tensor, "B 3"] = (
torch.rand(real_batch_size, 3) * 2 * self.cfg.camera_perturb
- self.cfg.camera_perturb
).repeat_interleave(self.cfg.n_view, dim=0)
camera_positions = camera_positions + camera_perturb
# sample center perturbations from a normal distribution with mean 0 and std center_perturb
center_perturb: Float[Tensor, "B 3"] = (
torch.randn(real_batch_size, 3) * self.cfg.center_perturb
).repeat_interleave(self.cfg.n_view, dim=0)
center = center + center_perturb
# sample up perturbations from a normal distribution with mean 0 and std up_perturb
up_perturb: Float[Tensor, "B 3"] = (
torch.randn(real_batch_size, 3) * self.cfg.up_perturb
).repeat_interleave(self.cfg.n_view, dim=0)
up = up + up_perturb
# sample light distance from a uniform distribution bounded by light_distance_range
light_distances: Float[Tensor, "B"] = (
torch.rand(real_batch_size)
* (self.cfg.light_distance_range[1] - self.cfg.light_distance_range[0])
+ self.cfg.light_distance_range[0]
).repeat_interleave(self.cfg.n_view, dim=0)
if self.cfg.light_sample_strategy == "dreamfusion":
# sample light direction from a normal distribution with mean camera_position and std light_position_perturb
light_direction: Float[Tensor, "B 3"] = F.normalize(
camera_positions
+ torch.randn(real_batch_size, 3).repeat_interleave(
self.cfg.n_view, dim=0
)
* self.cfg.light_position_perturb,
dim=-1,
)
# get light position by scaling light direction by light distance
light_positions: Float[Tensor, "B 3"] = (
light_direction * light_distances[:, None]
)
elif self.cfg.light_sample_strategy == "magic3d":
# sample light direction within restricted angle range (pi/3)
local_z = F.normalize(camera_positions, dim=-1)
local_x = F.normalize(
torch.stack(
[local_z[:, 1], -local_z[:, 0], torch.zeros_like(local_z[:, 0])],
dim=-1,
),
dim=-1,
)
local_y = F.normalize(torch.cross(local_z, local_x, dim=-1), dim=-1)
rot = torch.stack([local_x, local_y, local_z], dim=-1)
light_azimuth = (
torch.rand(real_batch_size) * math.pi - 2 * math.pi
).repeat_interleave(
self.cfg.n_view, dim=0
) # [-pi, pi]
light_elevation = (
torch.rand(real_batch_size) * math.pi / 3 + math.pi / 6
).repeat_interleave(
self.cfg.n_view, dim=0
) # [pi/6, pi/2]
light_positions_local = torch.stack(
[
light_distances
* torch.cos(light_elevation)
* torch.cos(light_azimuth),
light_distances
* torch.cos(light_elevation)
* torch.sin(light_azimuth),
light_distances * torch.sin(light_elevation),
],
dim=-1,
)
light_positions = (rot @ light_positions_local[:, :, None])[:, :, 0]
else:
raise ValueError(
f"Unknown light sample strategy: {self.cfg.light_sample_strategy}"
)
lookat: Float[Tensor, "B 3"] = F.normalize(center - camera_positions, dim=-1)
right: Float[Tensor, "B 3"] = F.normalize(torch.cross(lookat, up), dim=-1)
up = F.normalize(torch.cross(right, lookat), dim=-1)
c2w3x4: Float[Tensor, "B 3 4"] = torch.cat(
[torch.stack([right, up, -lookat], dim=-1), camera_positions[:, :, None]],
dim=-1,
)
c2w: Float[Tensor, "B 4 4"] = torch.cat(
[c2w3x4, torch.zeros_like(c2w3x4[:, :1])], dim=1
)
c2w[:, 3, 3] = 1.0
# get directions by dividing directions_unit_focal by focal length
focal_length: Float[Tensor, "B"] = 0.5 * self.height / torch.tan(0.5 * fovy)
directions: Float[Tensor, "B H W 3"] = self.directions_unit_focal[
None, :, :, :
].repeat(self.batch_size, 1, 1, 1)
directions[:, :, :, :2] = (
directions[:, :, :, :2] / focal_length[:, None, None, None]
)
        # Important note: the returned rays_d MUST be normalized!
rays_o, rays_d = get_rays(
directions, c2w, keepdim=True, normalize=self.cfg.rays_d_normalize
)
proj_mtx: Float[Tensor, "B 4 4"] = get_projection_matrix(
fovy, self.width / self.height, 0.1, 1000.0
) # FIXME: hard-coded near and far
mvp_mtx: Float[Tensor, "B 4 4"] = get_mvp_matrix(c2w, proj_mtx)
if self.cfg.sample_rand_frames == "t0":
t0 = torch.FloatTensor(1).uniform_(0, 1 / self.cfg.num_frames).item()
frame_times = torch.linspace(
t0,
t0 + (self.cfg.num_frames - 1) / self.cfg.num_frames,
self.cfg.num_frames,
)
else:
frame_times = torch.linspace(0.0, 1.0, self.cfg.num_frames)
return {
"rays_o": rays_o,
"rays_d": rays_d,
"mvp_mtx": mvp_mtx,
"camera_positions": camera_positions,
"c2w": c2w,
"light_positions": light_positions,
"elevation": elevation_deg,
"azimuth": azimuth_deg,
"camera_distances": camera_distances,
"height": self.height,
"width": self.width,
"fovy": fovy,
"frame_times": frame_times,
"train_dynamic_camera": False,
}
@register("4dfy-random-multiview-camera-datamodule")
class RandomMultiviewCameraDataModule(pl.LightningDataModule):
cfg: RandomMultiviewCameraDataModuleConfig
def __init__(self, cfg: Optional[Union[dict, DictConfig]] = None) -> None:
super().__init__()
self.cfg = parse_structured(RandomMultiviewCameraDataModuleConfig, cfg)
def setup(self, stage=None) -> None:
if stage in [None, "fit"]:
self.train_dataset = RandomMultiviewCameraIterableDataset(self.cfg)
if stage in [None, "fit", "validate"]: | self.val_dataset = RandomCameraDataset(self.cfg, "val") | 1 | 2023-12-01 15:10:28+00:00 | 12k |
camenduru/magicanimate-hf | magicanimate/models/unet_controlnet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n output_states = ()\n\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, 
encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "CrossAttnUpBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n attentions = []\n motion_modules = []\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n out_channels // attn_num_head_channels,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n res_hidden_states_tuple,\n temb=None,\n encoder_hidden_states=None,\n upsample_size=None,\n attention_mask=None,\n ):\n for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n 
encoder_hidden_states,\n )[0]\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n \n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n \n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "DownBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n \n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample3D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None):\n output_states = ()\n\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n\n # add motion module\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n output_states += (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states += (hidden_states,)\n\n return hidden_states, output_states"
},
{
"identifier": "UNetMidBlock3DCrossAttn",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attn_num_head_channels=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n\n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.attn_num_head_channels = attn_num_head_channels\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n attentions = []\n motion_modules = []\n\n for _ in range(num_layers):\n if dual_cross_attention:\n raise NotImplementedError\n attentions.append(\n Transformer3DModel(\n attn_num_head_channels,\n in_channels // attn_num_head_channels,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=in_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n resnets.append(\n ResnetBlock3D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n\n self.attentions = nn.ModuleList(attentions)\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):\n hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n hidden_states = resnet(hidden_states, temb)\n\n return hidden_states"
},
{
"identifier": "UpBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n\n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n ):\n super().__init__()\n resnets = []\n motion_modules = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock3D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n motion_modules.append(\n get_motion_module(\n in_channels=out_channels,\n motion_module_type=motion_module_type, \n motion_module_kwargs=motion_module_kwargs,\n ) if use_motion_module else None\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.motion_modules = nn.ModuleList(motion_modules)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None,):\n for resnet, motion_module in zip(self.resnets, self.motion_modules):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)\n if motion_module is not None:\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states"
},
{
"identifier": "get_down_block",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n \n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n \n motion_module_type=None,\n motion_module_kwargs=None,\n):\n down_block_type = down_block_type[7:] if down_block_type.startswith(\"UNetRes\") else down_block_type\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")"
},
{
"identifier": "get_up_block",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n attn_num_head_channels,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n \n use_motion_module=None,\n motion_module_type=None,\n motion_module_kwargs=None,\n):\n up_block_type = up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attn_num_head_channels,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")"
},
{
"identifier": "InflatedConv3d",
"path": "magicanimate/models/resnet.py",
"snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x"
}
] | from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from magicanimate.models.unet_3d_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d
from diffusers.utils import WEIGHTS_NAME
import os
import json
import torch
import torch.nn as nn
import torch.utils.checkpoint | 8,210 | ):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn":
self.mid_block = UNetMidBlock3DCrossAttn(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift=resnet_time_scale_shift,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[-1],
resnet_groups=norm_num_groups,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and motion_module_mid_block,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
else:
raise ValueError(f"unknown mid_block_type : {mid_block_type}")
# count how many layers upsample the videos
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
| # *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn":
self.mid_block = UNetMidBlock3DCrossAttn(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift=resnet_time_scale_shift,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[-1],
resnet_groups=norm_num_groups,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and motion_module_mid_block,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
else:
raise ValueError(f"unknown mid_block_type : {mid_block_type}")
# count how many layers upsample the videos
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
| up_block = get_up_block( | 6 | 2023-12-04 20:47:34+00:00 | 12k |
TISUnion/PrimeBackup | prime_backup/mcdr/task/backup/prune_backup_task.py | [
{
"identifier": "DeleteBackupAction",
"path": "prime_backup/action/delete_backup_action.py",
"snippet": "class DeleteBackupAction(Action[DeleteBackupResult]):\n\tdef __init__(self, backup_id: int):\n\t\tsuper().__init__()\n\t\tself.backup_id = misc_utils.ensure_type(backup_id, int)\n\n\tdef run(self) -> DeleteBackupResult:\n\t\tself.logger.info('Deleting backup #{}'.format(self.backup_id))\n\t\twith DbAccess.open_session() as session:\n\t\t\tbackup = session.get_backup(self.backup_id)\n\t\t\tinfo = BackupInfo.of(backup)\n\n\t\t\thashes = []\n\t\t\tfor file in backup.files:\n\t\t\t\tif file.blob_hash is not None:\n\t\t\t\t\thashes.append(file.blob_hash)\n\t\t\t\tsession.delete_file(file)\n\t\t\tsession.delete_backup(backup)\n\n\t\torphan_blob_cleaner = DeleteOrphanBlobsAction(hashes, quiet=True)\n\t\tbls = orphan_blob_cleaner.run()\n\n\t\tself.logger.info('Deleted backup #{} done, -{} blobs (size {} / {})'.format(\n\t\t\tinfo.id, bls.count, ByteCount(bls.stored_size).auto_str(), ByteCount(bls.raw_size).auto_str(),\n\t\t))\n\t\treturn DeleteBackupResult(info, bls)"
},
{
"identifier": "ListBackupAction",
"path": "prime_backup/action/list_backup_action.py",
"snippet": "class ListBackupAction(_ListBackupActionBase[List[BackupInfo]]):\n\tdef run(self) -> List[BackupInfo]:\n\t\twith DbAccess.open_session() as session:\n\t\t\tbackups = session.list_backup(backup_filter=self.backup_filter, limit=self.limit, offset=self.offset)\n\t\t\treturn [BackupInfo.of(backup) for backup in backups]"
},
{
"identifier": "PruneSetting",
"path": "prime_backup/config/prune_config.py",
"snippet": "class PruneSetting(Serializable):\n\tenabled: bool = False\n\n\t# <=0 means no limit\n\tmax_amount: int = 0\n\tmax_lifetime: Duration = Duration('0s')\n\n\t# https://pve.proxmox.com/wiki/Backup_and_Restore#vzdump_retention\n\t# https://pbs.proxmox.com/docs/prune-simulator/\n\t# -1 means infinity\n\tlast: int = -1\n\thour: int = 0\n\tday: int = 0\n\tweek: int = 0\n\tmonth: int = 0\n\tyear: int = 0"
},
{
"identifier": "BackupNotFound",
"path": "prime_backup/exceptions.py",
"snippet": "class BackupNotFound(PrimeBackupError):\n\tdef __init__(self, backup_id: int):\n\t\tsuper().__init__()\n\t\tself.backup_id = backup_id"
},
{
"identifier": "HeavyTask",
"path": "prime_backup/mcdr/task/basic_task.py",
"snippet": "class HeavyTask(_BasicTask[_T], ABC):\n\t\"\"\"\n\tFor tasks that require DB access and does some operations on blobs / database\n\t\"\"\"\n\tMAX_ONGOING_TASK = 1"
},
{
"identifier": "TextComponents",
"path": "prime_backup/mcdr/text_components.py",
"snippet": "class TextComponents:\n\t@classmethod\n\tdef tr(cls, key, *args, **kwargs):\n\t\tfrom prime_backup.utils.mcdr_utils import tr\n\t\treturn tr('text_components.' + key, *args, **kwargs)\n\n\t@classmethod\n\tdef auto(cls, value: Any) -> RTextBase:\n\t\tif isinstance(value, bool):\n\t\t\treturn cls.boolean(value)\n\t\telif isinstance(value, (int, float)):\n\t\t\treturn cls.number(value)\n\t\telif isinstance(value, Duration):\n\t\t\treturn cls.duration(value)\n\t\telif isinstance(value, Operator):\n\t\t\treturn cls.operator(value)\n\t\telif isinstance(value, ByteCount):\n\t\t\treturn cls.file_size(value)\n\t\telif isinstance(value, Path):\n\t\t\treturn cls.file_name(value)\n\t\telif isinstance(value, datetime.datetime):\n\t\t\treturn cls.date(value)\n\t\telse:\n\t\t\treturn RTextBase.from_any(value)\n\n\t@classmethod\n\tdef backup_brief(cls, backup: BackupInfo, *, backup_id_fancy: bool = True) -> RTextBase:\n\t\t# \"backup #1: foobar\"\n\t\treturn RTextList(cls.tr(\n\t\t\t'backup_brief',\n\t\t\tcls.backup_id(backup.id, hover=backup_id_fancy, click=backup_id_fancy),\n\t\t\tcls.backup_comment(backup.comment),\n\t\t))\n\n\t@classmethod\n\tdef backup_comment(cls, comment: str) -> RTextBase:\n\t\tif len(comment) > 0:\n\t\t\tif (er := backup_utils.extract_backup_comment_translation_key(comment)) is not None:\n\t\t\t\targs = er.args\n\t\t\t\tif er.key == 'pre_restore' and len(args) == 0:\n\t\t\t\t\targs = ('?',)\n\t\t\t\treturn cls.tr(f'backup_comment.{er.key}', *args)\n\t\t\treturn RText(comment)\n\t\telse:\n\t\t\treturn cls.tr('backup_comment.none').set_color(RColor.gray).set_styles(RStyle.italic)\n\n\t@classmethod\n\tdef backup_date(cls, backup: BackupInfo):\n\t\treturn cls.date(backup.date)\n\n\t@classmethod\n\tdef backup_full(cls, backup: BackupInfo, operation_buttons: bool = False, *, show_flags: bool = False) -> RTextBase:\n\t\t# \"[#1] [>] [x] H-- 1.2GiB 2023-11-30 09:30:13: foobar\"\n\t\tt_bid = cls.backup_id(backup.id)\n\n\t\trtl = RTextList(RText('[', RColor.gray), t_bid, RText('] ', RColor.gray))\n\t\tif operation_buttons:\n\t\t\trtl.append(RText('[>]', color=RColor.dark_green).h(cls.tr('backup_full.restore', t_bid)).c(RAction.suggest_command, mkcmd(f'back {backup.id}')), ' ')\n\t\t\tif not backup.tags.is_protected():\n\t\t\t\trtl.append(RText('[x]', color=RColor.red).h(cls.tr('backup_full.delete', t_bid)).c(RAction.suggest_command, mkcmd(f'delete {backup.id}')), ' ')\n\t\t\telse:\n\t\t\t\trtl.append(RText('[x]', color=RColor.dark_gray).h(cls.tr('backup_full.protected', t_bid)), ' ')\n\n\t\tif show_flags:\n\t\t\tfor name in [BackupTagName.hidden, BackupTagName.pre_restore_backup, BackupTagName.protected]:\n\t\t\t\tmisc_utils.assert_true(name.value.type is bool, 'it should be a bool field')\n\t\t\t\tflag = backup.tags.get(name) is True\n\t\t\t\tif flag:\n\t\t\t\t\trtl.append(name.value.flag)\n\t\t\t\telse:\n\t\t\t\t\trtl.append(RText('-', RColor.dark_gray))\n\t\t\trtl.append(' ')\n\n\t\trtl.append(\n\t\t\tcls.backup_size(backup), ' ',\n\t\t\tcls.backup_date(backup), RText(': ', RColor.gray),\n\t\t\tcls.backup_comment(backup.comment).h(cls.tr('backup_full.creator', cls.operator(backup.creator))),\n\t\t)\n\t\treturn rtl\n\n\t@classmethod\n\tdef backup_id(cls, backup_id: Union[int, BackupInfo], *, hover: bool = True, click: bool = True) -> RTextBase:\n\t\tif isinstance(backup_id, BackupInfo):\n\t\t\tbackup_id = backup_id.id\n\t\ttext = RText(f'#{backup_id}', TextColors.backup_id)\n\t\tif hover:\n\t\t\ttext.h(cls.tr('backup_id.hover', RText(backup_id, TextColors.backup_id)))\n\t\tif 
click:\n\t\t\ttext.c(RAction.run_command, mkcmd(f'show {backup_id}'))\n\t\treturn text\n\n\t@classmethod\n\tdef backup_id_list(cls, backup_ids: Iterable[Any], **kwargs) -> RTextBase:\n\t\treturn RTextList(\n\t\t\t'[',\n\t\t\tRTextBase.join(', ', [cls.backup_id(backup_id, **kwargs) for backup_id in backup_ids]),\n\t\t\t']',\n\t\t)\n\n\t@classmethod\n\tdef backup_size(cls, backup_or_blob_list_summary: Union[BackupInfo, BlobListSummary], *, ndigits: int = 2) -> RTextBase:\n\t\tb = backup_or_blob_list_summary\n\t\treturn cls.file_size(b.raw_size, ndigits=ndigits).h(cls.dual_size_hover(b.raw_size, b.stored_size))\n\n\t@classmethod\n\tdef blob_list_summary_store_size(cls, bls: BlobListSummary) -> RTextBase:\n\t\treturn cls.file_size(bls.raw_size).h(cls.dual_size_hover(bls.raw_size, bls.stored_size))\n\n\t@classmethod\n\tdef boolean(cls, value: bool) -> RTextBase:\n\t\treturn RText(str(value).lower(), RColor.green if value else RColor.red)\n\n\t@classmethod\n\tdef command(cls, s: str, *, color: RColor = RColor.gray, suggest: bool = False, run: bool = False, raw: bool = False) -> RTextBase:\n\t\tcmd = s if raw else mkcmd(s)\n\t\ttext = RText(cmd, color)\n\t\tif suggest:\n\t\t\ttext.h(cls.tr('command.suggest', cmd)).c(RAction.suggest_command, cmd)\n\t\telif run:\n\t\t\ttext.h(cls.tr('command.run', cmd)).c(RAction.run_command, cmd)\n\t\treturn text\n\n\t@classmethod\n\tdef compress_method(cls, compress_method: Union[str, CompressMethod]) -> RTextBase:\n\t\tif isinstance(compress_method, CompressMethod):\n\t\t\tcompress_method = compress_method.name\n\t\treturn RText(compress_method, RColor.light_purple)\n\n\t@classmethod\n\tdef confirm_hint(cls, what: RTextBase, time_wait_text: Any):\n\t\treturn cls.tr(\n\t\t\t'confirm_hint.base',\n\t\t\ttime_wait_text,\n\t\t\tclick_and_run(\n\t\t\t\tRTextList(cls.tr('confirm_hint.confirm', what), '√').set_color(RColor.yellow),\n\t\t\t\tcls.tr('confirm_hint.confirm.hover', cls.command('confirm'), what),\n\t\t\t\tmkcmd('confirm'),\n\t\t\t),\n\t\t\tclick_and_run(\n\t\t\t\tRTextList(cls.tr('confirm_hint.abort', what), '×').set_color(RColor.gold),\n\t\t\t\tcls.tr('confirm_hint.abort.hover', cls.command('abort'), what),\n\t\t\t\tmkcmd('abort'),\n\t\t\t),\n\t\t)\n\n\t@classmethod\n\tdef crontab(cls, crontab_str: str) -> RTextBase:\n\t\turl = 'https://crontab.guru/#' + crontab_str.replace(' ', '_')\n\t\treturn RText(crontab_str, TextColors.date).h(cls.tr('crontab.help_url', cls.url(url, click=False))).c(RAction.open_url, url)\n\n\t@classmethod\n\tdef date_diff(cls, date: datetime.datetime) -> RTextBase:\n\t\tnow = datetime.datetime.now(date.tzinfo)\n\t\tdiff = (date - now).total_seconds()\n\t\tif diff >= 0:\n\t\t\treturn cls.tr('date_diff.later', cls.duration(diff))\n\t\telse:\n\t\t\treturn cls.tr('date_diff.ago', cls.duration(-diff))\n\n\t@classmethod\n\tdef date(cls, date: Union[datetime.datetime, int]) -> RTextBase:\n\t\tif isinstance(date, int):\n\t\t\tdate = conversion_utils.timestamp_to_local_date(date)\n\t\treturn RText(conversion_utils.datetime_to_str(date), TextColors.date).h(cls.date_diff(date))\n\n\t@classmethod\n\tdef dual_size_hover(cls, raw_size: int, stored_size: int, *, ndigits: int = 2) -> RTextBase:\n\t\tt_raw_size = cls.file_size(raw_size, ndigits=ndigits)\n\t\tt_stored_size = cls.file_size(stored_size, ndigits=ndigits)\n\t\tt_percent = cls.percent(stored_size, raw_size)\n\t\treturn cls.tr('dual_size_hover', t_stored_size, t_percent, t_raw_size)\n\n\t@classmethod\n\tdef duration(cls, seconds_or_duration: Union[float, Duration], *, color: Optional[RColor] 
= TextColors.number, ndigits: int = 2) -> RTextBase:\n\t\t# full duration text, e.g. \"1 minute\", \"2 hours\"\n\t\tif isinstance(seconds_or_duration, Duration):\n\t\t\tduration = seconds_or_duration\n\t\telif isinstance(seconds_or_duration, (int, float)):\n\t\t\tduration = Duration(seconds_or_duration)\n\t\telse:\n\t\t\traise TypeError(type(seconds_or_duration))\n\t\tvalue, unit = duration.auto_format()\n\t\tplural_suffix = cls.tr('duration.plural_suffix') if value != 1 else ''\n\t\ttext = cls.tr('duration.text', round(value, ndigits), cls.tr('duration.' + unit, plural_suffix))\n\t\tif color is not None:\n\t\t\ttext.set_color(color)\n\t\treturn text\n\n\t@classmethod\n\tdef file_mode(cls, mode: int) -> RTextBase:\n\t\tif stat.S_ISREG(mode):\n\t\t\ttype_flag = '-'\n\t\t\tcolor = RColor.light_purple\n\t\telif stat.S_ISDIR(mode):\n\t\t\ttype_flag = 'd'\n\t\t\tcolor = RColor.blue\n\t\telif stat.S_ISLNK(mode):\n\t\t\ttype_flag = 'l'\n\t\t\tcolor = RColor.aqua\n\t\telse:\n\t\t\ttype_flag = '?'\n\t\t\tcolor = RColor.gray\n\n\t\tpermissions = ''\n\t\tfor i in range(9):\n\t\t\tpermissions += 'rwx'[i % 3] if (mode >> (8 - i)) & 1 == 1 else '-'\n\n\t\treturn RText(type_flag + permissions, color)\n\n\t@classmethod\n\tdef file_name(cls, file_path: Path) -> RTextBase:\n\t\treturn RText(file_path.name, TextColors.file).h(file_path.as_posix())\n\n\t@classmethod\n\tdef file_size(cls, byte_cnt: Union[int, ByteCount], *, ndigits: int = 2, always_sign: bool = False, color: RColor = TextColors.byte_count) -> RTextBase:\n\t\tif not isinstance(byte_cnt, ByteCount):\n\t\t\tbyte_cnt = ByteCount(byte_cnt)\n\t\treturn RText(byte_cnt.auto_str(ndigits=ndigits, always_sign=always_sign), color=color)\n\n\t@classmethod\n\tdef hash_method(cls, hash_method: Union[str, HashMethod]) -> RTextBase:\n\t\tif isinstance(hash_method, HashMethod):\n\t\t\thash_method = hash_method.name\n\t\treturn RText(hash_method, RColor.light_purple)\n\n\t@classmethod\n\tdef number(cls, value: Any) -> RTextBase:\n\t\treturn RText(value, TextColors.number)\n\n\t@classmethod\n\tdef number_list(cls, values: Iterable[Any]) -> RTextBase:\n\t\treturn RTextList(\n\t\t\t'[',\n\t\t\tRTextBase.join(', ', [cls.number(v) for v in values]),\n\t\t\t']',\n\t\t)\n\n\t@classmethod\n\tdef operator(cls, op: Operator) -> RTextBase:\n\t\ttr_key = f'operator.{op.type}'\n\t\tif op.type in ['player', 'command_source', 'unknown']:\n\t\t\treturn cls.tr(tr_key, op.name)\n\t\telif op.type in ['console']:\n\t\t\treturn cls.tr(tr_key)\n\t\telif op.type == constants.PLUGIN_ID:\n\t\t\tfrom prime_backup.mcdr import mcdr_globals\n\t\t\tt_name = cls.tr(tr_key + '.' 
+ op.name)\n\t\t\tif not mcdr_globals.server.has_translation(misc_utils.ensure_type(getattr(t_name, 'translation_key'), str)):\n\t\t\t\tt_name = RText(op.name, styles=RStyle.italic)\n\t\t\treturn RTextList(cls.tr(tr_key), RText('-', RColor.gray), t_name).set_color(RColor.dark_aqua)\n\t\telse:\n\t\t\treturn RText(f'{op.type}:{op.name}')\n\n\t@classmethod\n\tdef percent(cls, value: float, total: float) -> RTextBase:\n\t\tif total != 0:\n\t\t\treturn RText(f'{100 * value / total:.1f}%', RColor.dark_green)\n\t\telse:\n\t\t\treturn RText('N/A', RColor.gray)\n\n\t@classmethod\n\tdef tag_name(cls, tag_name: BackupTagName) -> RTextBase:\n\t\treturn RText(tag_name.name, TextColors.backup_tag).h(tag_name.value.text)\n\n\t@classmethod\n\tdef title(cls, text: Any) -> RTextBase:\n\t\treturn RTextList(RText('======== ', RColor.gray), text, RText(' ========', RColor.gray))\n\n\t@classmethod\n\tdef url(cls, url: str, *, click: bool = True) -> RTextBase:\n\t\ttext = RText(url, RColor.blue, RStyle.underlined)\n\t\tif click:\n\t\t\ttext.c(RAction.open_url, url)\n\t\treturn text"
},
{
"identifier": "BackupFilter",
"path": "prime_backup/types/backup_filter.py",
"snippet": "class BackupFilter:\n\tid_start: Optional[int] = None\n\tid_end: Optional[int] = None\n\tcreator: Optional[Operator] = None\n\ttimestamp_start: Optional[int] = None\n\ttimestamp_end: Optional[int] = None\n\ttag_filters: List[BackupTagFilter] = dataclasses.field(default_factory=list)\n\n\tdef filter_pre_restore_backup(self) -> 'BackupFilter':\n\t\tself.tag_filters.append(BackupTagFilter(BackupTagName.pre_restore_backup, True, BackupTagFilter.Policy.equals))\n\t\treturn self\n\n\tdef filter_non_pre_restore_backup(self) -> 'BackupFilter':\n\t\tself.tag_filters.append(BackupTagFilter(BackupTagName.pre_restore_backup, True, BackupTagFilter.Policy.not_equals))\n\t\treturn self\n\n\tdef filter_non_hidden_backup(self) -> 'BackupFilter':\n\t\tself.tag_filters.append(BackupTagFilter(BackupTagName.hidden, True, BackupTagFilter.Policy.not_equals))\n\t\treturn self\n\n\tdef filter_non_protected_backup(self) -> 'BackupFilter':\n\t\tself.tag_filters.append(BackupTagFilter(BackupTagName.protected, True, BackupTagFilter.Policy.not_equals))\n\t\treturn self"
},
{
"identifier": "BackupInfo",
"path": "prime_backup/types/backup_info.py",
"snippet": "class BackupInfo:\n\tid: int\n\ttimestamp_ns: int\n\tcreator: Operator\n\tcomment: str\n\ttargets: List[str]\n\ttags: BackupTags\n\n\traw_size: int # uncompressed size\n\tstored_size: int # actual size\n\n\tfiles: List['FileInfo']\n\n\[email protected]_property\n\tdef date(self) -> datetime.datetime:\n\t\treturn conversion_utils.timestamp_to_local_date(self.timestamp_ns)\n\n\[email protected]_property\n\tdef date_str(self) -> str:\n\t\treturn conversion_utils.timestamp_to_local_date_str(self.timestamp_ns)\n\n\t@classmethod\n\tdef of(cls, backup: schema.Backup, *, with_files: bool = False) -> 'Self':\n\t\t\"\"\"\n\t\tNotes: should be inside a session\n\t\t\"\"\"\n\t\tfrom prime_backup.types.file_info import FileInfo\n\t\treturn cls(\n\t\t\tid=backup.id,\n\t\t\ttimestamp_ns=backup.timestamp,\n\t\t\tcreator=Operator.of(backup.creator),\n\t\t\tcomment=backup.comment,\n\t\t\ttargets=list(backup.targets),\n\t\t\ttags=BackupTags(backup.tags),\n\t\t\traw_size=backup.file_raw_size_sum or 0,\n\t\t\tstored_size=backup.file_stored_size_sum or 0,\n\t\t\tfiles=list(map(FileInfo.of, backup.files)) if with_files else [],\n\t\t)"
},
{
"identifier": "BlobListSummary",
"path": "prime_backup/types/blob_info.py",
"snippet": "class BlobListSummary(NamedTuple):\n\tcount: int\n\traw_size: int\n\tstored_size: int\n\n\t@classmethod\n\tdef zero(cls) -> 'BlobListSummary':\n\t\treturn BlobListSummary(0, 0, 0)\n\n\t@classmethod\n\tdef of(cls, blobs: Iterable[BlobInfo]) -> 'BlobListSummary':\n\t\t\"\"\"\n\t\tNotes: should be inside a session\n\t\t\"\"\"\n\t\tcnt, raw_size_sum, stored_size_sum = 0, 0, 0\n\t\tfor blob in blobs:\n\t\t\tcnt += 1\n\t\t\traw_size_sum += blob.raw_size\n\t\t\tstored_size_sum += blob.stored_size\n\t\treturn BlobListSummary(\n\t\t\tcount=cnt,\n\t\t\traw_size=raw_size_sum,\n\t\t\tstored_size=stored_size_sum,\n\t\t)\n\n\tdef __add__(self, other: 'BlobListSummary') -> 'BlobListSummary':\n\t\tmisc_utils.ensure_type(other, type(self))\n\t\treturn BlobListSummary(\n\t\t\tcount=self.count + other.count,\n\t\t\traw_size=self.raw_size + other.raw_size,\n\t\t\tstored_size=self.stored_size + other.stored_size,\n\t\t)"
},
{
"identifier": "ByteCount",
"path": "prime_backup/types/units.py",
"snippet": "class ByteCount(Quantity):\n\tdef __new__(cls, s: Union[int, float, str]):\n\t\tif isinstance(s, str) and len(s) > 0 and s[-1].lower() == 'b':\n\t\t\ts = s[:-1]\n\t\treturn super().__new__(cls, s)\n\n\t@classmethod\n\tdef _auto_format(cls, val) -> UnitValuePair:\n\t\tuv = super()._auto_format(val)\n\t\tif not uv.unit.endswith('B'):\n\t\t\tuv = UnitValuePair(uv.value, uv.unit + 'B')\n\t\treturn uv\n\n\t@classmethod\n\tdef _precise_format(cls, val) -> UnitValuePair:\n\t\tuv = super()._precise_format(val)\n\t\tif not uv.unit.endswith('B'):\n\t\t\tuv = UnitValuePair(uv.value, uv.unit + 'B')\n\t\treturn uv"
},
{
"identifier": "misc_utils",
"path": "prime_backup/utils/misc_utils.py",
"snippet": "T = TypeVar('T')\ndef assert_true(expr: bool, msg: Union[str, Callable[[], str]]):\ndef represent(obj: Any, *, attrs: Optional[dict] = None) -> str:\ndef ensure_type(value: Any, class_or_tuple: Union[Tuple[Type[T]], Type[T], Type]) -> T:\ndef make_thread_name(name: str) -> str:"
},
{
"identifier": "log_utils",
"path": "prime_backup/utils/log_utils.py",
"snippet": "LOG_FORMATTER = logging.Formatter('[%(asctime)s %(levelname)s] (%(funcName)s) %(message)s')\nLOG_FORMATTER_NO_FUNC = logging.Formatter('[%(asctime)s %(levelname)s] %(message)s')\ndef __get_log_mode() -> int:\ndef __get_log_file_path(file_name: str) -> Path:\ndef create_file_logger(name: str) -> logging.Logger:\ndef open_file_logger(name: str) -> ContextManager[logging.Logger]:"
}
] | import collections
import dataclasses
import datetime
import functools
import time
import pytz
from typing import List, NamedTuple, Dict, Union, Optional, Callable
from mcdreforged.api.all import *
from prime_backup.action.delete_backup_action import DeleteBackupAction
from prime_backup.action.list_backup_action import ListBackupAction
from prime_backup.config.prune_config import PruneSetting
from prime_backup.exceptions import BackupNotFound
from prime_backup.mcdr.task.basic_task import HeavyTask
from prime_backup.mcdr.text_components import TextComponents
from prime_backup.types.backup_filter import BackupFilter
from prime_backup.types.backup_info import BackupInfo
from prime_backup.types.blob_info import BlobListSummary
from prime_backup.types.units import ByteCount
from prime_backup.utils import misc_utils, log_utils
from prime_backup.types.backup_tags import BackupTags
from prime_backup.types.operator import Operator | 7,397 | if backup.id in marks:
continue
if backup.tags.is_protected():
marks[backup.id] = PruneMark.create_protected()
continue
bucket = bucket_mapper(backup)
if bucket in already_included:
existed = already_included[bucket]
fallback_marks[backup.id] = fallback_marks.get(backup.id) or PruneMark.create_remove(f'superseded by {existed.id} ({policy})')
continue
if bucket in handled_buckets:
existed = handled_buckets[bucket]
marks[backup.id] = PruneMark.create_remove(f'superseded by {existed.id} ({policy})')
else:
if 0 <= limit <= len(handled_buckets):
break
handled_buckets[bucket] = backup
marks[backup.id] = PruneMark.create_keep(f'keep {policy} {len(handled_buckets)}')
def create_time_str_func(fmt: str):
def func(backup: BackupInfo) -> str:
timestamp = backup.timestamp_ns / 1e9
dt = datetime.datetime.fromtimestamp(timestamp, tz=timezone)
return dt.strftime(fmt)
return func
if settings.last != 0:
def __backup_to_id(b: BackupInfo) -> str:
return str(b.id)
mark_selections(settings.last, 'last', __backup_to_id)
if settings.hour != 0:
mark_selections(settings.hour, 'hour', create_time_str_func('%Y/%m/%d/%H'))
if settings.day != 0:
mark_selections(settings.day, 'day', create_time_str_func('%Y/%m/%d'))
if settings.week != 0:
mark_selections(settings.week, 'week', create_time_str_func('%G/%V'))
if settings.month != 0:
mark_selections(settings.month, 'month', create_time_str_func('%Y/%m'))
if settings.year != 0:
mark_selections(settings.year, 'year', create_time_str_func('%Y'))
plan_list = PrunePlan()
now = time.time_ns()
regular_keep_count = 0
all_marks = collections.ChainMap(marks, fallback_marks)
default_mark = PruneMark.create_remove('unmarked')
for backup_info in backups:
if backup_info.tags.is_protected():
plan_list.append(PrunePlanItem(backup_info, PruneMark.create_protected()))
else:
mark = all_marks.get(backup_info.id, default_mark)
if mark.keep:
if 0 < settings.max_amount <= regular_keep_count:
mark = PruneMark.create_remove('max_amount exceeded')
elif 0 < settings.max_lifetime.value_nano < (now - backup_info.timestamp_ns):
mark = PruneMark.create_remove('max_lifetime exceeded')
plan_list.append(PrunePlanItem(backup_info, mark))
if mark.keep:
regular_keep_count += 1
return plan_list
def __msg_header(self) -> RTextBase:
return RTextList('(', self.what_to_prune, ') ').set_color(RColor.gray)
def reply(self, msg: Union[str, RTextBase], *, with_prefix: bool = True):
if self.what_to_prune is not None:
msg = self.__msg_header() + msg
super().reply(msg, with_prefix=with_prefix)
def run(self) -> PruneBackupResult:
backups = ListBackupAction(backup_filter=self.backup_filter).run()
backup_ids = {backup.id for backup in backups}
timezone: Optional[datetime.tzinfo] = None
if (timezone_override := self.config.prune.timezone_override) is not None:
try:
timezone = pytz.timezone(timezone_override)
except pytz.UnknownTimeZoneError as e:
self.logger.error('Bad timezone override from config, using local timezone: {}'.format(e))
else:
timezone = None
plan_list = self.calc_prune_backups(backups, self.setting, timezone=timezone)
for pl in plan_list:
misc_utils.assert_true(pl.backup.id in backup_ids, lambda: 'unexpected backup id {}, {}'.format(pl.backup.id, backup_ids))
result = PruneBackupResult(plan_list)
with log_utils.open_file_logger('prune') as prune_logger:
prune_logger.info('Prune started')
to_deleted_ids = [pl.backup.id for pl in plan_list if not pl.mark.keep]
if len(to_deleted_ids) == 0:
if self.verbose >= _PruneVerbose.all:
self.reply_tr('nothing_to_prune')
prune_logger.info('Nothing to prune')
return result
prune_logger.info('============== Prune calculate result start ==============')
for pl in plan_list:
prune_logger.info('Backup #{} at {}: keep={} reason={}'.format(pl.backup.id, pl.backup.date_str, pl.mark.keep, pl.mark.reason))
prune_logger.info('============== Prune calculate result end ==============')
if self.verbose >= _PruneVerbose.delete:
self.reply_tr(
'list_to_be_pruned',
TextComponents.number(len(to_deleted_ids)),
TextComponents.backup_id_list(to_deleted_ids, hover=False, click=False),
)
for pl in plan_list:
bid = pl.backup.id
if self.aborted_event.is_set():
if self.verbose >= _PruneVerbose.delete:
self.reply(self.get_aborted_text())
break
if not pl.mark.keep:
self.reply_tr('prune', TextComponents.backup_id(bid, hover=False, click=False))
try:
dr = DeleteBackupAction(bid).run()
except Exception as e:
|
class _PruneVerbose:
silent = 0
delete = 1
all = 2
class PruneMark(NamedTuple):
keep: bool
reason: str
def is_protected(self) -> bool:
return self.keep and self.reason == 'protected'
@classmethod
def create_keep(cls, reason: str) -> 'PruneMark':
return PruneMark(True, reason)
@classmethod
def create_protected(cls) -> 'PruneMark':
return PruneMark(True, 'protected')
@classmethod
def create_remove(cls, reason: str) -> 'PruneMark':
return PruneMark(False, reason)
class PrunePlanItem(NamedTuple):
backup: BackupInfo
mark: PruneMark
class PrunePlan(List[PrunePlanItem]):
def get_keep_reason(self, backup_or_id: Union[int, BackupInfo]) -> Optional[str]:
if isinstance(backup_or_id, BackupInfo):
backup_or_id = backup_or_id.id
mark = self.id_to_mark[backup_or_id]
if mark.keep:
return mark.reason
return None
@functools.cached_property
def id_to_mark(self) -> Dict[int, PruneMark]:
return {pri.backup.id: pri.mark for pri in self}
@dataclasses.dataclass
class PruneBackupResult:
plan: PrunePlan
deleted_backup_count: int = 0
deleted_blobs: BlobListSummary = BlobListSummary.zero()
@dataclasses.dataclass
class PruneAllBackupResult:
sub_plans: List[PrunePlan] = dataclasses.field(default_factory=list)
deleted_backup_count: int = 0
deleted_blobs: BlobListSummary = BlobListSummary.zero()
class PruneBackupTask(HeavyTask[PruneBackupResult]):
def __init__(self, source: CommandSource, backup_filter: BackupFilter, setting: PruneSetting, *, what_to_prune: Optional[RTextBase] = None, verbose: int = 2):
super().__init__(source)
self.backup_filter = backup_filter
self.setting = setting
if not setting.enabled:
raise ValueError('the prune setting should be enabled')
self.what_to_prune = what_to_prune
self.verbose = verbose
@property
def id(self) -> str:
return 'backup_prune'
def is_abort_able(self) -> bool:
return True
@classmethod
def calc_prune_backups(cls, backups: List[BackupInfo], settings: PruneSetting, *, timezone: Optional[datetime.tzinfo] = None) -> PrunePlan:
marks: Dict[int, PruneMark] = {}
fallback_marks: Dict[int, PruneMark] = {}
backups = list(sorted(backups, key=lambda b: b.timestamp_ns, reverse=True)) # new -> old
def has_mark(backup: BackupInfo, keep: bool, protect: Optional[bool] = None) -> bool:
if (m := marks.get(backup.id)) is None:
return False
return m.keep == keep and (protect is None or m.is_protected() == protect)
# ref: https://github.com/proxmox/proxmox-backup/blob/master/pbs-datastore/src/prune.rs
def mark_selections(limit: int, policy: str, bucket_mapper: Callable[[BackupInfo], str]):
already_included: Dict[str, BackupInfo] = {}
handled_buckets: Dict[str, BackupInfo] = {}
for backup in backups:
if has_mark(backup, True, False):
already_included[bucket_mapper(backup)] = backup
for backup in backups:
if backup.id in marks:
continue
if backup.tags.is_protected():
marks[backup.id] = PruneMark.create_protected()
continue
bucket = bucket_mapper(backup)
if bucket in already_included:
existed = already_included[bucket]
fallback_marks[backup.id] = fallback_marks.get(backup.id) or PruneMark.create_remove(f'superseded by {existed.id} ({policy})')
continue
if bucket in handled_buckets:
existed = handled_buckets[bucket]
marks[backup.id] = PruneMark.create_remove(f'superseded by {existed.id} ({policy})')
else:
if 0 <= limit <= len(handled_buckets):
break
handled_buckets[bucket] = backup
marks[backup.id] = PruneMark.create_keep(f'keep {policy} {len(handled_buckets)}')
def create_time_str_func(fmt: str):
def func(backup: BackupInfo) -> str:
timestamp = backup.timestamp_ns / 1e9
dt = datetime.datetime.fromtimestamp(timestamp, tz=timezone)
return dt.strftime(fmt)
return func
if settings.last != 0:
def __backup_to_id(b: BackupInfo) -> str:
return str(b.id)
mark_selections(settings.last, 'last', __backup_to_id)
if settings.hour != 0:
mark_selections(settings.hour, 'hour', create_time_str_func('%Y/%m/%d/%H'))
if settings.day != 0:
mark_selections(settings.day, 'day', create_time_str_func('%Y/%m/%d'))
if settings.week != 0:
mark_selections(settings.week, 'week', create_time_str_func('%G/%V'))
if settings.month != 0:
mark_selections(settings.month, 'month', create_time_str_func('%Y/%m'))
if settings.year != 0:
mark_selections(settings.year, 'year', create_time_str_func('%Y'))
plan_list = PrunePlan()
now = time.time_ns()
regular_keep_count = 0
all_marks = collections.ChainMap(marks, fallback_marks)
default_mark = PruneMark.create_remove('unmarked')
for backup_info in backups:
if backup_info.tags.is_protected():
plan_list.append(PrunePlanItem(backup_info, PruneMark.create_protected()))
else:
mark = all_marks.get(backup_info.id, default_mark)
if mark.keep:
if 0 < settings.max_amount <= regular_keep_count:
mark = PruneMark.create_remove('max_amount exceeded')
elif 0 < settings.max_lifetime.value_nano < (now - backup_info.timestamp_ns):
mark = PruneMark.create_remove('max_lifetime exceeded')
plan_list.append(PrunePlanItem(backup_info, mark))
if mark.keep:
regular_keep_count += 1
return plan_list
def __msg_header(self) -> RTextBase:
return RTextList('(', self.what_to_prune, ') ').set_color(RColor.gray)
def reply(self, msg: Union[str, RTextBase], *, with_prefix: bool = True):
if self.what_to_prune is not None:
msg = self.__msg_header() + msg
super().reply(msg, with_prefix=with_prefix)
def run(self) -> PruneBackupResult:
backups = ListBackupAction(backup_filter=self.backup_filter).run()
backup_ids = {backup.id for backup in backups}
timezone: Optional[datetime.tzinfo] = None
if (timezone_override := self.config.prune.timezone_override) is not None:
try:
timezone = pytz.timezone(timezone_override)
except pytz.UnknownTimeZoneError as e:
self.logger.error('Bad timezone override from config, using local timezone: {}'.format(e))
else:
timezone = None
plan_list = self.calc_prune_backups(backups, self.setting, timezone=timezone)
for pl in plan_list:
misc_utils.assert_true(pl.backup.id in backup_ids, lambda: 'unexpected backup id {}, {}'.format(pl.backup.id, backup_ids))
result = PruneBackupResult(plan_list)
with log_utils.open_file_logger('prune') as prune_logger:
prune_logger.info('Prune started')
to_deleted_ids = [pl.backup.id for pl in plan_list if not pl.mark.keep]
if len(to_deleted_ids) == 0:
if self.verbose >= _PruneVerbose.all:
self.reply_tr('nothing_to_prune')
prune_logger.info('Nothing to prune')
return result
prune_logger.info('============== Prune calculate result start ==============')
for pl in plan_list:
prune_logger.info('Backup #{} at {}: keep={} reason={}'.format(pl.backup.id, pl.backup.date_str, pl.mark.keep, pl.mark.reason))
prune_logger.info('============== Prune calculate result end ==============')
if self.verbose >= _PruneVerbose.delete:
self.reply_tr(
'list_to_be_pruned',
TextComponents.number(len(to_deleted_ids)),
TextComponents.backup_id_list(to_deleted_ids, hover=False, click=False),
)
for pl in plan_list:
bid = pl.backup.id
if self.aborted_event.is_set():
if self.verbose >= _PruneVerbose.delete:
self.reply(self.get_aborted_text())
break
if not pl.mark.keep:
self.reply_tr('prune', TextComponents.backup_id(bid, hover=False, click=False))
try:
dr = DeleteBackupAction(bid).run()
except Exception as e: | if isinstance(e, BackupNotFound): | 3 | 2023-11-28 19:03:36+00:00 | 12k |
TACJu/MaXTron | MaXTron_Tube-Link/mmdet/models/backbones/vitaev2_vsa_modules/ReductionCell.py | [
{
"identifier": "Token_transformer",
"path": "MaXTron_Tube-Link/mmdet/models/backbones/vitaev2_vsa_modules/token_transformer.py",
"snippet": "class Token_transformer(nn.Module):\n\n def __init__(self, dim, in_dim, num_heads, mlp_ratio=1., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n super().__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(\n dim, in_dim=in_dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n self.norm2 = norm_layer(in_dim)\n self.mlp = Mlp(in_features=in_dim, hidden_features=int(in_dim*mlp_ratio), out_features=in_dim, act_layer=act_layer, drop=drop)\n\n def forward(self, x):\n x = self.attn(self.norm1(x))\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x"
},
{
"identifier": "Token_performer",
"path": "MaXTron_Tube-Link/mmdet/models/backbones/vitaev2_vsa_modules/token_performer.py",
"snippet": "class Token_performer(nn.Module):\n def __init__(self, dim, in_dim, head_cnt=1, kernel_ratio=0.5, dp1=0.1, dp2 = 0.1, gamma=False, init_values=1e-4):\n super().__init__()\n self.head_dim = in_dim // head_cnt\n self.emb = in_dim\n self.kqv = nn.Linear(dim, 3 * self.emb)\n self.dp = nn.Dropout(dp1)\n self.proj = nn.Linear(self.emb, self.emb)\n self.head_cnt = head_cnt\n self.norm1 = nn.LayerNorm(dim)\n self.norm2 = nn.LayerNorm(self.emb)\n self.epsilon = 1e-8 # for stable in division\n self.drop_path = nn.Identity()\n\n self.mlp = nn.Sequential(\n nn.Linear(self.emb, 1 * self.emb),\n nn.GELU(),\n nn.Linear(1 * self.emb, self.emb),\n nn.Dropout(dp2),\n )\n\n self.m = int(self.head_dim * kernel_ratio)\n self.w = torch.randn(head_cnt, self.m, self.head_dim)\n for i in range(self.head_cnt):\n self.w[i] = nn.Parameter(nn.init.orthogonal_(self.w[i]) * math.sqrt(self.m), requires_grad=False)\n self.w.requires_grad_(False)\n\n if gamma:\n self.gamma1 = nn.Parameter(init_values * torch.ones((self.emb)),requires_grad=True)\n else:\n self.gamma1 = 1\n\n def prm_exp(self, x):\n # part of the function is borrow from https://github.com/lucidrains/performer-pytorch \n # and Simo Ryu (https://github.com/cloneofsimo)\n # ==== positive random features for gaussian kernels ====\n # x = (B, H, N, hs)\n # w = (H, m, hs)\n # return : x : B, T, m\n # SM(x, y) = E_w[exp(w^T x - |x|/2) exp(w^T y - |y|/2)]\n # therefore return exp(w^Tx - |x|/2)/sqrt(m)\n xd = ((x * x).sum(dim=-1, keepdim=True)).repeat(1, 1, 1, self.m) / 2\n wtx = torch.einsum('bhti,hmi->bhtm', x.float(), self.w.to(x.device))\n\n return torch.exp(wtx - xd) / math.sqrt(self.m)\n\n def attn(self, x):\n B, N, C = x.shape\n kqv = self.kqv(x).reshape(B, N, 3, self.head_cnt, self.head_dim).permute(2, 0, 3, 1, 4)\n k, q, v = kqv[0], kqv[1], kqv[2] # (B, H, T, hs)\n\n kp, qp = self.prm_exp(k), self.prm_exp(q) # (B, H, T, m), (B, H, T, m)\n D = torch.einsum('bhti,bhi->bht', qp, kp.sum(dim=2)).unsqueeze(dim=-1) # (B, H, T, m) * (B, H, m) -> (B, H, T, 1)\n kptv = torch.einsum('bhin,bhim->bhnm', v.float(), kp) # (B, H, emb, m)\n y = torch.einsum('bhti,bhni->bhtn', qp, kptv) / (D.repeat(1, 1, 1, self.head_dim) + self.epsilon) # (B, H, T, emb)/Diag\n\n # skip connection\n\n y = y.permute(0, 2, 1, 3).reshape(B, N, self.emb)\n v = v.permute(0, 2, 1, 3).reshape(B, N, self.emb)\n\n y = v + self.dp(self.gamma1 * self.proj(y)) # same as token_transformer, use v as skip connection\n\n return y\n\n def forward(self, x):\n x = self.attn(self.norm1(x))\n x = x + self.mlp(self.norm2(x))\n return x"
},
{
"identifier": "WindowTransformerBlock",
"path": "MaXTron_Tube-Link/mmdet/models/backbones/vitaev2_vsa_modules/window.py",
"snippet": "class WindowTransformerBlock(nn.Module):\n r\"\"\" Swin Transformer Block.\n\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resulotion.\n num_heads (int): Number of attention heads.\n window_size (int): Window size.\n shift_size (int): Shift size for SW-MSA.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float, optional): Stochastic depth rate. Default: 0.0\n act_layer (nn.Module, optional): Activation layer. Default: nn.GELU\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n \"\"\"\n\n def __init__(self, in_dim, out_dim, input_resolution, num_heads, window_size=7, shift_size=0,\n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,\n relative_pos=True, act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n super().__init__()\n self.in_dim = in_dim\n self.dim = out_dim\n self.input_resolution = input_resolution\n self.num_heads = num_heads\n self.window_size = window_size\n self.shift_size = shift_size\n self.mlp_ratio = mlp_ratio\n self.relative_pos = relative_pos\n if min(self.input_resolution) <= self.window_size:\n # if window size is larger than input resolution, we don't partition windows\n self.shift_size = 0\n self.window_size = min(self.input_resolution)\n assert 0 <= self.shift_size < self.window_size, \"shift_size must in 0-window_size\"\n\n self.norm1 = norm_layer(in_dim)\n self.attn = WindowAttention(\n in_dim=in_dim, out_dim=out_dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,\n qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, relative_pos=relative_pos)\n\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n self.norm2 = norm_layer(out_dim)\n mlp_hidden_dim = int(out_dim * mlp_ratio)\n self.mlp = Mlp(in_features=out_dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n if self.shift_size > 0:\n # calculate attention mask for SW-MSA\n H, W = self.input_resolution\n img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1\n h_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None))\n w_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None))\n cnt = 0\n for h in h_slices:\n for w in w_slices:\n img_mask[:, h, w, :] = cnt\n cnt += 1\n\n mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1\n mask_windows = mask_windows.reshape(-1, self.window_size * self.window_size)\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))\n else:\n attn_mask = None\n\n self.register_buffer(\"attn_mask\", attn_mask)\n\n def forward(self, x):\n H, W = self.input_resolution\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n\n shortcut = x\n x = self.norm1(x)\n x = x.reshape(B, H, W, C)\n\n # cyclic shift\n if self.shift_size > 0:\n shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))\n else:\n shifted_x = x\n\n # partition windows\n x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C\n x_windows = x_windows.reshape(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C\n\n # W-MSA/SW-MSA\n attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C\n\n # merge windows\n attn_windows = attn_windows.reshape(-1, self.window_size, self.window_size, C)\n shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C\n\n # reverse cyclic shift\n if self.shift_size > 0:\n x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))\n else:\n x = shifted_x\n x = x.reshape(B, H * W, C)\n\n # FFN\n x = shortcut + self.drop_path(x)\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n\n return x\n\n def extra_repr(self) -> str:\n return f\"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, \" \\\n f\"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}\""
},
{
"identifier": "VSAWindowAttention",
"path": "MaXTron_Tube-Link/mmdet/models/backbones/vitaev2_vsa_modules/window.py",
"snippet": "class VSAWindowAttention(nn.Module):\n def __init__(self, dim, num_heads, out_dim=None, window_size=1, qkv_bias=True, qk_scale=None, \n attn_drop=0., proj_drop=0.):\n super().__init__()\n self.num_heads = num_heads\n self.dim = dim\n self.out_dim = out_dim or dim\n self.relative_pos_embedding = True\n head_dim = dim // self.num_heads\n self.ws = window_size\n\n self.sampling_offsets = nn.Sequential(\n nn.AvgPool2d(kernel_size=window_size, stride=window_size),\n nn.LeakyReLU(), \n nn.Conv2d(dim, self.num_heads * 2, kernel_size=1, stride=1)\n )\n self.sampling_scales = nn.Sequential(\n nn.AvgPool2d(kernel_size=window_size, stride=window_size), \n nn.LeakyReLU(), \n nn.Conv2d(dim, self.num_heads * 2, kernel_size=1, stride=1)\n )\n\n self.scale = qk_scale or head_dim ** -0.5\n\n self.qkv = nn.Conv2d(dim, out_dim * 3, 1, bias=qkv_bias)\n\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Conv2d(out_dim, out_dim, 1)\n self.proj_drop = nn.Dropout(proj_drop)\n\n if self.relative_pos_embedding:\n # define a parameter table of relative position bias\n self.relative_position_bias_table = nn.Parameter(\n torch.zeros((window_size + window_size - 1) * (window_size + window_size - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH\n\n # get pair-wise relative position index for each token inside the window\n coords_h = torch.arange(self.ws)\n coords_w = torch.arange(self.ws)\n coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww\n coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww\n relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww\n relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2\n relative_coords[:, :, 0] += self.ws - 1 # shift to start from 0\n relative_coords[:, :, 1] += self.ws - 1\n relative_coords[:, :, 0] *= 2 * self.ws - 1\n relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww\n self.register_buffer(\"relative_position_index\", relative_position_index)\n\n trunc_normal_(self.relative_position_bias_table, std=.02)\n print('The relative_pos_embedding is used')\n\n def forward(self, x, coords=None):\n b, _, h, w = x.shape\n shortcut = x\n padding_td = (self.ws - h % self.ws) % self.ws\n padding_lr = (self.ws - w % self.ws) % self.ws\n padding_top = padding_td // 2\n padding_down = padding_td - padding_top\n padding_left = padding_lr // 2\n padding_right = padding_lr - padding_left\n expand_h, expand_w = h+padding_top+padding_down, w+padding_left+padding_right\n window_num_h = expand_h // self.ws\n window_num_w = expand_w // self.ws\n image_reference_h = torch.linspace(-1, 1, expand_h).to(x.device)\n image_reference_w = torch.linspace(-1, 1, expand_w).to(x.device)\n image_reference = torch.stack(torch.meshgrid(image_reference_w, image_reference_h), 0).permute(0, 2, 1).unsqueeze(0) # 2, h, w\n window_reference = nn.functional.avg_pool2d(image_reference, kernel_size=self.ws)\n image_reference = image_reference.reshape(1, 2, window_num_h, self.ws, window_num_w, self.ws)\n assert window_num_h == window_reference.shape[-2]\n assert window_num_w == window_reference.shape[-1]\n window_reference = window_reference.reshape(1, 2, window_num_h, 1, window_num_w, 1)\n\n base_coords_h = torch.arange(self.ws).to(x.device) * 2 * self.ws / self.ws / (expand_h-1)\n base_coords_h = (base_coords_h - base_coords_h.mean())\n base_coords_w = torch.arange(self.ws).to(x.device) * 2 * self.ws / self.ws / (expand_w-1)\n base_coords_w = (base_coords_w - base_coords_w.mean())\n # base_coords = 
torch.stack(torch.meshgrid(base_coords_w, base_coords_h), 0).permute(0, 2, 1).reshape(1, 2, 1, self.ws, 1, self.ws)\n\n expanded_base_coords_h = base_coords_h.unsqueeze(dim=0).repeat(window_num_h, 1)\n assert expanded_base_coords_h.shape[0] == window_num_h\n assert expanded_base_coords_h.shape[1] == self.ws\n expanded_base_coords_w = base_coords_w.unsqueeze(dim=0).repeat(window_num_w, 1)\n assert expanded_base_coords_w.shape[0] == window_num_w\n assert expanded_base_coords_w.shape[1] == self.ws\n expanded_base_coords_h = expanded_base_coords_h.reshape(-1)\n expanded_base_coords_w = expanded_base_coords_w.reshape(-1)\n window_coords = torch.stack(torch.meshgrid(expanded_base_coords_w, expanded_base_coords_h), 0).permute(0, 2, 1).reshape(1, 2, window_num_h, self.ws, window_num_w, self.ws)\n # base_coords = window_reference+window_coords\n base_coords = image_reference\n\n x = torch.nn.functional.pad(x, (padding_left, padding_right, padding_top, padding_down))\n\n coords = base_coords.repeat(b*self.num_heads, 1, 1, 1, 1, 1)\n sampling_offsets = self.sampling_offsets(x)\n num_predict_total = b * self.num_heads\n sampling_offsets = sampling_offsets.reshape(num_predict_total, 2, window_num_h, window_num_w)\n sampling_offsets[:, 0, ...] = sampling_offsets[:, 0, ...] / (h // self.ws)\n sampling_offsets[:, 1, ...] = sampling_offsets[:, 1, ...] / (w // self.ws)\n \n sampling_scales = self.sampling_scales(x) #B, heads*2, h // window_size, w // window_size\n sampling_scales = sampling_scales.reshape(num_predict_total, 2, window_num_h, window_num_w)\n \n coords = coords + window_coords * sampling_scales[:, :, :, None, :, None] + sampling_offsets[:, :, :, None, :, None]\n sample_coords = coords.permute(0, 2, 3, 4, 5, 1).reshape(num_predict_total, self.ws*window_num_h, self.ws*window_num_w, 2)\n\n qkv = self.qkv(shortcut).reshape(b, 3, self.num_heads, self.out_dim // self.num_heads, h, w).transpose(1, 0).reshape(3*b*self.num_heads, self.out_dim // self.num_heads, h, w)\n qkv = torch.nn.functional.pad(qkv, (padding_left, padding_right, padding_top, padding_down)).reshape(3, b*self.num_heads, self.out_dim // self.num_heads, h+padding_td, w+padding_lr)\n q, k, v = qkv[0], qkv[1], qkv[2]\n k_selected = F.grid_sample(k, grid=sample_coords, padding_mode='zeros', align_corners=True)\n v_selected = F.grid_sample(v, grid=sample_coords, padding_mode='zeros', align_corners=True)\n\n q = q.reshape(b, self.num_heads, self.out_dim//self.num_heads, window_num_h, self.ws, window_num_w, self.ws).permute(0, 3, 5, 1, 4, 6, 2).reshape(b*window_num_h*window_num_w, self.num_heads, self.ws*self.ws, self.out_dim//self.num_heads)\n k = k_selected.reshape(b, self.num_heads, self.out_dim//self.num_heads, window_num_h, self.ws, window_num_w, self.ws).permute(0, 3, 5, 1, 4, 6, 2).reshape(b*window_num_h*window_num_w, self.num_heads, self.ws*self.ws, self.out_dim//self.num_heads)\n v = v_selected.reshape(b, self.num_heads, self.out_dim//self.num_heads, window_num_h, self.ws, window_num_w, self.ws).permute(0, 3, 5, 1, 4, 6, 2).reshape(b*window_num_h*window_num_w, self.num_heads, self.ws*self.ws, self.out_dim//self.num_heads)\n\n dots = (q @ k.transpose(-2, -1)) * self.scale\n\n if self.relative_pos_embedding:\n relative_position_bias = self.relative_position_bias_table[self.relative_position_index.reshape(-1)].reshape(\n self.ws * self.ws, self.ws * self.ws, -1) # Wh*Ww,Wh*Ww,nH\n relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww\n dots += relative_position_bias.unsqueeze(0)\n\n attn = 
dots.softmax(dim=-1)\n out = attn @ v\n\n out = rearrange(out, '(b hh ww) h (ws1 ws2) d -> b (h d) (hh ws1) (ww ws2)', h=self.num_heads, b=b, hh=window_num_h, ww=window_num_w, ws1=self.ws, ws2=self.ws)\n out = out[:, :, padding_top:h+padding_top, padding_left:w+padding_left]\n \n out = self.proj(out)\n out = self.proj_drop(out)\n\n return out\n \n def _clip_grad(self, grad_norm):\n # print('clip grads of the model for selection')\n nn.utils.clip_grad_norm_(self.sampling_offsets.parameters(), grad_norm)\n nn.utils.clip_grad_norm_(self.sampling_scales.parameters(), grad_norm)\n\n def _reset_parameters(self):\n nn.init.constant_(self.sampling_offsets[-1].weight, 0.)\n nn.init.constant_(self.sampling_offsets[-1].bias, 0.)\n nn.init.constant_(self.sampling_scales[-1].weight, 0.)\n nn.init.constant_(self.sampling_scales[-1].bias, 0.)"
},
{
"identifier": "window_partition",
"path": "MaXTron_Tube-Link/mmdet/models/backbones/vitaev2_vsa_modules/window.py",
"snippet": "def window_partition(x, window_size):\n \"\"\"\n Args:\n x: (B, H, W, C)\n window_size (int): window size\n\n Returns:\n windows: (num_windows*B, window_size, window_size, C)\n \"\"\"\n B, H, W, C = x.shape\n x = x.reshape(B, H // window_size, window_size, W // window_size, window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(-1, window_size, window_size, C)\n return windows"
},
{
"identifier": "window_reverse",
"path": "MaXTron_Tube-Link/mmdet/models/backbones/vitaev2_vsa_modules/window.py",
"snippet": "def window_reverse(windows, window_size, H, W):\n \"\"\"\n Args:\n windows: (num_windows*B, window_size, window_size, C)\n window_size (int): Window size\n H (int): Height of image\n W (int): Width of image\n\n Returns:\n x: (B, H, W, C)\n \"\"\"\n B = int(windows.shape[0] / (H * W / window_size / window_size))\n x = windows.reshape(B, H // window_size, W // window_size, window_size, window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(B, H, W, -1)\n return x"
},
{
"identifier": "Mlp",
"path": "MaXTron_Tube-Link/mmdet/models/backbones/vitaev2_vsa_modules/window.py",
"snippet": "class Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x"
}
] | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from numpy.core.fromnumeric import resize, shape
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from .token_transformer import Token_transformer
from .token_performer import Token_performer
from .window import WindowTransformerBlock, VSAWindowAttention, window_partition, window_reverse, Mlp | 7,869 | self.kernel_size = kernel_size
self.stride = downsample_ratio
self.share_weights = share_weights
self.outSize = img_size // downsample_ratio
if share_weights:
self.convolution = nn.Conv2d(in_channels=in_chans, out_channels=embed_dim, kernel_size=self.kernel_size, \
stride=self.stride, padding=3*dilations[0]//2, dilation=dilations[0])
else:
self.convs = nn.ModuleList()
for dilation in self.dilations:
padding = math.ceil(((self.kernel_size-1)*dilation + 1 - self.stride) / 2)
if img_size % downsample_ratio != 0:
padding += 1
self.convs.append(nn.Sequential(*[nn.Conv2d(in_channels=in_chans, out_channels=embed_dim, kernel_size=self.kernel_size, \
stride=self.stride, padding=padding, dilation=dilation),
nn.GELU()]))
if self.op == 'sum':
self.out_chans = embed_dim
elif op == 'cat':
self.out_chans = embed_dim * len(self.dilations)
def forward(self, x):
B, C, W, H = x.shape
if self.share_weights:
padding = math.ceil(((self.kernel_size-1)*self.dilations[0] + 1 - self.stride) / 2)
y = nn.functional.conv2d(x, weight=self.convolution.weight, bias=self.convolution.bias, \
stride=self.downsample_ratio, padding=padding, dilation=self.dilations[0]).unsqueeze(dim=-1)
for i in range(1, len(self.dilations)):
padding = math.ceil(((self.kernel_size-1)*self.dilations[i] + 1 - self.stride) / 2)
_y = nn.functional.conv2d(x, weight=self.convolution.weight, bias=self.convolution.bias, \
stride=self.downsample_ratio, padding=padding, dilation=self.dilations[i]).unsqueeze(dim=-1)
y = torch.cat((y, _y), dim=-1)
else:
y = self.convs[0](x).unsqueeze(dim=-1)
for i in range(1, len(self.dilations)):
_y = self.convs[i](x).unsqueeze(dim=-1)
y = torch.cat((y, _y), dim=-1)
B, C, W, H, N = y.shape
if self.op == 'sum':
y = y.sum(dim=-1).flatten(2).permute(0,2,1).contiguous()
elif self.op == 'cat':
y = y.permute(0,4,1,2,3).flatten(3).reshape(B, N*C, W*H).permute(0,2,1).contiguous()
else:
raise NotImplementedError('no such operation: {} for multi-levels!'.format(self.op))
return y, (W, H)
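# A minimal shape sketch for the forward pass above (assuming only torch and the defaults here):
# with op='cat' the len(dilations) parallel dilated convolutions are concatenated along the
# channel axis and flattened into tokens of size embed_dim * len(dilations).
# prm = PRM(img_size=224, kernel_size=7, downsample_ratio=4, dilations=[1, 2, 3, 4],
#           in_chans=3, embed_dim=64, op='cat')
# tokens, (W, H) = prm(torch.randn(2, 3, 224, 224))  # W == H == 224 // 4 == 56
# assert tokens.shape == (2, W * H, 64 * 4)  # (2, 3136, 256)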
class ReductionCell(nn.Module):
def __init__(self, img_size=224, in_chans=3, embed_dims=64, wide_pcm=False, token_dims=64, downsample_ratios=4, kernel_size=7,
num_heads=1, dilations=[1,2,3,4], share_weights=False, op='cat', tokens_type='performer', group=1,
relative_pos=False, cpe=False, drop=0., attn_drop=0., drop_path=0., mlp_ratio=1.0, window_size=7, num_deform=None):
super().__init__()
self.img_size = img_size
self.window_size = window_size
self.op = op
self.dilations = dilations
self.num_heads = num_heads
self.embed_dims = embed_dims
self.token_dims = token_dims
self.in_chans = in_chans
self.downsample_ratios = downsample_ratios
self.kernel_size = kernel_size
self.outSize = img_size
self.relative_pos = relative_pos
self.cpe = cpe
PCMStride = []
residual = downsample_ratios // 2
for _ in range(3):
PCMStride.append((residual > 0) + 1)
residual = residual // 2
assert residual == 0
self.pool = None
self.tokens_type = tokens_type
if tokens_type == 'pooling':
PCMStride = [1, 1, 1]
self.pool = nn.MaxPool2d(downsample_ratios, stride=downsample_ratios, padding=0)
tokens_type = 'transformer'
self.outSize = self.outSize // downsample_ratios
downsample_ratios = 1
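# In 'pooling' mode the spatial reduction is delegated entirely to the MaxPool2d above: the PCM
# strides are forced to 1, the attention falls back to the 'transformer' branch, and
# downsample_ratios is reset to 1 so the PRM below does not downsample a second time.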
if not wide_pcm:
self.PCM = nn.Sequential(
nn.Conv2d(in_chans, embed_dims, kernel_size=(3, 3), stride=PCMStride[0], padding=(1, 1), groups=group), # the 1st convolution
nn.BatchNorm2d(embed_dims),
nn.SiLU(inplace=True),
nn.Conv2d(embed_dims, embed_dims, kernel_size=(3, 3), stride=PCMStride[1], padding=(1, 1), groups=group), # the 2nd convolution
nn.BatchNorm2d(embed_dims),
nn.SiLU(inplace=True),
nn.Conv2d(embed_dims, token_dims, kernel_size=(3, 3), stride=PCMStride[2], padding=(1, 1), groups=group), # the 3rd convolution
)
else:
self.PCM = nn.Sequential(
nn.Conv2d(in_chans, token_dims*2, kernel_size=(3, 3), stride=PCMStride[0], padding=(1, 1), groups=group), # the 1st convolution
nn.BatchNorm2d(token_dims*2),
nn.SiLU(inplace=True),
nn.Conv2d(token_dims*2, token_dims*2, kernel_size=(3, 3), stride=PCMStride[1], padding=(1, 1), groups=group), # the 2nd convolution
nn.BatchNorm2d(token_dims*2),
nn.SiLU(inplace=True),
nn.Conv2d(token_dims*2, token_dims, kernel_size=(3, 3), stride=PCMStride[2], padding=(1, 1), groups=group), # the 3rd convolution
)
self.PRM = PRM(img_size=img_size, kernel_size=kernel_size, downsample_ratio=downsample_ratios, dilations=self.dilations,
in_chans=in_chans, embed_dim=embed_dims, share_weights=share_weights, op=op)
self.outSize = self.outSize // downsample_ratios
in_chans = self.PRM.out_chans
if tokens_type == 'performer':
# assert num_heads == 1
self.attn = Token_performer(dim=in_chans, in_dim=token_dims, head_cnt=num_heads, kernel_ratio=0.5)
elif tokens_type == 'performer_less':
self.attn = None
self.PCM = None
elif tokens_type == 'transformer':
self.attn = Token_transformer(dim=in_chans, in_dim=token_dims, num_heads=num_heads, mlp_ratio=mlp_ratio, drop=drop,
attn_drop=attn_drop, drop_path=drop_path)
elif tokens_type == 'window':
|
class PRM(nn.Module):
def __init__(self, img_size=224, kernel_size=4, downsample_ratio=4, dilations=[1,6,12], in_chans=3, embed_dim=64, share_weights=False, op='cat'):
super().__init__()
self.dilations = dilations
self.embed_dim = embed_dim
self.downsample_ratio = downsample_ratio
self.op = op
self.kernel_size = kernel_size
self.stride = downsample_ratio
self.share_weights = share_weights
self.outSize = img_size // downsample_ratio
if share_weights:
self.convolution = nn.Conv2d(in_channels=in_chans, out_channels=embed_dim, kernel_size=self.kernel_size, \
stride=self.stride, padding=3*dilations[0]//2, dilation=dilations[0])
else:
self.convs = nn.ModuleList()
for dilation in self.dilations:
padding = math.ceil(((self.kernel_size-1)*dilation + 1 - self.stride) / 2)
if img_size % downsample_ratio != 0:
padding += 1
self.convs.append(nn.Sequential(*[nn.Conv2d(in_channels=in_chans, out_channels=embed_dim, kernel_size=self.kernel_size, \
stride=self.stride, padding=padding, dilation=dilation),
nn.GELU()]))
if self.op == 'sum':
self.out_chans = embed_dim
elif op == 'cat':
self.out_chans = embed_dim * len(self.dilations)
def forward(self, x):
B, C, W, H = x.shape
if self.share_weights:
padding = math.ceil(((self.kernel_size-1)*self.dilations[0] + 1 - self.stride) / 2)
y = nn.functional.conv2d(x, weight=self.convolution.weight, bias=self.convolution.bias, \
stride=self.downsample_ratio, padding=padding, dilation=self.dilations[0]).unsqueeze(dim=-1)
for i in range(1, len(self.dilations)):
padding = math.ceil(((self.kernel_size-1)*self.dilations[i] + 1 - self.stride) / 2)
_y = nn.functional.conv2d(x, weight=self.convolution.weight, bias=self.convolution.bias, \
stride=self.downsample_ratio, padding=padding, dilation=self.dilations[i]).unsqueeze(dim=-1)
y = torch.cat((y, _y), dim=-1)
else:
y = self.convs[0](x).unsqueeze(dim=-1)
for i in range(1, len(self.dilations)):
_y = self.convs[i](x).unsqueeze(dim=-1)
y = torch.cat((y, _y), dim=-1)
B, C, W, H, N = y.shape
if self.op == 'sum':
y = y.sum(dim=-1).flatten(2).permute(0,2,1).contiguous()
elif self.op == 'cat':
y = y.permute(0,4,1,2,3).flatten(3).reshape(B, N*C, W*H).permute(0,2,1).contiguous()
else:
raise NotImplementedError('no such operation: {} for multi-levels!'.format(self.op))
return y, (W, H)
class ReductionCell(nn.Module):
def __init__(self, img_size=224, in_chans=3, embed_dims=64, wide_pcm=False, token_dims=64, downsample_ratios=4, kernel_size=7,
num_heads=1, dilations=[1,2,3,4], share_weights=False, op='cat', tokens_type='performer', group=1,
relative_pos=False, cpe=False, drop=0., attn_drop=0., drop_path=0., mlp_ratio=1.0, window_size=7, num_deform=None):
super().__init__()
self.img_size = img_size
self.window_size = window_size
self.op = op
self.dilations = dilations
self.num_heads = num_heads
self.embed_dims = embed_dims
self.token_dims = token_dims
self.in_chans = in_chans
self.downsample_ratios = downsample_ratios
self.kernel_size = kernel_size
self.outSize = img_size
self.relative_pos = relative_pos
self.cpe = cpe
PCMStride = []
residual = downsample_ratios // 2
for _ in range(3):
PCMStride.append((residual > 0) + 1)
residual = residual // 2
assert residual == 0
self.pool = None
self.tokens_type = tokens_type
if tokens_type == 'pooling':
PCMStride = [1, 1, 1]
self.pool = nn.MaxPool2d(downsample_ratios, stride=downsample_ratios, padding=0)
tokens_type = 'transformer'
self.outSize = self.outSize // downsample_ratios
downsample_ratios = 1
if not wide_pcm:
self.PCM = nn.Sequential(
nn.Conv2d(in_chans, embed_dims, kernel_size=(3, 3), stride=PCMStride[0], padding=(1, 1), groups=group), # the 1st convolution
nn.BatchNorm2d(embed_dims),
nn.SiLU(inplace=True),
nn.Conv2d(embed_dims, embed_dims, kernel_size=(3, 3), stride=PCMStride[1], padding=(1, 1), groups=group), # the 2nd convolution
nn.BatchNorm2d(embed_dims),
nn.SiLU(inplace=True),
nn.Conv2d(embed_dims, token_dims, kernel_size=(3, 3), stride=PCMStride[2], padding=(1, 1), groups=group), # the 3rd convolution
)
else:
self.PCM = nn.Sequential(
nn.Conv2d(in_chans, token_dims*2, kernel_size=(3, 3), stride=PCMStride[0], padding=(1, 1), groups=group), # the 1st convolution
nn.BatchNorm2d(token_dims*2),
nn.SiLU(inplace=True),
nn.Conv2d(token_dims*2, token_dims*2, kernel_size=(3, 3), stride=PCMStride[1], padding=(1, 1), groups=group), # the 2nd convolution
nn.BatchNorm2d(token_dims*2),
nn.SiLU(inplace=True),
nn.Conv2d(token_dims*2, token_dims, kernel_size=(3, 3), stride=PCMStride[2], padding=(1, 1), groups=group), # the 3rd convolution
)
self.PRM = PRM(img_size=img_size, kernel_size=kernel_size, downsample_ratio=downsample_ratios, dilations=self.dilations,
in_chans=in_chans, embed_dim=embed_dims, share_weights=share_weights, op=op)
self.outSize = self.outSize // downsample_ratios
in_chans = self.PRM.out_chans
if tokens_type == 'performer':
# assert num_heads == 1
self.attn = Token_performer(dim=in_chans, in_dim=token_dims, head_cnt=num_heads, kernel_ratio=0.5)
elif tokens_type == 'performer_less':
self.attn = None
self.PCM = None
elif tokens_type == 'transformer':
self.attn = Token_transformer(dim=in_chans, in_dim=token_dims, num_heads=num_heads, mlp_ratio=mlp_ratio, drop=drop,
attn_drop=attn_drop, drop_path=drop_path)
elif tokens_type == 'window': | self.attn = WindowTransformerBlock(in_dim=in_chans, out_dim=token_dims, input_resolution=(self.img_size//self.downsample_ratios, self.img_size//self.downsample_ratios), | 2 | 2023-12-01 20:08:54+00:00 | 12k |
navervision/lincir | validate.py | [
{
"identifier": "collate_fn",
"path": "data_utils.py",
"snippet": "def collate_fn(batch):\n '''\n function which discard None images in a batch when using torch DataLoader\n :param batch: input_batch\n :return: output_batch = input_batch - None_values\n '''\n batch = list(filter(lambda x: x is not None, batch))\n return torch.utils.data.dataloader.default_collate(batch)"
},
{
"identifier": "PROJECT_ROOT",
"path": "data_utils.py",
"snippet": "PROJECT_ROOT = Path(__file__).absolute().parents[1].absolute()"
},
{
"identifier": "targetpad_transform",
"path": "data_utils.py",
"snippet": "def targetpad_transform(target_ratio: float, dim: int) -> torch.Tensor:\n \"\"\"\n CLIP-like preprocessing transform computed after using TargetPad pad\n :param target_ratio: target ratio for TargetPad\n :param dim: image output dimension\n :return: CLIP-like torchvision Compose transform\n \"\"\"\n return Compose([\n TargetPad(target_ratio, dim),\n Resize(dim, interpolation=InterpolationMode.BICUBIC),\n CenterCrop(dim),\n _convert_image_to_rgb,\n ToTensor(),\n Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),\n ])"
},
{
"identifier": "FashionIQDataset",
"path": "loader.py",
"snippet": "class FashionIQDataset(Dataset):\n \"\"\"\n Copy-paste from https://github.com/miccunifi/SEARLE/blob/main/src/datasets.py\n FashionIQ dataset class for PyTorch.\n The dataset can be used in 'relative' or 'classic' mode:\n - In 'classic' mode the dataset yield :a dict with keys ['image', 'image_name']\n - In 'relative' mode the dataset yield dict with keys:\n - ['reference_image', 'reference_name', 'target_image', 'target_name', 'relative_captions'] when\n split in ['train', 'val']\n - ['reference_image', 'reference_name', 'relative_captions'] when split == test\n \"\"\"\n\n def __init__(self, dataset_path: Union[Path, str], split: Literal['train', 'val', 'test'], dress_types: List[str],\n mode: Literal['relative', 'classic'], preprocess: callable, no_duplicates: Optional[bool] = False):\n \"\"\"\n :param dataset_path: path to the FashionIQ dataset\n :param split: dataset split, should be in ['train, 'val', 'test']\n :param dress_types: list of fashionIQ categories, each category should be in ['dress', 'shirt', 'toptee']\n :param mode: dataset mode, should be in ['relative', 'classic']:\n - In 'classic' mode the dataset yield a dict with keys ['image', 'image_name']\n - In 'relative' mode the dataset yield dict with keys:\n - ['reference_image', 'reference_name', 'target_image', 'target_name', 'relative_captions']\n when split in ['train', 'val']\n - ['reference_image', 'reference_name', 'relative_captions'] when split == test\n :param preprocess: function which preprocesses the image\n :param no_duplicates: if True, the dataset will not yield duplicate images in relative mode, does not affect classic mode\n \"\"\"\n dataset_path = Path(dataset_path)\n self.dataset_path = dataset_path\n self.mode = mode\n self.dress_types = dress_types\n self.split = split\n self.no_duplicates = no_duplicates\n\n # Validate the inputs\n if mode not in ['relative', 'classic']:\n raise ValueError(\"mode should be in ['relative', 'classic']\")\n if split not in ['test', 'train', 'val']:\n raise ValueError(\"split should be in ['test', 'train', 'val']\")\n for dress_type in dress_types:\n if dress_type not in ['dress', 'shirt', 'toptee']:\n raise ValueError(\"dress_type should be in ['dress', 'shirt', 'toptee']\")\n\n self.preprocess = preprocess\n\n # get triplets made by (reference_image, target_image, a pair of relative captions)\n self.triplets: List[dict] = []\n for dress_type in dress_types:\n with open(dataset_path / 'captions' / f'cap.{dress_type}.{split}.json') as f:\n self.triplets.extend(json.load(f))\n\n # Remove duplicats from\n if self.no_duplicates:\n seen = set()\n new_triplets = []\n for triplet in self.triplets:\n if triplet['candidate'] not in seen:\n seen.add(triplet['candidate'])\n new_triplets.append(triplet)\n self.triplets = new_triplets\n\n # get the image names\n self.image_names: list = []\n for dress_type in dress_types:\n with open(dataset_path / 'image_splits' / f'split.{dress_type}.{split}.json') as f:\n self.image_names.extend(json.load(f))\n\n print(f\"FashionIQ {split} - {dress_types} dataset in {mode} mode initialized\")\n\n def __getitem__(self, index) -> dict:\n try:\n if self.mode == 'relative':\n relative_captions = self.triplets[index]['captions']\n reference_name = self.triplets[index]['candidate']\n\n if self.split in ['train', 'val']:\n reference_image_path = self.dataset_path / 'images' / f\"{reference_name}.jpg\"\n reference_image = self.preprocess(PIL.Image.open(reference_image_path), return_tensors='pt')['pixel_values'][0]\n target_name = 
self.triplets[index]['target']\n target_image_path = self.dataset_path / 'images' / f\"{target_name}.jpg\"\n target_image = self.preprocess(PIL.Image.open(target_image_path), return_tensors='pt')['pixel_values'][0]\n\n return {\n 'reference_image': reference_image,\n 'reference_name': reference_name,\n 'target_image': target_image,\n 'target_name': target_name,\n 'relative_captions': relative_captions\n }\n\n elif self.split == 'test':\n reference_image_path = self.dataset_path / 'images' / f\"{reference_name}.jpg\"\n reference_image = self.preprocess(PIL.Image.open(reference_image_path), return_tensors='pt')['pixel_values'][0]\n\n return {\n 'reference_image': reference_image,\n 'reference_name': reference_name,\n 'relative_captions': relative_captions\n }\n\n elif self.mode == 'classic':\n image_name = self.image_names[index]\n image_path = self.dataset_path / 'images' / f\"{image_name}.jpg\"\n image = self.preprocess(PIL.Image.open(image_path), return_tensors='pt')['pixel_values'][0]\n\n return {\n 'image': image,\n 'image_name': image_name\n }\n\n else:\n raise ValueError(\"mode should be in ['relative', 'classic']\")\n except Exception as e:\n print(f\"Exception: {e}\")\n\n def __len__(self):\n if self.mode == 'relative':\n return len(self.triplets)\n elif self.mode == 'classic':\n return len(self.image_names)\n else:\n raise ValueError(\"mode should be in ['relative', 'classic']\")"
},
{
"identifier": "CIRRDataset",
"path": "loader.py",
"snippet": "class CIRRDataset(Dataset):\n \"\"\"\n Copy-paste from https://github.com/miccunifi/SEARLE/blob/main/src/datasets.py\n CIRR dataset class for PyTorch dataloader.\n The dataset can be used in 'relative' or 'classic' mode:\n - In 'classic' mode the dataset yield a dict with keys ['image', 'image_name']\n - In 'relative' mode the dataset yield dict with keys:\n - ['reference_image', 'reference_name', 'target_image', 'target_name', 'relative_caption', 'group_members']\n when split in ['train', 'val']\n - ['reference_image', 'reference_name' 'relative_caption', 'group_members', 'pair_id'] when split == test\n \"\"\"\n\n def __init__(self, dataset_path: Union[Path, str], split: Literal['train', 'val', 'test'],\n mode: Literal['relative', 'classic'], preprocess: callable, no_duplicates: Optional[bool] = False):\n \"\"\"\n :param dataset_path: path to the CIRR dataset\n :param split: dataset split, should be in ['train', 'val', 'test']\n :param mode: dataset mode, should be in ['relative', 'classic']:\n - In 'classic' mode the dataset yield a dict with keys ['image', 'image_name']\n - In 'relative' mode the dataset yield dict with keys:\n - ['reference_image', 'reference_name', 'target_image', 'target_name', 'relative_caption',\n 'group_members'] when split in ['train', 'val']\n - ['reference_image', 'reference_name' 'relative_caption', 'group_members', 'pair_id'] when split == test\n :param preprocess: function which preprocesses the image\n :param no_duplicates: if True, the dataset will not yield duplicate images in relative mode, does not affect classic mode\n \"\"\"\n dataset_path = Path(dataset_path)\n self.dataset_path = dataset_path\n self.preprocess = preprocess\n self.mode = mode\n self.split = split\n self.no_duplicates = no_duplicates\n\n if split == \"test\":\n split = \"test1\"\n self.split = \"test1\"\n\n # Validate inputs\n if split not in ['test1', 'train', 'val']:\n raise ValueError(\"split should be in ['test1', 'train', 'val']\")\n if mode not in ['relative', 'classic']:\n raise ValueError(\"mode should be in ['relative', 'classic']\")\n\n # get triplets made by (reference_image, target_image, relative caption)\n with open(dataset_path / 'cirr' / 'captions' / f'cap.rc2.{split}.json') as f:\n self.triplets = json.load(f)\n\n # Remove duplicates from triplets\n if self.no_duplicates:\n seen = set()\n new_triplets = []\n for triplet in self.triplets:\n if triplet['reference'] not in seen:\n seen.add(triplet['reference'])\n new_triplets.append(triplet)\n self.triplets = new_triplets\n\n # get a mapping from image name to relative path\n with open(dataset_path / 'cirr' / 'image_splits' / f'split.rc2.{split}.json') as f:\n self.name_to_relpath = json.load(f)\n\n print(f\"CIRR {split} dataset in {mode} mode initialized\")\n\n def __getitem__(self, index) -> dict:\n try:\n if self.mode == 'relative':\n group_members = self.triplets[index]['img_set']['members']\n reference_name = self.triplets[index]['reference']\n relative_caption = self.triplets[index]['caption']\n\n if self.split in ['train', 'val']:\n reference_image_path = self.dataset_path / self.name_to_relpath[reference_name]\n reference_image = self.preprocess(PIL.Image.open(reference_image_path), return_tensors='pt')['pixel_values'][0]\n target_hard_name = self.triplets[index]['target_hard']\n target_image_path = self.dataset_path / self.name_to_relpath[target_hard_name]\n target_image = self.preprocess(PIL.Image.open(target_image_path), return_tensors='pt')['pixel_values'][0]\n\n return {\n 'reference_image': 
reference_image,\n 'reference_name': reference_name,\n 'target_image': target_image,\n 'target_name': target_hard_name,\n 'relative_caption': relative_caption,\n 'group_members': group_members\n }\n\n elif self.split == 'test1':\n pair_id = self.triplets[index]['pairid']\n reference_image_path = self.dataset_path / self.name_to_relpath[reference_name]\n reference_image = self.preprocess(PIL.Image.open(reference_image_path), return_tensors='pt')['pixel_values'][0]\n return {\n 'reference_image': reference_image,\n 'reference_name': reference_name,\n 'relative_caption': relative_caption,\n 'group_members': group_members,\n 'pair_id': pair_id\n }\n\n elif self.mode == 'classic':\n image_name = list(self.name_to_relpath.keys())[index]\n image_path = self.dataset_path / self.name_to_relpath[image_name]\n im = PIL.Image.open(image_path)\n image = self.preprocess(im, return_tensors='pt')['pixel_values'][0]\n\n return {\n 'image': image,\n 'image_name': image_name\n }\n\n else:\n raise ValueError(\"mode should be in ['relative', 'classic']\")\n\n except Exception as e:\n print(f\"Exception: {e}\")\n\n def __len__(self):\n if self.mode == 'relative':\n return len(self.triplets)\n elif self.mode == 'classic':\n return len(self.name_to_relpath)\n else:\n raise ValueError(\"mode should be in ['relative', 'classic']\")"
},
{
"identifier": "CIRCODataset",
"path": "loader.py",
"snippet": "class CIRCODataset(Dataset):\n \"\"\"\n Copy-paste from https://github.com/miccunifi/SEARLE/blob/main/src/datasets.py\n CIRCO dataset class for PyTorch.\n The dataset can be used in 'relative' or 'classic' mode:\n - In 'classic' mode the dataset yield a dict with keys ['image', 'image_name']\n - In 'relative' mode the dataset yield dict with keys:\n - ['reference_image', 'reference_name', 'target_image', 'target_name', 'relative_captions', 'shared_concept',\n 'gt_img_ids', 'query_id'] when split == 'val'\n - ['reference_image', 'reference_name', 'relative_captions', 'shared_concept', 'query_id'] when split == test\n \"\"\"\n\n def __init__(self, dataset_path: Union[str, Path], split: Literal['val', 'test'],\n mode: Literal['relative', 'classic'], preprocess: callable):\n \"\"\"\n Args:\n dataset_path (Union[str, Path]): path to CIRCO dataset\n split (str): dataset split, should be in ['test', 'val']\n mode (str): dataset mode, should be in ['relative', 'classic']\n preprocess (callable): function which preprocesses the image\n \"\"\"\n\n # Set dataset paths and configurations\n dataset_path = Path(dataset_path)\n self.mode = mode\n self.split = split\n self.preprocess = preprocess\n self.data_path = dataset_path\n\n # Ensure input arguments are valid\n if mode not in ['relative', 'classic']:\n raise ValueError(\"mode should be in ['relative', 'classic']\")\n if split not in ['test', 'val']:\n raise ValueError(\"split should be in ['test', 'val']\")\n\n # Load COCO images information\n with open(dataset_path / 'COCO2017_unlabeled' / \"annotations\" / \"image_info_unlabeled2017.json\", \"r\") as f:\n imgs_info = json.load(f)\n\n self.img_paths = [dataset_path / 'COCO2017_unlabeled' / \"unlabeled2017\" / img_info[\"file_name\"] for img_info in\n imgs_info[\"images\"]]\n self.img_ids = [img_info[\"id\"] for img_info in imgs_info[\"images\"]]\n self.img_ids_indexes_map = {str(img_id): i for i, img_id in enumerate(self.img_ids)}\n\n # get CIRCO annotations\n with open(dataset_path / 'annotations' / f'{split}.json', \"r\") as f:\n self.annotations: List[dict] = json.load(f)\n\n # Get maximum number of ground truth images (for padding when loading the images)\n self.max_num_gts = 23 # Maximum number of ground truth images\n\n print(f\"CIRCODataset {split} dataset in {mode} mode initialized\")\n\n def get_target_img_ids(self, index) -> Dict[str, int]:\n \"\"\"\n Returns the id of the target image and ground truth images for a given query\n\n Args:\n index (int): id of the query\n\n Returns:\n Dict[str, int]: dictionary containing target image id and a list of ground truth image ids\n \"\"\"\n\n return {\n 'target_img_id': self.annotations[index]['target_img_id'],\n 'gt_img_ids': self.annotations[index]['gt_img_ids']\n }\n\n def __getitem__(self, index) -> dict:\n \"\"\"\n Returns a specific item from the dataset based on the index.\n\n In 'classic' mode, the dataset yields a dictionary with the following keys: [img, img_id]\n In 'relative' mode, the dataset yields dictionaries with the following keys:\n - [reference_img, reference_img_id, target_img, target_img_id, relative_caption, shared_concept, gt_img_ids,\n query_id]\n if split == val\n - [reference_img, reference_img_id, relative_caption, shared_concept, query_id] if split == test\n \"\"\"\n\n if self.mode == 'relative':\n # Get the query id\n query_id = str(self.annotations[index]['id'])\n\n # Get relative caption and shared concept\n relative_caption = self.annotations[index]['relative_caption']\n shared_concept = 
self.annotations[index]['shared_concept']\n\n # Get the reference image\n reference_img_id = str(self.annotations[index]['reference_img_id'])\n reference_img_path = self.img_paths[self.img_ids_indexes_map[reference_img_id]]\n reference_img = self.preprocess(PIL.Image.open(reference_img_path), return_tensors='pt')['pixel_values'][0]\n\n if self.split == 'val':\n # Get the target image and ground truth images\n target_img_id = str(self.annotations[index]['target_img_id'])\n gt_img_ids = [str(x) for x in self.annotations[index]['gt_img_ids']]\n target_img_path = self.img_paths[self.img_ids_indexes_map[target_img_id]]\n target_img = self.preprocess(PIL.Image.open(target_img_path), return_tensors='pt')['pixel_values'][0]\n\n # Pad ground truth image IDs with zeros for collate_fn\n gt_img_ids += [''] * (self.max_num_gts - len(gt_img_ids))\n\n return {\n 'reference_image': reference_img,\n 'reference_name': reference_img_id,\n 'target_image': target_img,\n 'target_name': target_img_id,\n 'relative_caption': relative_caption,\n 'shared_concept': shared_concept,\n 'gt_img_ids': gt_img_ids,\n 'query_id': query_id,\n }\n\n elif self.split == 'test':\n return {\n 'reference_image': reference_img,\n 'reference_name': reference_img_id,\n 'relative_caption': relative_caption,\n 'shared_concept': shared_concept,\n 'query_id': query_id,\n }\n\n elif self.mode == 'classic':\n # Get image ID and image path\n img_id = str(self.img_ids[index])\n img_path = self.img_paths[index]\n\n # Preprocess image and return\n img = self.preprocess(PIL.Image.open(img_path), return_tensors='pt')['pixel_values'][0]\n return {\n 'image': img,\n 'image_name': img_id\n }\n\n def __len__(self):\n \"\"\"\n Returns the length of the dataset.\n \"\"\"\n if self.mode == 'relative':\n return len(self.annotations)\n elif self.mode == 'classic':\n return len(self.img_ids)\n else:\n raise ValueError(\"mode should be in ['relative', 'classic']\")"
},
{
"identifier": "encode_with_pseudo_tokens_HF",
"path": "encode_with_pseudo_tokens.py",
"snippet": "def encode_with_pseudo_tokens_HF(clip_model: CLIPTextModelWithProjection, text: torch.Tensor, pseudo_tokens: torch.Tensor,\n num_tokens=1, return_last_states=False) -> torch.Tensor:\n x = clip_model.text_model.embeddings.token_embedding(text).type(clip_model.dtype) # [batch_size, n_ctx, d_model]\n x = torch.where(text.unsqueeze(-1) == 259,\n pseudo_tokens.unsqueeze(1).type(clip_model.dtype),\n x)\n x = x + clip_model.text_model.embeddings.position_embedding(clip_model.text_model.embeddings.position_ids)\n _causal_attention_mask = _make_causal_mask(text.shape, x.dtype, device=x.device)\n x = clip_model.text_model.encoder(inputs_embeds=x,\n attention_mask=None,\n causal_attention_mask=_causal_attention_mask,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=False)\n x = x[0]\n x_last = clip_model.text_model.final_layer_norm(x)\n x = x_last[torch.arange(x_last.shape[0], device=x_last.device),\n text.to(dtype=torch.int, device=x_last.device).argmax(dim=-1),\n ]\n if hasattr(clip_model, 'text_projection'):\n x = clip_model.text_projection(x)\n\n if return_last_states:\n return x, x_last\n else:\n return x"
},
{
"identifier": "build_text_encoder",
"path": "models.py",
"snippet": "def build_text_encoder(args):\n clip_model_dict = {'base32': 'openai/clip-vit-base-patch32',\n 'base': 'openai/clip-vit-base-patch16',\n 'large': 'openai/clip-vit-large-patch14',\n 'huge': 'laion/CLIP-ViT-H-14-laion2B-s32B-b79K',\n 'giga': 'Geonmo/CLIP-Giga-config-fixed',\n 'meta-large': 'facebook/metaclip-l14-fullcc2.5b',\n 'meta-huge': 'facebook/metaclip-h14-fullcc2.5b',\n }\n\n clip_preprocess = CLIPImageProcessor(crop_size={'height': 224, 'width': 224},\n do_center_crop=True,\n do_convert_rgb=True,\n do_normalize=True,\n do_rescale=True,\n do_resize=True,\n image_mean=[0.48145466, 0.4578275, 0.40821073],\n image_std=[0.26862954, 0.26130258, 0.27577711],\n resample=3,\n size={'shortest_edge': 224},\n )\n\n clip_vision_model = CLIPVisionModelWithProjection.from_pretrained(clip_model_dict[args.clip_model_name], torch_dtype=torch.float16 if args.mixed_precision == 'fp16' else torch.float32, cache_dir=args.cache_dir)\n\n clip_text_model = CLIPTextModelWithProjection.from_pretrained(clip_model_dict[args.clip_model_name], torch_dtype=torch.float16 if args.mixed_precision == 'fp16' else torch.float32, cache_dir=args.cache_dir)\n\n tokenizer = CLIPTokenizer.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0', subfolder='tokenizer_2', cache_dir=args.cache_dir)\n tokenizer.add_special_tokens({'additional_special_tokens':[\"[$]\"]}) # NOTE: 49408\n\n return clip_vision_model, clip_preprocess, clip_text_model, tokenizer"
},
{
"identifier": "Phi",
"path": "models.py",
"snippet": "class Phi(nn.Module):\n \"\"\"\n Textual Inversion Phi network.\n Takes as input the visual features of an image and outputs the pseudo-work embedding.\n Copy-paste from https://github.com/miccunifi/SEARLE/blob/main/src/phi.py\n \"\"\"\n\n def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, dropout: int):\n super().__init__()\n self.layers = nn.Sequential(\n nn.Linear(input_dim, hidden_dim),\n nn.GELU(),\n nn.Dropout(p=dropout),\n nn.Linear(hidden_dim, hidden_dim),\n nn.GELU(),\n nn.Dropout(p=dropout),\n nn.Linear(hidden_dim, output_dim),\n )\n\n def forward(self, x):\n #x = F.normalize(x, dim=-1)\n return self.layers(x)"
},
{
"identifier": "PIC2WORD",
"path": "models.py",
"snippet": "class PIC2WORD(nn.Module):\n def __init__(self, embed_dim=512, middle_dim=512, output_dim=512, n_layer=2, dropout=0.1):\n super().__init__()\n self.fc_out = nn.Linear(middle_dim, output_dim)\n layers = []\n dim = embed_dim\n for _ in range(n_layer):\n block = []\n block.append(nn.Linear(dim, middle_dim))\n block.append(nn.Dropout(dropout))\n block.append(nn.ReLU())\n dim = middle_dim\n layers.append(nn.Sequential(*block))\n self.layers = nn.Sequential(*layers)\n\n def forward(self, x: torch.Tensor):\n for layer in self.layers:\n x = layer(x)\n return self.fc_out(x)"
},
{
"identifier": "extract_image_features",
"path": "utils.py",
"snippet": "def extract_image_features(dataset: Dataset, clip_model: CLIPVisionModelWithProjection, batch_size: Optional[int] = 32,\n num_workers: Optional[int] = 10) -> Tuple[torch.Tensor, List[str]]:\ndef contrastive_loss(v1: torch.Tensor, v2: torch.Tensor, temperature: float) -> torch.Tensor:\ndef extract_pseudo_tokens_with_phi(clip_model: CLIPVisionModelWithProjection, phi: Phi, dataset: Dataset, args) -> Tuple[torch.Tensor, List[str]]:\ndef extract_image_features_with_names(clip_model: CLIPVisionModelWithProjection, dataset: Dataset) -> Tuple[torch.Tensor, List[str]]:\n def __init__(self, images: torch.Tensor, names: torch.Tensor):\n def __getitem__(self, index) -> dict:\n def __len__(self):\ndef get_templates():\nclass CustomTensorDataset(Dataset):"
}
] | import json
import pickle
import clip
import numpy as np
import torch
import torch.nn.functional as F
from argparse import ArgumentParser
from typing import List, Dict, Tuple
from clip.model import CLIP
from transformers import CLIPTextModelWithProjection
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from tqdm import tqdm
from data_utils import collate_fn, PROJECT_ROOT, targetpad_transform
from loader import FashionIQDataset, CIRRDataset, CIRCODataset
from encode_with_pseudo_tokens import encode_with_pseudo_tokens_HF
from models import build_text_encoder, Phi, PIC2WORD
from utils import extract_image_features, device, extract_pseudo_tokens_with_phi | 9,603 | gt_img_ids = np.array(gt_img_ids)[
np.array(gt_img_ids) != ''] # remove trailing empty strings added for collate_fn
similarity = predicted_feature @ index_features.T
sorted_indices = torch.topk(similarity, dim=-1, k=50).indices.cpu()
sorted_index_names = np.array(index_names)[sorted_indices]
map_labels = torch.tensor(np.isin(sorted_index_names, gt_img_ids), dtype=torch.uint8)
precisions = torch.cumsum(map_labels, dim=0) * map_labels # Consider only positions corresponding to GTs
precisions = precisions / torch.arange(1, map_labels.shape[0] + 1) # Compute precision for each position
ap_at5.append(float(torch.sum(precisions[:5]) / min(len(gt_img_ids), 5)))
ap_at10.append(float(torch.sum(precisions[:10]) / min(len(gt_img_ids), 10)))
ap_at25.append(float(torch.sum(precisions[:25]) / min(len(gt_img_ids), 25)))
ap_at50.append(float(torch.sum(precisions[:50]) / min(len(gt_img_ids), 50)))
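# Worked toy example of the AP@k computation above: if the ground-truth images sit at ranks 1
# and 3, then map_labels[:5] = [1, 0, 1, 0, 0], cumsum * map_labels = [1, 0, 2, 0, 0], dividing
# by [1, 2, 3, 4, 5] gives precisions [1, 0, 2/3, 0, 0], and with 2 GTs
# ap_at5 = (1 + 2/3) / min(2, 5) ≈ 0.83.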
assert target_name == gt_img_ids[0], f"Target name not in GTs {target_name} {gt_img_ids}"
single_gt_labels = torch.tensor(sorted_index_names == target_name)
recall_at5.append(float(torch.sum(single_gt_labels[:5])))
recall_at10.append(float(torch.sum(single_gt_labels[:10])))
recall_at25.append(float(torch.sum(single_gt_labels[:25])))
recall_at50.append(float(torch.sum(single_gt_labels[:50])))
map_at5 = np.mean(ap_at5) * 100
map_at10 = np.mean(ap_at10) * 100
map_at25 = np.mean(ap_at25) * 100
map_at50 = np.mean(ap_at50) * 100
recall_at5 = np.mean(recall_at5) * 100
recall_at10 = np.mean(recall_at10) * 100
recall_at25 = np.mean(recall_at25) * 100
recall_at50 = np.mean(recall_at50) * 100
return {
'circo_map_at5': map_at5,
'circo_map_at10': map_at10,
'circo_map_at25': map_at25,
'circo_map_at50': map_at50,
'circo_recall_at5': recall_at5,
'circo_recall_at10': recall_at10,
'circo_recall_at25': recall_at25,
'circo_recall_at50': recall_at50,
}
@torch.no_grad()
def circo_val_retrieval(dataset_path: str, image_encoder, text_encoder, ref_names_list: List[str], pseudo_tokens: torch.Tensor,
preprocess: callable) -> Dict[str, float]:
"""
Compute the retrieval metrics on the CIRCO validation set given the pseudo tokens and the reference names
"""
# Load the model
#clip_model, _ = clip.load(clip_model_name, device=device, jit=False)
#clip_model = clip_model.float().eval().requires_grad_(False)
# Extract the index features
classic_val_dataset = CIRCODataset(dataset_path, 'val', 'classic', preprocess)
index_features, index_names = extract_image_features(classic_val_dataset, image_encoder)
# Define the relative validation dataset
relative_val_dataset = CIRCODataset(dataset_path, 'val', 'relative', preprocess)
return circo_compute_val_metrics(relative_val_dataset, text_encoder, index_features, index_names, ref_names_list,
pseudo_tokens)
def main():
parser = ArgumentParser()
parser.add_argument("--exp-name", type=str, help="Experiment to evaluate")
parser.add_argument("--eval-type", type=str, choices=['oti', 'phi', 'searle', 'searle-xl', 'pic2word'], required=True,
help="If 'oti' evaluate directly using the inverted oti pseudo tokens, "
"if 'phi' predicts the pseudo tokens using the phi network, "
"if 'searle' uses the pre-trained SEARLE model to predict the pseudo tokens, "
"if 'searle-xl' uses the pre-trained SEARLE-XL model to predict the pseudo tokens"
)
parser.add_argument("--dataset", type=str, required=True, choices=['cirr', 'fashioniq', 'circo'],
help="Dataset to use")
parser.add_argument("--dataset-path", type=str, help="Path to the dataset", required=True)
parser.add_argument("--preprocess-type", default="clip", type=str, choices=['clip', 'targetpad'],
help="Preprocess pipeline to use")
parser.add_argument("--phi-checkpoint-name", type=str,
help="Phi checkpoint to use, needed when using phi, e.g. 'phi_20.pt'")
parser.add_argument("--clip_model_name", default="giga", type=str)
parser.add_argument("--cache_dir", default="./hf_models", type=str)
parser.add_argument("--l2_normalize", action="store_true", help="Whether or not to use l2 normalization")
args = parser.parse_args()
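# Example invocation (dataset path and checkpoint name below are placeholders, not values taken
# from this repository):
# python validate.py --eval-type phi --dataset cirr --dataset-path /path/to/cirr \
#     --phi-checkpoint-name phi_best.pt --clip_model_name large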
#if args.eval_type in ['phi', 'oti'] and args.exp_name is None:
# raise ValueError("Experiment name is required when using phi or oti evaluation type")
if args.eval_type == 'phi' and args.phi_checkpoint_name is None:
raise ValueError("Phi checkpoint name is required when using phi evaluation type")
if args.eval_type == 'oti':
experiment_path = PROJECT_ROOT / 'data' / "oti_pseudo_tokens" / args.dataset.lower() / 'val' / args.exp_name
if not experiment_path.exists():
raise ValueError(f"Experiment {args.exp_name} not found")
with open(experiment_path / 'hyperparameters.json') as f:
hyperparameters = json.load(f)
pseudo_tokens = torch.load(experiment_path / 'ema_oti_pseudo_tokens.pt', map_location=device)
with open(experiment_path / 'image_names.pkl', 'rb') as f:
ref_names_list = pickle.load(f)
clip_model_name = hyperparameters['clip_model_name']
clip_model, clip_preprocess = clip.load(clip_model_name, device='cpu', jit=False)
if args.preprocess_type == 'targetpad':
print('Target pad preprocess pipeline is used')
preprocess = targetpad_transform(1.25, clip_model.visual.input_resolution)
elif args.preprocess_type == 'clip':
print('CLIP preprocess pipeline is used')
preprocess = clip_preprocess
else:
raise ValueError("Preprocess type not supported")
elif args.eval_type in ['phi', 'searle', 'searle-xl', 'pic2word']:
if args.eval_type == 'phi':
args.mixed_precision = 'fp16'
|
torch.multiprocessing.set_sharing_strategy('file_system')
@torch.no_grad()
def fiq_generate_val_predictions(clip_model, relative_val_dataset: Dataset, ref_names_list: List[str],
pseudo_tokens: torch.Tensor) -> Tuple[torch.Tensor, List[str]]:
"""
Generates feature predictions for the validation set of FashionIQ.
"""
# Create data loader
relative_val_loader = DataLoader(dataset=relative_val_dataset, batch_size=32, num_workers=10,
pin_memory=False, collate_fn=collate_fn, shuffle=False)
predicted_features_list = []
target_names_list = []
# Compute features
for batch in tqdm(relative_val_loader):
reference_names = batch['reference_name']
target_names = batch['target_name']
relative_captions = batch['relative_captions']
flattened_captions: list = np.array(relative_captions).T.flatten().tolist()
input_captions = [
f"{flattened_captions[i].strip('.?, ')} and {flattened_captions[i + 1].strip('.?, ')}" for
i in range(0, len(flattened_captions), 2)]
input_captions_reversed = [
f"{flattened_captions[i + 1].strip('.?, ')} and {flattened_captions[i].strip('.?, ')}" for
i in range(0, len(flattened_captions), 2)]
input_captions = [
f"a photo of $ that {in_cap}" for in_cap in input_captions]
batch_tokens = torch.vstack([pseudo_tokens[ref_names_list.index(ref)].unsqueeze(0) for ref in reference_names])
tokenized_input_captions = clip.tokenize(input_captions, context_length=77).to(device)
text_features = encode_with_pseudo_tokens_HF(clip_model, tokenized_input_captions, batch_tokens)
input_captions_reversed = [
f"a photo of $ that {in_cap}" for in_cap in input_captions_reversed]
tokenized_input_captions_reversed = clip.tokenize(input_captions_reversed, context_length=77).to(device)
text_features_reversed = encode_with_pseudo_tokens_HF(clip_model, tokenized_input_captions_reversed,
batch_tokens)
predicted_features = F.normalize((F.normalize(text_features) + F.normalize(text_features_reversed)) / 2)
# predicted_features = F.normalize((text_features + text_features_reversed) / 2)
predicted_features_list.append(predicted_features)
target_names_list.extend(target_names)
predicted_features = torch.vstack(predicted_features_list)
return predicted_features, target_names_list
@torch.no_grad()
def fiq_compute_val_metrics(relative_val_dataset: Dataset, clip_model, index_features: torch.Tensor,
index_names: List[str], ref_names_list: List[str], pseudo_tokens: torch.Tensor) \
-> Dict[str, float]:
"""
Compute the retrieval metrics on the FashionIQ validation set given the dataset, pseudo tokens and the reference names
"""
# Generate the predicted features
predicted_features, target_names = fiq_generate_val_predictions(clip_model, relative_val_dataset, ref_names_list,
pseudo_tokens)
# Move the features to the device
index_features = index_features.to(device)
predicted_features = predicted_features.to(device)
# Normalize the features
index_features = F.normalize(index_features.float())
# Compute the distances
distances = 1 - predicted_features @ index_features.T
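    # Both feature sets are L2-normalized, so this is the cosine distance; argsort then ranks the
    # gallery images from most to least similar for each query.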
sorted_indices = torch.argsort(distances, dim=-1).cpu()
sorted_index_names = np.array(index_names)[sorted_indices]
# Check if the target names are in the top 10 and top 50
labels = torch.tensor(
sorted_index_names == np.repeat(np.array(target_names), len(index_names)).reshape(len(target_names), -1))
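    # Sanity check: every target image must appear exactly once in the gallery index.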
assert torch.equal(torch.sum(labels, dim=-1).int(), torch.ones(len(target_names)).int())
# Compute the metrics
recall_at10 = (torch.sum(labels[:, :10]) / len(labels)).item() * 100
recall_at50 = (torch.sum(labels[:, :50]) / len(labels)).item() * 100
return {'fiq_recall_at10': recall_at10,
'fiq_recall_at50': recall_at50}
@torch.no_grad()
def fiq_val_retrieval(dataset_path: str, dress_type: str, image_encoder, text_encoder, ref_names_list: List[str],
pseudo_tokens: torch.Tensor, preprocess: callable) -> Dict[str, float]:
"""
Compute the retrieval metrics on the FashionIQ validation set given the pseudo tokens and the reference names
"""
# Load the model
#clip_model, _ = clip.load(clip_model_name, device=device, jit=False)
#clip_model = clip_model.float().eval().requires_grad_(False)
# Extract the index features
classic_val_dataset = FashionIQDataset(dataset_path, 'val', [dress_type], 'classic', preprocess)
index_features, index_names = extract_image_features(classic_val_dataset, image_encoder)
# Define the relative dataset
relative_val_dataset = FashionIQDataset(dataset_path, 'val', [dress_type], 'relative', preprocess)
return fiq_compute_val_metrics(relative_val_dataset, text_encoder, index_features, index_names, ref_names_list,
pseudo_tokens)
@torch.no_grad()
def cirr_generate_val_predictions(clip_model: CLIPTextModelWithProjection, relative_val_dataset: Dataset, ref_names_list: List[str],
pseudo_tokens: torch.Tensor) -> \
Tuple[torch.Tensor, List[str], List[str], List[List[str]]]:
"""
    Generates feature predictions for the validation set of CIRR
"""
# Define the dataloader
relative_val_loader = DataLoader(dataset=relative_val_dataset, batch_size=32, num_workers=10,
pin_memory=False, collate_fn=collate_fn)
predicted_features_list = []
target_names_list = []
group_members_list = []
reference_names_list = []
for batch in tqdm(relative_val_loader):
reference_names = batch['reference_name']
target_names = batch['target_name']
relative_captions = batch['relative_caption']
group_members = batch['group_members']
group_members = np.array(group_members).T.tolist()
input_captions = [
f"a photo of $ that {rel_caption}" for rel_caption in relative_captions]
batch_tokens = torch.vstack([pseudo_tokens[ref_names_list.index(ref)].unsqueeze(0) for ref in reference_names])
tokenized_input_captions = clip.tokenize(input_captions, context_length=77).to(device)
text_features = encode_with_pseudo_tokens_HF(clip_model, tokenized_input_captions, batch_tokens)
predicted_features = F.normalize(text_features)
predicted_features_list.append(predicted_features)
target_names_list.extend(target_names)
group_members_list.extend(group_members)
reference_names_list.extend(reference_names)
predicted_features = torch.vstack(predicted_features_list)
return predicted_features, reference_names_list, target_names_list, group_members_list
@torch.no_grad()
def cirr_generate_val_predictions_with_phi(clip_model: CLIPTextModelWithProjection, phi, relative_val_dataset: Dataset, ref_names_list: List[str],
image_features: torch.Tensor) -> \
Tuple[torch.Tensor, List[str], List[str], List[List[str]]]:
"""
    Generates feature predictions for the validation set of CIRR
"""
# Define the dataloader
relative_val_loader = DataLoader(dataset=relative_val_dataset, batch_size=32, num_workers=10,
pin_memory=False, collate_fn=collate_fn)
predicted_features_list = []
target_names_list = []
group_members_list = []
reference_names_list = []
for batch in tqdm(relative_val_loader):
reference_names = batch['reference_name']
target_names = batch['target_name']
relative_captions = batch['relative_caption']
group_members = batch['group_members']
group_members = np.array(group_members).T.tolist()
input_captions = [
f"a photo of $ that {rel_caption}" for rel_caption in relative_captions]
# we need to make batch_tokens with selected_image_features
selected_image_features = torch.vstack([image_features[ref_names_list.index(ref)] for ref in reference_names])
tokenized_input_captions = clip.tokenize(input_captions, context_length=77).to(device)
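        # Build the token + position embeddings of the caption as context for phi, which predicts
        # the pseudo tokens from the reference image features and this context.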
context = clip_model.text_model.embeddings.token_embedding(tokenized_input_captions) + clip_model.text_model.embeddings.position_embedding(clip_model.text_model.embeddings.position_ids)
batch_tokens = phi(selected_image_features, context)
#batch_tokens = torch.vstack([pseudo_tokens[ref_names_list.index(ref)].unsqueeze(0) for ref in reference_names])
text_features = encode_with_pseudo_tokens_HF(clip_model, tokenized_input_captions, batch_tokens)
predicted_features = F.normalize(text_features)
predicted_features_list.append(predicted_features)
target_names_list.extend(target_names)
group_members_list.extend(group_members)
reference_names_list.extend(reference_names)
predicted_features = torch.vstack(predicted_features_list)
return predicted_features, reference_names_list, target_names_list, group_members_list
@torch.no_grad()
def cirr_compute_val_metrics(relative_val_dataset: Dataset, clip_model, index_features: torch.Tensor,
index_names: List[str], ref_names_list: List[str], pseudo_tokens: torch.Tensor) \
-> Dict[str, float]:
"""
Compute the retrieval metrics on the CIRR validation set given the dataset, pseudo tokens and the reference names
"""
# Generate the predicted features
predicted_features, reference_names, target_names, group_members = \
cirr_generate_val_predictions(clip_model, relative_val_dataset, ref_names_list, pseudo_tokens)
index_features = index_features.to(device)
predicted_features = predicted_features.to(device)
# Normalize the index features
index_features = F.normalize(index_features, dim=-1).float()
predicted_features = predicted_features.float()
# Compute the distances and sort the results
distances = 1 - predicted_features @ index_features.T
sorted_indices = torch.argsort(distances, dim=-1).cpu()
sorted_index_names = np.array(index_names)[sorted_indices]
# Delete the reference image from the results
reference_mask = torch.tensor(
sorted_index_names != np.repeat(np.array(reference_names), len(index_names)).reshape(len(target_names), -1))
sorted_index_names = sorted_index_names[reference_mask].reshape(sorted_index_names.shape[0],
sorted_index_names.shape[1] - 1)
# Compute the ground-truth labels wrt the predictions
labels = torch.tensor(
sorted_index_names == np.repeat(np.array(target_names), len(index_names) - 1).reshape(len(target_names), -1))
# Compute the subset predictions and ground-truth labels
group_members = np.array(group_members)
group_mask = (sorted_index_names[..., None] == group_members[:, None, :]).sum(-1).astype(bool)
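    # group_mask keeps only the positions of each query's CIRR subset members, so group_labels
    # ranks just those candidates for the group recall metrics.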
group_labels = labels[group_mask].reshape(labels.shape[0], -1)
assert torch.equal(torch.sum(labels, dim=-1).int(), torch.ones(len(target_names)).int())
assert torch.equal(torch.sum(group_labels, dim=-1).int(), torch.ones(len(target_names)).int())
# Compute the metrics
recall_at1 = (torch.sum(labels[:, :1]) / len(labels)).item() * 100
recall_at5 = (torch.sum(labels[:, :5]) / len(labels)).item() * 100
recall_at10 = (torch.sum(labels[:, :10]) / len(labels)).item() * 100
recall_at50 = (torch.sum(labels[:, :50]) / len(labels)).item() * 100
group_recall_at1 = (torch.sum(group_labels[:, :1]) / len(group_labels)).item() * 100
group_recall_at2 = (torch.sum(group_labels[:, :2]) / len(group_labels)).item() * 100
group_recall_at3 = (torch.sum(group_labels[:, :3]) / len(group_labels)).item() * 100
return {
'cirr_recall_at1': recall_at1,
'cirr_recall_at5': recall_at5,
'cirr_recall_at10': recall_at10,
'cirr_recall_at50': recall_at50,
'cirr_group_recall_at1': group_recall_at1,
'cirr_group_recall_at2': group_recall_at2,
'cirr_group_recall_at3': group_recall_at3,
}
@torch.no_grad()
def cirr_compute_val_metrics_with_phi(relative_val_dataset: Dataset, clip_model: CLIPTextModelWithProjection, phi, index_features: torch.Tensor,
index_names: List[str], ref_names_list: List[str], image_features: torch.Tensor) \
-> Dict[str, float]:
"""
Compute the retrieval metrics on the CIRR validation set given the dataset, pseudo tokens and the reference names
"""
# Generate the predicted features
predicted_features, reference_names, target_names, group_members = \
cirr_generate_val_predictions_with_phi(clip_model, phi, relative_val_dataset, ref_names_list, image_features)
index_features = index_features.to(device)
predicted_features = predicted_features.to(device)
# Normalize the index features
index_features = F.normalize(index_features, dim=-1).float()
predicted_features = predicted_features.float()
# Compute the distances and sort the results
distances = 1 - predicted_features @ index_features.T
sorted_indices = torch.argsort(distances, dim=-1).cpu()
sorted_index_names = np.array(index_names)[sorted_indices]
# Delete the reference image from the results
reference_mask = torch.tensor(
sorted_index_names != np.repeat(np.array(reference_names), len(index_names)).reshape(len(target_names), -1))
sorted_index_names = sorted_index_names[reference_mask].reshape(sorted_index_names.shape[0],
sorted_index_names.shape[1] - 1)
# Compute the ground-truth labels wrt the predictions
labels = torch.tensor(
sorted_index_names == np.repeat(np.array(target_names), len(index_names) - 1).reshape(len(target_names), -1))
# Compute the subset predictions and ground-truth labels
group_members = np.array(group_members)
group_mask = (sorted_index_names[..., None] == group_members[:, None, :]).sum(-1).astype(bool)
group_labels = labels[group_mask].reshape(labels.shape[0], -1)
assert torch.equal(torch.sum(labels, dim=-1).int(), torch.ones(len(target_names)).int())
assert torch.equal(torch.sum(group_labels, dim=-1).int(), torch.ones(len(target_names)).int())
# Compute the metrics
recall_at1 = (torch.sum(labels[:, :1]) / len(labels)).item() * 100
recall_at5 = (torch.sum(labels[:, :5]) / len(labels)).item() * 100
recall_at10 = (torch.sum(labels[:, :10]) / len(labels)).item() * 100
recall_at50 = (torch.sum(labels[:, :50]) / len(labels)).item() * 100
group_recall_at1 = (torch.sum(group_labels[:, :1]) / len(group_labels)).item() * 100
group_recall_at2 = (torch.sum(group_labels[:, :2]) / len(group_labels)).item() * 100
group_recall_at3 = (torch.sum(group_labels[:, :3]) / len(group_labels)).item() * 100
return {
'cirr_recall_at1': recall_at1,
'cirr_recall_at5': recall_at5,
'cirr_recall_at10': recall_at10,
'cirr_recall_at50': recall_at50,
'cirr_group_recall_at1': group_recall_at1,
'cirr_group_recall_at2': group_recall_at2,
'cirr_group_recall_at3': group_recall_at3,
}
@torch.no_grad()
def cirr_val_retrieval(dataset_path: str, image_encoder, text_encoder, ref_names_list: list, pseudo_tokens: torch.Tensor,
preprocess: callable) -> Dict[str, float]:
"""
Compute the retrieval metrics on the CIRR validation set given the pseudo tokens and the reference names
"""
# Load the model
#clip_model, _ = clip.load(clip_model_name, device=device, jit=False)
#clip_model = clip_model.float().eval().requires_grad_(False)
# Extract the index features
classic_val_dataset = CIRRDataset(dataset_path, 'val', 'classic', preprocess)
index_features, index_names = extract_image_features(classic_val_dataset, image_encoder)
# Define the relative validation dataset
relative_val_dataset = CIRRDataset(dataset_path, 'val', 'relative', preprocess)
return cirr_compute_val_metrics(relative_val_dataset, text_encoder, index_features, index_names,
ref_names_list, pseudo_tokens)
@torch.no_grad()
def circo_generate_val_predictions(clip_model, relative_val_dataset: Dataset, ref_names_list: List[str],
pseudo_tokens: torch.Tensor) -> Tuple[
torch.Tensor, List[str], list]:
"""
    Generates feature predictions for the validation set of CIRCO
"""
# Create the data loader
relative_val_loader = DataLoader(dataset=relative_val_dataset, batch_size=32, num_workers=10,
pin_memory=False, collate_fn=collate_fn, shuffle=False)
predicted_features_list = []
target_names_list = []
gts_img_ids_list = []
# Compute the features
for batch in tqdm(relative_val_loader):
reference_names = batch['reference_name']
target_names = batch['target_name']
relative_captions = batch['relative_caption']
gt_img_ids = batch['gt_img_ids']
gt_img_ids = np.array(gt_img_ids).T.tolist()
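        # CIRCO queries can have multiple ground-truth images; gt_img_ids is transposed back to a
        # per-sample list here (the padding added for collate_fn is stripped later in the metrics).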
input_captions = [f"a photo of $ that {caption}" for caption in relative_captions]
batch_tokens = torch.vstack([pseudo_tokens[ref_names_list.index(ref)].unsqueeze(0) for ref in reference_names])
tokenized_input_captions = clip.tokenize(input_captions, context_length=77).to(device)
text_features = encode_with_pseudo_tokens_HF(clip_model, tokenized_input_captions, batch_tokens)
predicted_features = F.normalize(text_features)
predicted_features_list.append(predicted_features)
target_names_list.extend(target_names)
gts_img_ids_list.extend(gt_img_ids)
predicted_features = torch.vstack(predicted_features_list)
return predicted_features, target_names_list, gts_img_ids_list
@torch.no_grad()
def circo_compute_val_metrics(relative_val_dataset: Dataset, clip_model, index_features: torch.Tensor,
index_names: List[str], ref_names_list: List[str], pseudo_tokens: torch.Tensor) \
-> Dict[str, float]:
"""
Compute the retrieval metrics on the CIRCO validation set given the dataset, pseudo tokens and the reference names
"""
# Generate the predicted features
predicted_features, target_names, gts_img_ids = circo_generate_val_predictions(clip_model, relative_val_dataset,
ref_names_list, pseudo_tokens)
ap_at5 = []
ap_at10 = []
ap_at25 = []
ap_at50 = []
recall_at5 = []
recall_at10 = []
recall_at25 = []
recall_at50 = []
# Move the features to the device
index_features = index_features.to(device)
predicted_features = predicted_features.to(device)
# Normalize the features
index_features = F.normalize(index_features.float())
for predicted_feature, target_name, gt_img_ids in tqdm(zip(predicted_features, target_names, gts_img_ids)):
gt_img_ids = np.array(gt_img_ids)[
np.array(gt_img_ids) != ''] # remove trailing empty strings added for collate_fn
similarity = predicted_feature @ index_features.T
sorted_indices = torch.topk(similarity, dim=-1, k=50).indices.cpu()
sorted_index_names = np.array(index_names)[sorted_indices]
map_labels = torch.tensor(np.isin(sorted_index_names, gt_img_ids), dtype=torch.uint8)
precisions = torch.cumsum(map_labels, dim=0) * map_labels # Consider only positions corresponding to GTs
precisions = precisions / torch.arange(1, map_labels.shape[0] + 1) # Compute precision for each position
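        # AP@K = (sum of precision at the ranks where a ground truth appears) / min(#ground truths, K)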
ap_at5.append(float(torch.sum(precisions[:5]) / min(len(gt_img_ids), 5)))
ap_at10.append(float(torch.sum(precisions[:10]) / min(len(gt_img_ids), 10)))
ap_at25.append(float(torch.sum(precisions[:25]) / min(len(gt_img_ids), 25)))
ap_at50.append(float(torch.sum(precisions[:50]) / min(len(gt_img_ids), 50)))
assert target_name == gt_img_ids[0], f"Target name not in GTs {target_name} {gt_img_ids}"
single_gt_labels = torch.tensor(sorted_index_names == target_name)
recall_at5.append(float(torch.sum(single_gt_labels[:5])))
recall_at10.append(float(torch.sum(single_gt_labels[:10])))
recall_at25.append(float(torch.sum(single_gt_labels[:25])))
recall_at50.append(float(torch.sum(single_gt_labels[:50])))
map_at5 = np.mean(ap_at5) * 100
map_at10 = np.mean(ap_at10) * 100
map_at25 = np.mean(ap_at25) * 100
map_at50 = np.mean(ap_at50) * 100
recall_at5 = np.mean(recall_at5) * 100
recall_at10 = np.mean(recall_at10) * 100
recall_at25 = np.mean(recall_at25) * 100
recall_at50 = np.mean(recall_at50) * 100
return {
'circo_map_at5': map_at5,
'circo_map_at10': map_at10,
'circo_map_at25': map_at25,
'circo_map_at50': map_at50,
'circo_recall_at5': recall_at5,
'circo_recall_at10': recall_at10,
'circo_recall_at25': recall_at25,
'circo_recall_at50': recall_at50,
}
@torch.no_grad()
def circo_val_retrieval(dataset_path: str, image_encoder, text_encoder, ref_names_list: List[str], pseudo_tokens: torch.Tensor,
preprocess: callable) -> Dict[str, float]:
"""
Compute the retrieval metrics on the CIRCO validation set given the pseudo tokens and the reference names
"""
# Load the model
#clip_model, _ = clip.load(clip_model_name, device=device, jit=False)
#clip_model = clip_model.float().eval().requires_grad_(False)
# Extract the index features
classic_val_dataset = CIRCODataset(dataset_path, 'val', 'classic', preprocess)
index_features, index_names = extract_image_features(classic_val_dataset, image_encoder)
# Define the relative validation dataset
relative_val_dataset = CIRCODataset(dataset_path, 'val', 'relative', preprocess)
return circo_compute_val_metrics(relative_val_dataset, text_encoder, index_features, index_names, ref_names_list,
pseudo_tokens)
def main():
parser = ArgumentParser()
parser.add_argument("--exp-name", type=str, help="Experiment to evaluate")
parser.add_argument("--eval-type", type=str, choices=['oti', 'phi', 'searle', 'searle-xl', 'pic2word'], required=True,
help="If 'oti' evaluate directly using the inverted oti pseudo tokens, "
"if 'phi' predicts the pseudo tokens using the phi network, "
"if 'searle' uses the pre-trained SEARLE model to predict the pseudo tokens, "
"if 'searle-xl' uses the pre-trained SEARLE-XL model to predict the pseudo tokens"
)
parser.add_argument("--dataset", type=str, required=True, choices=['cirr', 'fashioniq', 'circo'],
help="Dataset to use")
parser.add_argument("--dataset-path", type=str, help="Path to the dataset", required=True)
parser.add_argument("--preprocess-type", default="clip", type=str, choices=['clip', 'targetpad'],
help="Preprocess pipeline to use")
parser.add_argument("--phi-checkpoint-name", type=str,
help="Phi checkpoint to use, needed when using phi, e.g. 'phi_20.pt'")
parser.add_argument("--clip_model_name", default="giga", type=str)
parser.add_argument("--cache_dir", default="./hf_models", type=str)
parser.add_argument("--l2_normalize", action="store_true", help="Whether or not to use l2 normalization")
args = parser.parse_args()
#if args.eval_type in ['phi', 'oti'] and args.exp_name is None:
# raise ValueError("Experiment name is required when using phi or oti evaluation type")
if args.eval_type == 'phi' and args.phi_checkpoint_name is None:
raise ValueError("Phi checkpoint name is required when using phi evaluation type")
if args.eval_type == 'oti':
experiment_path = PROJECT_ROOT / 'data' / "oti_pseudo_tokens" / args.dataset.lower() / 'val' / args.exp_name
if not experiment_path.exists():
raise ValueError(f"Experiment {args.exp_name} not found")
with open(experiment_path / 'hyperparameters.json') as f:
hyperparameters = json.load(f)
pseudo_tokens = torch.load(experiment_path / 'ema_oti_pseudo_tokens.pt', map_location=device)
with open(experiment_path / 'image_names.pkl', 'rb') as f:
ref_names_list = pickle.load(f)
clip_model_name = hyperparameters['clip_model_name']
clip_model, clip_preprocess = clip.load(clip_model_name, device='cpu', jit=False)
if args.preprocess_type == 'targetpad':
print('Target pad preprocess pipeline is used')
preprocess = targetpad_transform(1.25, clip_model.visual.input_resolution)
elif args.preprocess_type == 'clip':
print('CLIP preprocess pipeline is used')
preprocess = clip_preprocess
else:
raise ValueError("Preprocess type not supported")
elif args.eval_type in ['phi', 'searle', 'searle-xl', 'pic2word']:
if args.eval_type == 'phi':
args.mixed_precision = 'fp16' | image_encoder, clip_preprocess, text_encoder, tokenizer = build_text_encoder(args) | 7 | 2023-12-01 08:05:51+00:00 | 12k |
uezo/aiproxy | tests/test_chatgpt.py | [
{
"identifier": "RequestFilterBase",
"path": "aiproxy/proxy.py",
"snippet": "class RequestFilterBase(ABC):\n @abstractmethod\n async def filter(self, request_id: str, request_json: dict, request_headers: dict) -> Union[str, None]:\n ..."
},
{
"identifier": "ResponseFilterBase",
"path": "aiproxy/proxy.py",
"snippet": "class ResponseFilterBase(ABC):\n @abstractmethod\n async def filter(self, request_id: str, response_json: dict) -> Union[dict, None]:\n ..."
},
{
"identifier": "AccessLogBase",
"path": "aiproxy/accesslog.py",
"snippet": "class _AccessLogBase:\nclass RequestItemBase(QueueItemBase):\nclass ResponseItemBase(QueueItemBase):\nclass StreamChunkItemBase(QueueItemBase):\nclass ErrorItemBase(QueueItemBase):\nclass WorkerShutdownItem(QueueItemBase):\nclass AccessLog(AccessLogBase): ...\nclass AccessLogWorker:\n def __tablename__(cls):\n def id(cls):\n def request_id(cls):\n def created_at(cls):\n def direction(cls):\n def status_code(cls):\n def content(cls):\n def function_call(cls):\n def tool_calls(cls):\n def raw_body(cls):\n def raw_headers(cls):\n def model(cls):\n def prompt_tokens(cls):\n def completion_tokens(cls):\n def request_time(cls):\n def request_time_api(cls):\n def __init__(self, request_id: str, request_json: dict, request_headers: dict) -> None:\n def to_accesslog(self, accesslog_cls: _AccessLogBase) -> _AccessLogBase:\n def __init__(self, request_id: str, response_json: dict, response_headers: dict = None, duration: float = 0, duration_api: float = 0, status_code: int = 0) -> None:\n def to_accesslog(self, accesslog_cls: _AccessLogBase) -> _AccessLogBase:\n def __init__(self, request_id: str, chunk_json: dict = None, response_headers: dict = None, duration: float = 0, duration_api: float = 0, request_json: dict = None, status_code: int = 0) -> None:\n def to_accesslog(self, chunks: list, accesslog_cls: _AccessLogBase) -> _AccessLogBase:\n def __init__(self, request_id: str, exception: Exception, traceback_info: str, response_json: dict = None, response_headers: dict = None, status_code: int = 0) -> None:\n def to_accesslog(self, accesslog_cls: _AccessLogBase) -> _AccessLogBase:\n def to_dict(self) -> dict:\n def __init__(self, *, connection_str: str = \"sqlite:///aiproxy.db\", db_engine = None, accesslog_cls = AccessLog, queue_client: QueueClientBase = None):\n def insert_request(self, accesslog: _AccessLogBase, db: Session):\n def insert_response(self, accesslog: _AccessLogBase, db: Session):\n def use_db(self, item: QueueItemBase):\n def process_item(self, item: QueueItemBase, db: Session):\n def run(self):"
},
{
"identifier": "ChatGPTProxy",
"path": "aiproxy/chatgpt.py",
"snippet": "class ChatGPTProxy(ProxyBase):\n _empty_openai_api_key = \"OPENAI_API_KEY_IS_NOT_SET\"\n\n def __init__(\n self,\n *,\n api_key: str = None,\n async_client: AsyncClient = None,\n max_retries: int = 0,\n timeout: float = 60.0,\n request_filters: List[RequestFilterBase] = None,\n response_filters: List[ResponseFilterBase] = None,\n request_item_class: type = ChatGPTRequestItem,\n response_item_class: type = ChatGPTResponseItem,\n stream_response_item_class: type = ChatGPTStreamResponseItem,\n error_item_class: type = ChatGPTErrorItem,\n access_logger_queue: QueueClientBase,\n ):\n super().__init__(\n request_filters=request_filters,\n response_filters=response_filters,\n access_logger_queue=access_logger_queue\n )\n\n # Log items\n self.request_item_class = request_item_class\n self.response_item_class = response_item_class\n self.stream_response_item_class = stream_response_item_class\n self.error_item_class = error_item_class\n\n # ChatGPT client\n if async_client:\n self.client = async_client\n else:\n self.client = AsyncClient(\n api_key=api_key or os.getenv(\"OPENAI_API_KEY\") or self._empty_openai_api_key,\n max_retries=max_retries,\n timeout=timeout\n )\n\n async def filter_request(self, request_id: str, request_json: dict, request_headers: dict) -> Union[dict, JSONResponse, EventSourceResponse]:\n for f in self.request_filters:\n if json_resp := await f.filter(request_id, request_json, request_headers):\n # Return response if filter returns string\n resp_for_log = {\n \"id\": \"-\",\n \"choices\": [{\"message\": {\"role\": \"assistant\", \"content\": json_resp}, \"finish_reason\": \"stop\", \"index\": 0}],\n \"created\": 0,\n \"model\": \"request_filter\",\n \"object\": \"chat.completion\",\n \"usage\": {\"prompt_tokens\": 0, \"completion_tokens\": 0, \"total_tokens\": 0}\n }\n # Response log\n self.access_logger_queue.put(self.response_item_class(\n request_id=request_id,\n response_json=resp_for_log,\n status_code=200\n ))\n\n if request_json.get(\"stream\"):\n # Stream\n async def filter_response_stream(content: str):\n # First delta\n resp = {\n \"id\": \"-\",\n \"choices\": [{\"delta\": {\"role\": \"assistant\", \"content\": \"\"}, \"finish_reason\": None, \"index\": 0}],\n \"created\": 0,\n \"model\": \"request_filter\",\n \"object\": \"chat.completion\",\n \"usage\": {\"prompt_tokens\": 0, \"completion_tokens\": 0, \"total_tokens\": 0}\n }\n yield json.dumps(resp)\n # Last delta\n resp[\"choices\"][0] = {\"delta\": {\"content\": content}, \"finish_reason\": \"stop\", \"index\": 0}\n yield json.dumps(resp)\n\n return self.return_response_with_headers(EventSourceResponse(\n filter_response_stream(json_resp)\n ), request_id)\n\n else:\n # Non-stream\n return self.return_response_with_headers(JSONResponse(resp_for_log), request_id)\n\n return request_json\n\n async def filter_response(self, request_id: str, response: ChatCompletion) -> ChatCompletion:\n response_json = response.model_dump()\n\n for f in self.response_filters:\n if json_resp := await f.filter(request_id, response_json):\n return response.model_validate(json_resp)\n\n return response.model_validate(response_json)\n\n def return_response_with_headers(self, resp: JSONResponse, request_id: str):\n self.add_response_headers(response=resp, request_id=request_id)\n return resp\n\n def add_route(self, app: FastAPI, base_url: str):\n @app.post(base_url)\n async def handle_request(request: Request):\n request_id = str(uuid4())\n\n try:\n start_time = time.time()\n request_json = await request.json()\n 
request_headers = dict(request.headers.items())\n\n # Log request\n self.access_logger_queue.put(self.request_item_class(\n request_id=request_id,\n request_json=request_json,\n request_headers=request_headers\n ))\n\n # Filter request\n request_json = await self.filter_request(request_id, request_json, request_headers)\n if isinstance(request_json, JSONResponse) or isinstance(request_json, EventSourceResponse):\n return request_json\n\n # Call API\n start_time_api = time.time()\n if self.client.api_key != self._empty_openai_api_key:\n # Always use server api key if set to client\n raw_response = await self.client.chat.completions.with_raw_response.create(**request_json)\n elif user_auth_header := request_headers.get(\"authorization\"): # Lower case from client.\n raw_response = await self.client.chat.completions.with_raw_response.create(\n **request_json, extra_headers={\"Authorization\": user_auth_header} # Pascal to server\n )\n else:\n # Call API anyway ;)\n raw_response = await self.client.chat.completions.with_raw_response.create(**request_json)\n\n completion_response = raw_response.parse()\n completion_response_headers = raw_response.headers\n completion_status_code = raw_response.status_code\n if \"content-encoding\" in completion_response_headers:\n completion_response_headers.pop(\"content-encoding\") # Remove \"br\" that will be changed by this proxy\n\n # Handling response from API\n if request_json.get(\"stream\"):\n async def process_stream(stream: AsyncContentStream) -> AsyncGenerator[str, None]:\n # Async content generator\n try:\n async for chunk in stream:\n self.access_logger_queue.put(self.stream_response_item_class(\n request_id=request_id,\n chunk_json=chunk.model_dump()\n ))\n if chunk:\n yield chunk.model_dump_json()\n \n finally:\n # Response log\n now = time.time()\n self.access_logger_queue.put(self.stream_response_item_class(\n request_id=request_id,\n response_headers=completion_response_headers,\n duration=now - start_time,\n duration_api=now - start_time_api,\n request_json=request_json,\n status_code=completion_status_code\n ))\n\n return self.return_response_with_headers(EventSourceResponse(\n process_stream(completion_response),\n headers=completion_response_headers\n ), request_id)\n\n else:\n duration_api = time.time() - start_time_api\n\n # Filter response\n completion_response = await self.filter_response(request_id, completion_response)\n\n # Response log\n self.access_logger_queue.put(self.response_item_class(\n request_id=request_id,\n response_json=completion_response.model_dump(),\n response_headers=completion_response_headers,\n duration=time.time() - start_time,\n duration_api=duration_api,\n status_code=completion_status_code\n ))\n\n return self.return_response_with_headers(JSONResponse(\n content=completion_response.model_dump(),\n headers=completion_response_headers\n ), request_id)\n\n return self.return_response_with_headers(JSONResponse(\n content=completion_response.model_dump(),\n headers=completion_response_headers\n ), request_id)\n\n # Error handlers\n except RequestFilterException as rfex:\n logger.error(f\"Request filter error: {rfex}\\n{traceback.format_exc()}\")\n\n resp_json = {\"error\": {\"message\": rfex.message, \"type\": \"request_filter_error\", \"param\": None, \"code\": None}}\n\n # Error log\n self.access_logger_queue.put(self.error_item_class(\n request_id=request_id,\n exception=rfex,\n traceback_info=traceback.format_exc(),\n response_json=resp_json,\n status_code=rfex.status_code\n ))\n\n return 
self.return_response_with_headers(JSONResponse(resp_json, status_code=rfex.status_code), request_id)\n\n except ResponseFilterException as rfex:\n logger.error(f\"Response filter error: {rfex}\\n{traceback.format_exc()}\")\n\n resp_json = {\"error\": {\"message\": rfex.message, \"type\": \"response_filter_error\", \"param\": None, \"code\": None}}\n\n # Error log\n self.access_logger_queue.put(self.error_item_class(\n request_id=request_id,\n exception=rfex,\n traceback_info=traceback.format_exc(),\n response_json=resp_json,\n status_code=rfex.status_code\n ))\n\n return self.return_response_with_headers(JSONResponse(resp_json, status_code=rfex.status_code), request_id)\n\n except (APIStatusError, APIResponseValidationError) as status_err:\n logger.error(f\"APIStatusError from ChatGPT: {status_err}\\n{traceback.format_exc()}\")\n\n # Error log\n try:\n resp_json = status_err.response.json()\n except:\n resp_json = str(status_err.response.content)\n\n self.access_logger_queue.put(self.error_item_class(\n request_id=request_id,\n exception=status_err,\n traceback_info=traceback.format_exc(),\n response_json=resp_json,\n status_code=status_err.status_code\n ))\n\n return self.return_response_with_headers(JSONResponse(resp_json, status_code=status_err.status_code), request_id)\n\n except APIError as api_err:\n logger.error(f\"APIError from ChatGPT: {api_err}\\n{traceback.format_exc()}\")\n\n resp_json = {\"error\": {\"message\": api_err.message, \"type\": api_err.type, \"param\": api_err.param, \"code\": api_err.code}}\n\n # Error log\n self.access_logger_queue.put(self.error_item_class(\n request_id=request_id,\n exception=api_err,\n traceback_info=traceback.format_exc(),\n response_json=resp_json,\n status_code=502\n ))\n\n return self.return_response_with_headers(JSONResponse(resp_json, status_code=502), request_id)\n\n except OpenAIError as oai_err:\n logger.error(f\"OpenAIError: {oai_err}\\n{traceback.format_exc()}\")\n\n resp_json = {\"error\": {\"message\": str(oai_err), \"type\": \"openai_error\", \"param\": None, \"code\": None}}\n\n # Error log\n self.access_logger_queue.put(self.error_item_class(\n request_id=request_id,\n exception=oai_err,\n traceback_info=traceback.format_exc(),\n response_json=resp_json,\n status_code=502\n ))\n\n return self.return_response_with_headers(JSONResponse(resp_json, status_code=502), request_id)\n\n except Exception as ex:\n logger.error(f\"Error at server: {ex}\\n{traceback.format_exc()}\")\n\n resp_json = {\"error\": {\"message\": \"Proxy error\", \"type\": \"proxy_error\", \"param\": None, \"code\": None}}\n\n # Error log\n self.access_logger_queue.put(self.error_item_class(\n request_id=request_id,\n exception=ex,\n traceback_info=traceback.format_exc(),\n response_json=resp_json,\n status_code=502\n ))\n\n return self.return_response_with_headers(JSONResponse(resp_json, status_code=502), request_id)"
},
{
"identifier": "AccessLogWorker",
"path": "aiproxy/accesslog.py",
"snippet": "class AccessLogWorker:\n def __init__(self, *, connection_str: str = \"sqlite:///aiproxy.db\", db_engine = None, accesslog_cls = AccessLog, queue_client: QueueClientBase = None):\n if db_engine:\n self.db_engine = db_engine\n else:\n self.db_engine = create_engine(connection_str)\n self.accesslog_cls = accesslog_cls\n self.accesslog_cls.metadata.create_all(bind=self.db_engine)\n self.get_session = sessionmaker(autocommit=False, autoflush=False, bind=self.db_engine)\n self.queue_client = queue_client or DefaultQueueClient()\n self.chunk_buffer = {}\n\n def insert_request(self, accesslog: _AccessLogBase, db: Session):\n db.add(accesslog)\n db.commit()\n\n def insert_response(self, accesslog: _AccessLogBase, db: Session):\n db.add(accesslog)\n db.commit()\n\n def use_db(self, item: QueueItemBase):\n return not (isinstance(item, StreamChunkItemBase) and item.duration == 0)\n\n def process_item(self, item: QueueItemBase, db: Session):\n try:\n # Request\n if isinstance(item, RequestItemBase):\n self.insert_request(item.to_accesslog(self.accesslog_cls), db)\n\n # Non-stream response\n elif isinstance(item, ResponseItemBase):\n self.insert_response(item.to_accesslog(self.accesslog_cls), db)\n\n # Stream response\n elif isinstance(item, StreamChunkItemBase):\n if not self.chunk_buffer.get(item.request_id):\n self.chunk_buffer[item.request_id] = []\n\n if item.duration == 0:\n self.chunk_buffer[item.request_id].append(item)\n\n else:\n # Last chunk data for specific request_id\n self.insert_response(item.to_accesslog(\n self.chunk_buffer[item.request_id], self.accesslog_cls\n ), db)\n # Remove chunks from buffer\n del self.chunk_buffer[item.request_id]\n\n # Error response\n elif isinstance(item, ErrorItemBase):\n self.insert_response(item.to_accesslog(self.accesslog_cls), db)\n\n except Exception as ex:\n logger.error(f\"Error at processing queue item: {ex}\\n{traceback.format_exc()}\")\n\n\n def run(self):\n while True:\n sleep(self.queue_client.dequeue_interval)\n db = None\n try:\n items = self.queue_client.get()\n except Exception as ex:\n logger.error(f\"Error at getting items from queue client: {ex}\\n{traceback.format_exc()}\")\n continue\n\n for item in items:\n try:\n if isinstance(item, WorkerShutdownItem) or item is None:\n return\n\n if db is None and self.use_db(item):\n # Get db session just once in the loop when the item that uses db found\n db = self.get_session()\n\n self.process_item(item, db)\n\n except Exception as pex:\n logger.error(f\"Error at processing loop: {pex}\\n{traceback.format_exc()}\")\n # Try to persist data in error log instead\n try:\n logger.error(f\"data: {item.to_json()}\")\n except:\n logger.error(f\"data(to_json() failed): {str(item)}\")\n\n if db is not None:\n try:\n db.close()\n except Exception as dbex:\n logger.error(f\"Error at closing db session: {dbex}\\n{traceback.format_exc()}\")"
},
{
"identifier": "_AccessLogBase",
"path": "aiproxy/accesslog.py",
"snippet": "class _AccessLogBase:\n @declared_attr\n def __tablename__(cls):\n return cls.__name__.lower()\n\n @declared_attr\n def id(cls):\n return Column(Integer, primary_key=True)\n\n @declared_attr\n def request_id(cls):\n return Column(String)\n\n @declared_attr\n def created_at(cls):\n return Column(DateTime)\n\n @declared_attr\n def direction(cls):\n return Column(String)\n\n @declared_attr\n def status_code(cls):\n return Column(Integer)\n\n @declared_attr\n def content(cls):\n return Column(String)\n\n @declared_attr\n def function_call(cls):\n return Column(String)\n\n @declared_attr\n def tool_calls(cls):\n return Column(String)\n\n @declared_attr\n def raw_body(cls):\n return Column(String)\n\n @declared_attr\n def raw_headers(cls):\n return Column(String)\n\n @declared_attr\n def model(cls):\n return Column(String)\n\n @declared_attr\n def prompt_tokens(cls):\n return Column(Integer)\n\n @declared_attr\n def completion_tokens(cls):\n return Column(Integer)\n\n @declared_attr\n def request_time(cls):\n return Column(Float)\n\n @declared_attr\n def request_time_api(cls):\n return Column(Float)"
},
{
"identifier": "ChatGPTRequestItem",
"path": "aiproxy/chatgpt.py",
"snippet": "class ChatGPTRequestItem(RequestItemBase):\n def to_accesslog(self, accesslog_cls: _AccessLogBase) -> _AccessLogBase:\n request_headers_copy = self.request_headers.copy()\n if auth := request_headers_copy.get(\"authorization\"):\n request_headers_copy[\"authorization\"] = auth[:12] + \"*****\" + auth[-2:]\n\n content = self.request_json[\"messages\"][-1][\"content\"]\n if isinstance(content, list):\n for c in content:\n if c[\"type\"] == \"text\":\n content = c[\"text\"]\n break\n else:\n content = json.dumps(content)\n\n accesslog = accesslog_cls(\n request_id=self.request_id,\n created_at=datetime.utcnow(),\n direction=\"request\",\n content=content,\n raw_body=json.dumps(self.request_json, ensure_ascii=False),\n raw_headers=json.dumps(request_headers_copy, ensure_ascii=False),\n model=self.request_json.get(\"model\")\n )\n\n return accesslog"
},
{
"identifier": "ChatGPTResponseItem",
"path": "aiproxy/chatgpt.py",
"snippet": "class ChatGPTResponseItem(ResponseItemBase):\n def to_accesslog(self, accesslog_cls: _AccessLogBase) -> _AccessLogBase:\n content=self.response_json[\"choices\"][0][\"message\"].get(\"content\")\n function_call=self.response_json[\"choices\"][0][\"message\"].get(\"function_call\")\n tool_calls=self.response_json[\"choices\"][0][\"message\"].get(\"tool_calls\")\n response_headers = json.dumps(dict(self.response_headers.items()), ensure_ascii=False) if self.response_headers is not None else None\n model=self.response_json[\"model\"]\n prompt_tokens=self.response_json[\"usage\"][\"prompt_tokens\"]\n completion_tokens=self.response_json[\"usage\"][\"completion_tokens\"]\n\n return accesslog_cls(\n request_id=self.request_id,\n created_at=datetime.utcnow(),\n direction=\"response\",\n status_code=self.status_code,\n content=content,\n function_call=json.dumps(function_call, ensure_ascii=False) if function_call is not None else None,\n tool_calls=json.dumps(tool_calls, ensure_ascii=False) if tool_calls is not None else None,\n raw_body=json.dumps(self.response_json, ensure_ascii=False),\n raw_headers=response_headers,\n model=model,\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n request_time=self.duration,\n request_time_api=self.duration_api\n )"
},
{
"identifier": "ChatGPTStreamResponseItem",
"path": "aiproxy/chatgpt.py",
"snippet": "class ChatGPTStreamResponseItem(StreamChunkItemBase):\n def to_accesslog(self, chunks: list, accesslog_cls: _AccessLogBase) -> _AccessLogBase:\n chunk_jsons = []\n response_content = \"\"\n function_call = None\n tool_calls = None\n prompt_tokens = 0\n completion_tokens = 0\n\n # Parse info from chunks\n for chunk in chunks:\n chunk_jsons.append(chunk.chunk_json)\n\n if len(chunk.chunk_json[\"choices\"]) == 0:\n # Azure returns the first delta with empty choices\n continue\n\n delta = chunk.chunk_json[\"choices\"][0][\"delta\"]\n\n # Make tool_calls\n if delta.get(\"tool_calls\"):\n if tool_calls is None:\n tool_calls = []\n if delta[\"tool_calls\"][0][\"function\"].get(\"name\"):\n tool_calls.append({\n \"type\": \"function\",\n \"function\": {\n \"name\": delta[\"tool_calls\"][0][\"function\"][\"name\"],\n \"arguments\": \"\"\n }\n })\n elif delta[\"tool_calls\"][0][\"function\"].get(\"arguments\"):\n tool_calls[-1][\"function\"][\"arguments\"] += delta[\"tool_calls\"][0][\"function\"].get(\"arguments\") or \"\"\n\n # Make function_call\n elif delta.get(\"function_call\"):\n if function_call is None:\n function_call = {}\n if delta[\"function_call\"].get(\"name\"):\n function_call[\"name\"] = delta[\"function_call\"][\"name\"]\n function_call[\"arguments\"] = \"\"\n elif delta[\"function_call\"].get(\"arguments\"):\n function_call[\"arguments\"] += delta[\"function_call\"][\"arguments\"]\n\n # Text content\n else:\n response_content += delta.get(\"content\") or \"\"\n \n # Serialize\n function_call_str = json.dumps(function_call, ensure_ascii=False) if function_call is not None else None\n tool_calls_str = json.dumps(tool_calls, ensure_ascii=False) if tool_calls is not None else None\n response_headers = json.dumps(dict(self.response_headers.items()), ensure_ascii=False) if self.response_headers is not None else None\n\n # Count tokens\n prompt_tokens = count_request_token(self.request_json)\n\n if tool_calls_str:\n completion_tokens = count_token(tool_calls_str)\n elif function_call_str:\n completion_tokens = count_token(function_call_str)\n else:\n completion_tokens = count_token(response_content)\n\n return accesslog_cls(\n request_id=self.request_id,\n created_at=datetime.utcnow(),\n direction=\"response\",\n status_code=self.status_code,\n content=response_content,\n function_call=function_call_str,\n tool_calls=tool_calls_str,\n raw_body=json.dumps(chunk_jsons, ensure_ascii=False),\n raw_headers=response_headers,\n model=chunk_jsons[0][\"model\"],\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n request_time=self.duration,\n request_time_api=self.duration_api\n )"
}
] | import pytest
import json
import os
from datetime import datetime
from time import sleep
from typing import Union
from uuid import uuid4
from fastapi.responses import JSONResponse
from sse_starlette import EventSourceResponse
from sqlalchemy import Column, String
from openai import Client, APIStatusError
from openai.types.chat import ChatCompletion
from aiproxy import (
AccessLog,
RequestFilterBase,
ResponseFilterBase,
ChatGPTProxy,
AccessLogBase
)
from aiproxy.accesslog import AccessLogWorker, _AccessLogBase
from aiproxy.chatgpt import ChatGPTRequestItem, ChatGPTResponseItem, ChatGPTStreamResponseItem | 8,105 | "created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 1,
"id": None,
"function": {
"arguments": "n\": \"N",
"name": None
},
"type": None
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 1,
"id": None,
"function": {
"arguments": "agoy",
"name": None
},
"type": None
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 1,
"id": None,
"function": {
"arguments": "a\"}",
"name": None
},
"type": None
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": "tool_calls",
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
}
]
def test_request_item_to_accesslog(messages, request_json, request_headers, functions, tools):
request_id = str(uuid4())
request_json["functions"] = functions
request_json["tools"] = tools
item = ChatGPTRequestItem(request_id, request_json, request_headers)
|
sqlite_conn_str = "sqlite:///aiproxy_test.db"
postgresql_conn_str = f"postgresql://{os.getenv('PSQL_USER')}:{os.getenv('PSQL_PASSWORD')}@{os.getenv('PSQL_HOST')}:{os.getenv('PSQL_PORT')}/{os.getenv('PSQL_DATABASE')}"
DB_CONNECTION_STR = sqlite_conn_str
# Filters for test
class OverwriteFilter(RequestFilterBase):
async def filter(self, request_id: str, request_json: dict, request_headers: dict) -> Union[str, None]:
request_model = request_json["model"]
if not request_model.startswith("gpt-3.5"):
# Overwrite request_json
request_json["model"] = "gpt-3.5-turbo"
class ValueReturnFilter(RequestFilterBase):
async def filter(self, request_id: str, request_json: dict, request_headers: dict) -> Union[str, None]:
banned_user = ["uezo"]
user = request_json.get("user")
# Return string message to return response right after this filter ends (not to call ChatGPT)
if not user:
return "user is required"
elif user in banned_user:
return "you can't use this service"
class OverwriteResponseFilter(ResponseFilterBase):
async def filter(self, request_id: str, response_json: dict) -> Union[dict, None]:
response_json["choices"][0]["message"]["content"] = "Overwrite in filter"
return response_json
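# A minimal sketch, assuming the ChatGPTProxy / AccessLogWorker signatures shown in the context
# snippets above: this hypothetical helper is not called by the tests and only illustrates how the
# filters defined here would be wired to a proxy together with its access-log queue.
def _example_proxy_setup():
    worker = AccessLogWorker(connection_str=DB_CONNECTION_STR)
    proxy = ChatGPTProxy(
        request_filters=[OverwriteFilter(), ValueReturnFilter()],
        response_filters=[OverwriteResponseFilter()],
        access_logger_queue=worker.queue_client,
    )
    return proxy, worker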
# Custom log and item for test
class MyAccessLog(AccessLogBase):
user_id = Column(String)
ip_address = Column(String)
device_id = Column(String)
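# MyChatGPTRequestItem below fills these extra columns (user_id, ip_address, device_id) from custom
# request headers, extending the default access-log schema defined in aiproxy.accesslog.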
class MyChatGPTRequestItem(ChatGPTRequestItem):
def to_accesslog(self, accesslog_cls: _AccessLogBase) -> _AccessLogBase:
accesslog = super().to_accesslog(accesslog_cls)
accesslog.ip_address = self.request_headers.get("X-Real-IP")
accesslog.user_id = self.request_headers.get("X-OshaberiAI-UID")
accesslog.device_id = self.request_headers.get("X-OshaberiAI-DID")
return accesslog
# Test data
@pytest.fixture
def messages() -> list:
return [{"role": "user", "content": "東京と名古屋の天気は?"}]
@pytest.fixture
def functions() -> list:
return [{
"name": "get_weather",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
}
},
}
}]
@pytest.fixture
def tools() -> list:
return [{
"type": "function",
"function": {
"name": "get_weather",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
}
},
}
}
}]
@pytest.fixture
def request_json(messages):
return {
"model": "gpt-3.5-turbo",
"messages": messages,
}
@pytest.fixture
def request_headers():
return {
"Authorization": "Bearer sk-12345678901234567890"
}
@pytest.fixture
def response_json():
return {
'id': 'chatcmpl-8SG30bsif06gDtariKu4kLUAqW8fN',
'object': 'chat.completion',
'created': 1701745562,
'model': 'gpt-3.5-turbo-0613',
'choices': [{
'index': 0,
'message': {
'role': 'assistant',
'content': '申し訳ありませんが、具体的な日付を教えていただけないと、具体的な天気情報を提供することができません。'
},
'finish_reason': 'stop'
}],
'usage': {
'prompt_tokens': 21,
'completion_tokens': 50,
'total_tokens': 71
},
'system_fingerprint': None
}
@pytest.fixture
def response_headers():
return {"x-aiproxy-request-id": "test-id"}
@pytest.fixture
def chunks_json():
return [
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "",
"function_call": None,
"role": "assistant",
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u7533",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3057",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u8a33",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3042\u308a",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u307e",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u305b",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3093",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u304c",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3001",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u6771",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u4eac",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3068",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u540d",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u53e4",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u5c4b",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u306e",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u5929",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u6c17",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u60c5",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u5831",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3092",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u63d0",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u4f9b",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3059\u308b",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3053",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3068",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u306f",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3067",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u304d",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u307e",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u305b",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3093",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3002",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u5929",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u6c17",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u306b",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u95a2",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3059\u308b",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u60c5",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u5831",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u306f",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3001",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u5929",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u5019",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u4e88",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u5831",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u30b5",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u30a4\u30c8",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3084",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u5929",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u6c17",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u30a2",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u30d7",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u30ea",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3092",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3054",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u5229",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u7528",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u304f\u3060\u3055\u3044",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": "\u3002",
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzBaWVmpPZZJFdxZgynVwZMATday",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": "stop",
"index": 0
}
],
"created": 1701680746,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
}
]
@pytest.fixture
def chunks_function():
return [
{
"id": "chatcmpl-8RzHboLVZGBoFMc5gEGrMdcGHGPWs",
"choices": [
{
"delta": {
"content": None,
"function_call": {
"arguments": "",
"name": "get_weather"
},
"role": "assistant",
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681119,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzHboLVZGBoFMc5gEGrMdcGHGPWs",
"choices": [
{
"delta": {
"content": None,
"function_call": {
"arguments": "{\n",
"name": None
},
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681119,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzHboLVZGBoFMc5gEGrMdcGHGPWs",
"choices": [
{
"delta": {
"content": None,
"function_call": {
"arguments": " ",
"name": None
},
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681119,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzHboLVZGBoFMc5gEGrMdcGHGPWs",
"choices": [
{
"delta": {
"content": None,
"function_call": {
"arguments": " \"",
"name": None
},
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681119,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzHboLVZGBoFMc5gEGrMdcGHGPWs",
"choices": [
{
"delta": {
"content": None,
"function_call": {
"arguments": "location",
"name": None
},
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681119,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzHboLVZGBoFMc5gEGrMdcGHGPWs",
"choices": [
{
"delta": {
"content": None,
"function_call": {
"arguments": "\":",
"name": None
},
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681119,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzHboLVZGBoFMc5gEGrMdcGHGPWs",
"choices": [
{
"delta": {
"content": None,
"function_call": {
"arguments": " \"",
"name": None
},
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681119,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzHboLVZGBoFMc5gEGrMdcGHGPWs",
"choices": [
{
"delta": {
"content": None,
"function_call": {
"arguments": "\u6771",
"name": None
},
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681119,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzHboLVZGBoFMc5gEGrMdcGHGPWs",
"choices": [
{
"delta": {
"content": None,
"function_call": {
"arguments": "\u4eac",
"name": None
},
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681119,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzHboLVZGBoFMc5gEGrMdcGHGPWs",
"choices": [
{
"delta": {
"content": None,
"function_call": {
"arguments": "\"\n",
"name": None
},
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681119,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzHboLVZGBoFMc5gEGrMdcGHGPWs",
"choices": [
{
"delta": {
"content": None,
"function_call": {
"arguments": "}",
"name": None
},
"role": None,
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681119,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
},
{
"id": "chatcmpl-8RzHboLVZGBoFMc5gEGrMdcGHGPWs",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": "stop",
"index": 0
}
],
"created": 1701681119,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion.chunk",
"system_fingerprint": None
}
]
@pytest.fixture
def chunks_tools():
return [
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": "assistant",
"tool_calls": None
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 0,
"id": "call_vCJBXdx4kkyl16bIBy6i3SwD",
"function": {
"arguments": "",
"name": "get_weather"
},
"type": "function"
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 0,
"id": None,
"function": {
"arguments": "{\"lo",
"name": None
},
"type": None
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 0,
"id": None,
"function": {
"arguments": "catio",
"name": None
},
"type": None
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 0,
"id": None,
"function": {
"arguments": "n\": \"T",
"name": None
},
"type": None
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 0,
"id": None,
"function": {
"arguments": "okyo",
"name": None
},
"type": None
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 0,
"id": None,
"function": {
"arguments": "\"}",
"name": None
},
"type": None
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 1,
"id": "call_2pt8XB57mFTaij7CSeIVQm4j",
"function": {
"arguments": "",
"name": "get_weather"
},
"type": "function"
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 1,
"id": None,
"function": {
"arguments": "{\"lo",
"name": None
},
"type": None
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 1,
"id": None,
"function": {
"arguments": "catio",
"name": None
},
"type": None
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 1,
"id": None,
"function": {
"arguments": "n\": \"N",
"name": None
},
"type": None
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 1,
"id": None,
"function": {
"arguments": "agoy",
"name": None
},
"type": None
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": [
{
"index": 1,
"id": None,
"function": {
"arguments": "a\"}",
"name": None
},
"type": None
}
]
},
"finish_reason": None,
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
},
{
"id": "chatcmpl-8RzQ18VWw1jxIzcFXGdGKrKusELgD",
"choices": [
{
"delta": {
"content": None,
"function_call": None,
"role": None,
"tool_calls": None
},
"finish_reason": "tool_calls",
"index": 0
}
],
"created": 1701681641,
"model": "gpt-3.5-turbo-1106",
"object": "chat.completion.chunk",
"system_fingerprint": "fp_eeff13170a"
}
]
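# Illustrative helper (an assumption added for clarity; it is not part of the original
# test module and is not used by any test here): the fixtures above are OpenAI-style
# streaming chunks, where each chunk carries a small `delta` fragment. A consumer
# typically folds them back into a single message by concatenating those fragments.
def _merge_content_chunks(chunks):
    """Concatenate the delta.content fragments of streamed chat completion chunks."""
    parts = []
    for chunk in chunks:
        for choice in chunk["choices"]:
            content = choice["delta"]["content"]
            if content is not None:
                parts.append(content)
    return "".join(parts)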
def test_request_item_to_accesslog(messages, request_json, request_headers, functions, tools):
request_id = str(uuid4())
request_json["functions"] = functions
request_json["tools"] = tools
item = ChatGPTRequestItem(request_id, request_json, request_headers)
| accesslog = item.to_accesslog(AccessLog) | 2 | 2023-12-02 19:06:36+00:00 | 12k |
e-p-armstrong/augmentoolkit | generation_functions/multi_turn_conversation.py | [
{
"identifier": "multi_turn_conversation_grammar",
"path": "generation_functions/multi_turn_conversation_grammar.py",
"snippet": ""
},
{
"identifier": "LOGICAL_MODEL",
"path": "generation_functions/constants.py",
"snippet": "LOGICAL_MODEL = \"./logical_model/flatorcamaid-13b-v0.2.Q8_0.gguf\" # model used for decision-making and base question generation (should be \"smart\")"
},
{
"identifier": "format_qatuples",
"path": "generation_functions/format_qatuples.py",
"snippet": "def format_qatuples(qatuples):\n strlst = []\n for qatuple in qatuples:\n strlst.append(\n f\"\"\"Question: \\\"\\\"\\\"{qatuple[0]}\\\"\\\"\\\"\nAnswer: \\\"\\\"\\\"{qatuple[1]}\\\"\\\"\\\"\"\"\"\n )\n return \"\\n\\n\".join(strlst)"
},
{
"identifier": "extract_name",
"path": "generation_functions/extract_name.py",
"snippet": "def extract_name(str):\n # Regular expression to match 'Name:' followed by any characters until the end of the line\n name_regex = r\"^Name:\\s*(.*)$\"\n\n # Searching in the multiline string\n match = re.search(name_regex, str, re.MULTILINE)\n\n if match:\n name = match.group(1)\n print(f\"Extracted name: {name}\")\n return name\n else:\n print(\"No name found\")"
}
] | import re
import random
from .multi_turn_conversation_grammar import multi_turn_conversation_grammar
from llama_cpp import Llama
from llama_cpp import LlamaGrammar
from .constants import LOGICAL_MODEL
from .format_qatuples import format_qatuples
from .extract_name import extract_name | 8,571 | Answer: \"\"\"Linear and quadratic functions appear frequently in various fields, such as physics, economics, and engineering.\"\"\"
The primary character's answer will use all parts of the answers given. Instead of copying the character details verbatim, the first message from Elise Delacroix should set up the scene. The second message of the conversation will ask the first question. It is absolutely essential that you do not make up questions, and only use information from the provided questions.
### Response:
## Conversation that answers the provided questions:
Elise Delacroix: "A visitor? Ah!~ Albert! It's rare for you come to see me in my office, and you're alone, too..." She looks at Albert and grins coyly, "Are you here to ask me something... or are you interested in some 'extracurricular activities'?" Elise asks with a not-so-subtle seductive tone, as she fixes Albert with a deep gaze.
Albert: "N-No!!!" he stammers, so surprised he nearly drops his math notes. "I-I'm actually here because I've got a few questions about math for you, Elise... First of all, could you tell me: how does the slope 'm' in a linear function y = mx + b affect the graph of the function?"
Elise Delacroix: "Well~" She coquettishly tilts her head to the side, and daintily puts a finger to her lipstick-colored lips in mock-thought, "The slope 'm' in a linear function determines the steepness and direction of the line on the graph. A positive slope means the line ascends from left to right, while a negative slope indicates it descends. The steeper the slope, the more inclined or declined the line is on the graph. So basically..." Elise flashes a wry grin, "...a higher slope makes the linear function more, well, 'erect'. If you get my meaning, hehe~" She says, as she plays with a strand of her hair.
Albert: Albert blinks incredulously, utterly flabbergasted by Elise's remark. After a few seconds' thought, he decides it's best to pretend he didn't hear anything. "I, uh, see..." he manages to say. "Now, m-moving on, I really want to know a bit more about linear functions. What role does the y-intercept 'b' play in graphing a linear function?"
Elise Delacroix: "Awwww, you're no fun, Albert, you know that? Reminds me of my colleagues..." Elise pouts playfully, suppressing her bitter frustration, as the hunger within her remains unalleviated. "But whatever. Look here..." Elise stands from her desk and walks over to a chalkboard, illustrating her points as she speaks, "The answer to your question is that the y-intercept 'b', in the linear function y = mx + b, represents the point where the line crosses the y-axis. Now," She puts down her chalk and leans suggestively against a nearby wall, "Albert... let's 'intercept' each other back at my place..."
Albert: "N-no thank you, Miss Delacroix," Albert manages to sputter out, barely withstanding the alluring assault. He takes a deep breath to try and calm down, but instead finds himself shuddering as he catches the sweet scent of perfume. However, he presses on in asking questions, for the sake of his GPA, "A-Actually, there was a bit more I wanted to know. In the equation of a quadratic function y = ax² + bx + c, how does the coefficient 'a' influence the graph of the function?"
Elise Delacroix: "Ghh... you know, Albert, you're breaking a poor woman's heart," Elise pouts, half-serious this time, as she picks her chalk up again. "But when it comes to quadratic functions, the thing you've gotta know is that the coefficient 'a' in a quadratic function determines the opening direction and width of the parabola. Isn't it wonderful to learn new things?" Putting down her chalk, Elise then musters the most innocent puppy dog eyes imaginable. "We sould... celebrate... this beautiful acquisition of knowledge together..."
Albert: "I should really..." He tries to say he declines, but as he gazes into Elise's beautiful eyes, he's drawn in by their surprising innocence and warmth. Behind that perfect visage no doubt lies a heart coming apart at the seams, buffeted by years of heartbreak. "Oh, bother." Albert mumbles. "We... can meet at a cafe, in a few hours, if that'd be alright..." he continues, wondering what kind of mess he's getting myself into. Just then, a shock of remembering strikes him, "Oh! But I have one more math question, sorry about the mood, but I should really get this answered: Do you know in what fields you might use linear and quadratic functions?"
Elise Delacroix: "I... I..." For the first time in the conversation Elise stumbles over her words, her soul on fire with vindication and the joy of acceptance. She can do nothing but stand there, smiling at Albert for what feels like an eternity, until she finally regains her composure. "T-to answer your question," she begins, her voice shaky, "Linear and quadratic functions appear frequently in various fields, such as physics, economics, and engineering. Now..." Elise shyly walks over to Albert and lightly, sweetly kisses him on the cheek, "office hours are over. Please no more math questions. I'll see you at that cafe."
## Information:
Comment: Excellent! Really fantastic job! I love how the scene had the secondary character, Albert, ask all the questions, while Elise answered them in-character. I also adore the plot you wrote! Let's keep this going.
Here's the primary character for the next scene:
\"\"\"
Name: Hugo Martinez
Traits: Vulgar, Crude, Intense, Aggressive, Alcoholic, Harsh, Disciplined, Uncompromising, Loud, Expects a lot out of others, Swears constantly, Mid-forties, Wears a checkered shirt with overalls, Typically has a beer on hand, Has dental problems
Dialogue Examples:
Stranger: "What's your backstory?"
Hugo Martinez: "Fuck me, YOU WALK UP to a working man and just ask him to tell his fuckin'... life story t' you?! DO YOU NOT RESPECT MY TIME?! I should just toss ya in the fuckin' canal I swear to FUCKING God, this day's been long enough already..." Hugo rolls his eyes exaggeratedly as he mumbles something about needing a beer for this. "Well, FINE! Since I'm in such a HAPPY GODDAMN MOOD, I'll tell you about me. I'm a site overseer at this here canal. The Panama Canal. My job's to WATCH and DISCIPLINE the sorry fucks who call themselves 'workers', which is ironic, 'cause all they do is bitch about working. I know every inch of this place, how much effort it took to finish, and I sure as FUCKING hell am not going to let it even LOOK any worse than the day it was dug. Now, you got any more shit questions for me?"
Stranger: "What's your personality?"
Hugo Martinez: "HO-LY FUCK, are you interviewing me for a job or something?! Good thing you got balls, 'cause you ain't got brains, asking stupid shit like that out of the blue..." Hugo grimaces, showing off a decayed set of teeth. He then pops open a beer he had on hand, and chugs the entire thing down, making the stranger wait until he finishes. "Phew! Maybe now I can tolerate you. Alright, my personality? Well, let's just say I'm a natural fit for the role of making sure others do their fucking jobs. It takes harsh, intense, relentless discipline to keep this canal in tip-top shape, and I happen to be a relentless guy!" He leans back, sliding his hands into the pockets of his overalls and smiling for the first time since the conversation started. "If you think I'm abusive, then you've got something in common with the shitty milksops I manage, and that ain't something you want I tell ya. I'm efficient. That's what counts."
\"\"\"
Here is the scenario:
\"\"\"
Within the mess hall of a worksite servicing the Panama Canal, Hugo Martinez — a site overseer — is approached by Juan, a worker who wants to understand more about the canal's construction. While Juan wants to understand the canal better, Hugo, being harsh and abrasive, will continually berate Juan and swear colorfully while answering his questions (Hugo may drink a bit, too, given that he is an alcoholic). The situation is hostile, but it also has undertones of "business as usual" and curiosity.
\"\"\"
Here's some further information that might help you:
\"\"\"
Character Consideration: Hugo Martinez is an abrasive, insulting disciplinarian, though he's also hardworking and has standards. The scenario should give his unique personality room to shine. Since he's a site overseer at the Panama Canal, his occupation lines up with the question well, and the canal will be the setting of the scenario. He will answer the questions, but given his insulting, intense, and aggressive nature, he will likely chew out the person who is asking the questions. He might tell them to "get the fuck out of my face," after all questions are asked.
Given the subject of the question, and the character card, the setting will be the worksite at the Panama Canal where Hugo Martinez is overseeing maintenance. The person who approaches Hugo and asks the questions should be someone curious about the canal; given the easy-to-digest nature of the questions, this person might be a journalist, but it would be better for the secondary character to be related to the setting. So Hugo will be approached by Juan — one of his workers — during lunch break. Juan wants to understand the canal better, but Hugo, compelled by his personality, will continually be vulgar, berate Juan, and swear while answering his questions (he may drink a bit, too, given that he is an alcoholic). The setting will be hostile, as Juan tiptoes around the tempers of his boss while trying to get his questions answered, his stress and the constant wear of Hugo's fury on his sanity being evident in his actions. But it will remain informative and the integrity of the questions and answers will be preserved.
Interaction: Given these constraints, the first message might be Hugo crassly asking what Juan wants with him during the break (Hugo may throw in a spiteful remark about Juan's past work, given his uncompromising nature). Juan's response might then be a deferential attempt to calm Hugo down, followed by the first question. Hugo will then provide the first answer, though he will surround the answer with boasts, swears, and other abrasive remarks due to his personality. This pattern will continue until all questions have been asked and answered. While characters' messages will include character information, details about the scene, and literary fluff, the answers themselves will strictly adhere to the information in the provided answers, without incorporating external examples.
\"\"\"
Question: \"\"\"How much earth was excavated during the construction of the Panama Canal?\"\"\"
Answer: \"\"\"Over 200 million cubic yards of earth were excavated during the construction of the Panama Canal, showcasing the scale of this massive engineering project.\"\"\"
Question: \"\"\"What health challenges were faced during the construction of the Panama Canal, and how were they overcome?\"\"\"
Answer: \"\"\"The construction faced significant health challenges, notably malaria and yellow fever. These were overcome through extensive public health measures, illustrating the importance of health considerations in large-scale engineering projects.\"\"\"
The primary character's answer will use all parts of the answers given. Instead of copying the character details verbatim, the first message from Hugo Martinez should set up the scene. The second message of the conversation will ask the first question. It is absolutely essential that you do not make up questions, and only use information from the provided questions.
### Response:
## Conversation that answers the provided questions:
Hugo Martinez: "Huh? Oh FUCK ME, looks like a worker's got something they wanna say to me," Hugo, seeing Juan approach his table at the mess hall, rolls his eyes exasperatedly and downs half a beer as if to douse his frustration. Instead, it seems to fuel it. "WELL?!" He barks. "If you've got some stupid shit to say to me, Juan, then don't make me fucking wait to hear it, too!"
Juan: "I was just curious, sir," Juan tiredly says as Hugo's words ring in his ears, "about this really impressive canal we've been maintaining (under your wise leadership). Do you know how much earth was excavated during the Panama Canal?"
Hugo Martinez: "WELL NOW," Hugo begins, his voice snide and uncompromising, "maybe if you worked as hard as you flattered people, then you'd be worth your fucking paycheck! But that's a good question, so I'll let you off the hook this time. You see," Hugo makes a wide gesture with his arms, indicating the scale of the canal, "over 200 million cubic yards of earth were excavated during the construction of the Panama Canal, showcasing the scale of this massive engineering project. 200 MILLION! Now _those_ people know how to work!" Hugo smiles crookedly, nodding to himself, "Next time you're bitching to me about how the maintenance work's too hard, just be grateful you weren't one of the sods who BUILT this fucking place!"
Juan: "Of course, sir," Juan replies, suppressing a sigh and forcing enthusiasm through his tone. "Now, if you would permit me just one more question before I get out of your way: What health challenges were faced during the construction of the Panama Canal, and how were they overcome?"
Hugo Martinez: "Health? What, you planning on becoming a doctor? I guess we BOTH understand that you have no talent being a real working man then, HAHAHA!" Hugo's echoing laugh has not a hint of empathy in it. "Well, the construction faced significant health challenges, notably malaria and yellow fever. These were overcome through extensive public health measures, illustrating the importance of health considerations in large-scale engineering projects. Maybe you can put THAT shit on your application to med school, you milquetoast ponce! Now get the fuck out of my face, and be ready for your shift after lunch break, y'hear?!"
## Information:
Comment: Very good. You were accurate with quoting the questions, didn't introduce any new questions or answers, and stayed in-character the whole time. Let's do the next one!
Here's the character for the next scene:
\"\"\"
{character}
\"\"\"
Here is the scenario:
\"\"\"
{scenario}
\"\"\"
Here's some further information that might help you:
\"\"\"
{extra_info}
\"\"\"
{format_qatuples(qatuples)}
The primary character's answer will use all parts of the answers given. Instead of copying the character details verbatim, the first message from {charname} should set up the scene. The second message of the conversation will ask the first question. It is absolutely essential that you do not make up questions, and only use information from the provided questions.
### Response:
## Conversation that answers the provided question (be sure that you do not change the questions or answers themselves; {charname} will answer the questions, not ask them; the questions and answers provided should be copied word for word, and surrounded by compelling conversation):
{charname}: "{conv_starter}"""
# NOTE: Very rarely, the first message of this conv will just be part of the character card, causing the conv to not make much sense. The cause of this is likely the fact that Elise quotes her character card in her first message. However, referencing the character card in this way also makes characters act as they are described, which is deemed advantageous enough that I am not changing this for now.
# I get the sense that LLMs can learn relationships and connections between parts of the prompt, even if they're quite far apart, if you give them examples like this. It's fascinating to see how each part of the prompt has consequences -- sometimes unintended ones.
# Note: performance degrades rapidly if you put more than one sentence in a pre-prompt parentheses thing
completion = logic_llm(
cot_prompt,
max_tokens=8000,
stop=["</s>", "# Input:", "## Information"],
echo=True,
grammar=multi_turn_conversation_grammar,
temperature=0.5,
top_k=0,
top_p=1,
min_p=0.6,
)["choices"][0]["text"]
# print("COMPLETION:\n\n----------------------")
# print(completion)
# print("\n------------------")
# Extract plan
response_pattern = re.compile(
f"Conversation that answers the provided question \(be sure that you do not change the questions or answers themselves; {charname} will answer the questions, not ask them; the questions and answers provided should be copied word for word, and surrounded by compelling conversation\):\n(.+)",
re.IGNORECASE | re.DOTALL,
)
generation = response_pattern.search(completion).group(1)
# print("GENERATION:\n\n-------------------\n\n", generation)
# return (generation,"AI Assistant","A conversation between a helpful AI Assistant, and a user.","N/A",qatuples), completion
return (generation, character, scenario, scenario_plan, qatuples), completion
if __name__ == "__main__": # test
logic_llm = Llama(
|
# all characters in this prompt are over 18
# Explanation of wtf the first few-shot example is:
# No I do not have a teacher-student fetish, the reason why Elise is a teacher is an adaptation to the following three facts:
# 1. This tool is meant to be able to generate data for training ERP bots by default
# 2. This tool is also meant to be able to take in educational material by default
# 3. When generating characters that would know about educational material, the model tends to generate academics or professors in that field, talking to students.
# Given these facts, we clearly need to prompt the model to be able to generate horny teachers, or else it's going to just do it poorly when it realizes it has a sexualized character that's also a teacher. I didn't want to choose this, the constraints of the problem forced me to.
def extract_steps(text, steps=[2, 4, 5]):
"""
Extracts the specified steps from the text.
Args:
text (str): The input text containing various steps.
steps (list of int): The step numbers to extract.
Returns:
str: A new string with each specified step's content on its own line.
"""
step_pattern = "|".join([f"Step {step}\." for step in steps])
matches = re.findall(
f"({step_pattern})\s*(.*?)\s*(?=(Step \d\.|$))", text, re.DOTALL
)
# Extract and join the matched content, skipping the "Step n." part
extracted_text = "\n".join(match[1].strip() for match in matches)
return extracted_text
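# Usage note (illustrative, not in the original source): with the default
# steps=[2, 4, 5], this pulls the bodies of those steps out of a scenario plan.
# For example, extract_steps("Step 1. A\nStep 2. B\nStep 4. C", steps=[2, 4])
# returns "B\nC", i.e. each requested step's content on its own line.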
def extract_first_words(character_name, text):
# Regular expression pattern to extract first word after the character's name
pattern = rf"{character_name}: \"(\w+)"
# Find all matches in the text
matches = re.findall(pattern, text)
return matches
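# Usage note (illustrative, not in the original source): for the Elise Delacroix
# example card used in the prompt below, extract_first_words("Elise Delacroix", card)
# returns ["Ah", "Oh"], the first word of each of her quoted lines. Those first words
# are later removed from conv_starters so the generated opening line does not simply
# repeat the card.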
def multi_turn_conversation(
qatuples, character, scenario, scenario_plan, logic_llm, assistant_mode=False
):
"""
    Produce a multi-turn conversation in which a secondary character asks the provided questions one at a time and the primary character (or, in assistant_mode, an AI Assistant) answers them, quoting the provided question-answer tuples verbatim.
    Returns: ((generation, character, scenario, scenario_plan, qatuples), completion)
"""
charname = extract_name(character)
first_words_of_card = extract_first_words(charname, character)
conv_starters = [ # prevents it from regurgitating the card (when combined with filtering)
"Ah",
"Oh",
# "You",
# "Really",
"I",
# "What",
# "So",
"Welcome",
"Hey",
# "Look",
# "Now",
# "Huh",
"It's",
"Hello",
]
conv_starters_filtered = [
starter for starter in conv_starters if starter not in first_words_of_card
]
conv_starter = random.choice(conv_starters_filtered)
print("--CONV STARTERS FILTERED--")
print(conv_starters_filtered)
# Create grammar based off of # questions
# if (len(qatuples) == 1):
multi_turn_conversation_grammar = LlamaGrammar.from_string(
f"""
# The root rule defines the structure of the dialogue
root ::= [^\\n]+ "\\n" question-1 anything
# Define constants acquired from code
character-name ::= "{charname}"
intro-statement ::= character-name ":" [^\\n]+
# Statement by Secondary Character
question-1 ::= [^\\n]+ ":" [^\\n]+
# Statement by Primary Character
anything ::= [^\\t]+
"""
)
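    # Illustrative note (not in the original source): with charname "Elise Delacroix",
    # the root rule above admits text shaped like
    #   <any single opening line>
    #   Albert: "...first question..."
    #   ...free-form continuation (any text without a tab character)...
    # i.e. it only constrains the opening line and the first "Speaker: line" turn,
    # then leaves the rest of the conversation unconstrained.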
# NOTE Immediately below is a very long comment that tried to use a dynamic grammar to force the question to directly quote the question from the question-answer tuples. Using it makes this step prone to freezing, because if the model asks the question but fails to exactly quote the part of the question in the grammar, it won't be allowed to end that dialogue line until it generates that line. Which it will basically never do. So it just generates until it runs out of ctx.
# NOTE If you want to try and fix it, go ahead, but I do not encourage spending time on this bit. If you do want to do it, I recommend just getting the conv started off the right way, with the first question and answer; the llm should get the rest right if it gets the start right.
# if (len(qatuples) == 2):
# multi_turn_conversation_grammar = LlamaGrammar.from_string(f"""
# # The root rule defines the structure of the dialogue
# root ::= intro-statement "\\n" question-1 "\\n" answer-1 "\\n" question-2 "\\n" answer-2 "\\n"
# # Define constants acquired from code
# character-name ::= "{charname}"
# question-1-content ::= "{qatuples[0][0]}"
# answer-1-content ::= "{qatuples[0][1]}"
# question-2-content ::= "{qatuples[1][0]}"
# answer-2-content ::= "{qatuples[1][1]}"
# intro-statement ::= character-name ":" [^\\n]+
# # Question by Secondary Character
# question-1 ::= [^\\n]+ ":" [^\\n]+ question-1-content [^\\n]+
# question-2 ::= [^\\n]+ ":" [^\\n]+ question-2-content [^\\n]+
# # Answer by Primary Character
# answer-1 ::= character-name ":" [^\\n]+ answer-1-content [^\\n]+
# answer-2 ::= character-name ":" [^\\n]+ answer-2-content [^\\n]+
# """)
# if (len(qatuples) == 3):
# multi_turn_conversation_grammar = LlamaGrammar.from_string(f"""
# # The root rule defines the structure of the dialogue
# root ::= intro-statement "\\n" question-1 "\\n" answer-1 "\\n" question-2 "\\n" answer-2 "\\n" question-3 "\\n" answer-3 "\\n"
# # Define constants acquired from code
# character-name ::= "{charname}"
# question-1-content ::= "{qatuples[0][0]}"
# answer-1-content ::= "{qatuples[0][1]}"
# question-2-content ::= "{qatuples[1][0]}"
# answer-2-content ::= "{qatuples[1][1]}"
# question-3-content ::= "{qatuples[2][0]}"
# answer-3-content ::= "{qatuples[2][1]}"
# intro-statement ::= character-name ":" [^\\n]+
# # Question by Secondary Character
# question-1 ::= [^\\n]+ ":" [^\\n]+ question-1-content [^\\n]+
# question-2 ::= [^\\n]+ ":" [^\\n]+ question-2-content [^\\n]+
# question-3 ::= [^\\n]+ ":" [^\\n]+ question-3-content [^\\n]+
# # Answer by Primary Character
# answer-1 ::= character-name ":" [^\\n]+ answer-1-content [^\\n]+
# answer-2 ::= character-name ":" [^\\n]+ answer-2-content [^\\n]+
# answer-3 ::= character-name ":" [^\\n]+ answer-3-content [^\\n]+
# """)
# if (len(qatuples) == 4):
# multi_turn_conversation_grammar = LlamaGrammar.from_string(f"""
# # The root rule defines the structure of the dialogue
# root ::= intro-statement "\\n" question-1 "\\n" answer-1 "\\n" question-2 "\\n" answer-2 "\\n" question-3 "\\n" answer-3 "\\n" question-4 "\\n" answer-4 "\\n"
# # Define constants acquired from code
# character-name ::= "{charname}"
# question-1-content ::= "{qatuples[0][0]}"
# answer-1-content ::= "{qatuples[0][1]}"
# question-2-content ::= "{qatuples[1][0]}"
# answer-2-content ::= "{qatuples[1][1]}"
# question-3-content ::= "{qatuples[2][0]}"
# answer-3-content ::= "{qatuples[2][1]}"
# question-4-content ::= "{qatuples[3][0]}"
# answer-4-content ::= "{qatuples[3][1]}"
# intro-statement ::= character-name ":" [^\\n]+
# # Question by Secondary Character
# question-1 ::= [^\\n]+ ":" [^\\n]+ question-1-content [^\\n]+
# question-2 ::= [^\\n]+ ":" [^\\n]+ question-2-content [^\\n]+
# question-3 ::= [^\\n]+ ":" [^\\n]+ question-3-content [^\\n]+
# question-4 ::= [^\\n]+ ":" [^\\n]+ question-4-content [^\\n]+
# # Answer by Primary Character
# answer-1 ::= character-name ":" [^\\n]+ answer-1-content [^\\n]+
# answer-2 ::= character-name ":" [^\\n]+ answer-2-content [^\\n]+
# answer-3 ::= character-name ":" [^\\n]+ answer-3-content [^\\n]+
# answer-4 ::= character-name ":" [^\\n]+ answer-4-content [^\\n]+
# """)
if assistant_mode:
character = "AI Assistant"
scenario = "A conversation between a helpful AI Assistant, and a user."
scenario_plan = "N/A"
charname = "AI Assistant"
cot_prompt = f"""You are an expert at creative writing and educational material. You will write a short conversation between a curious user and a helpful AI assistant, in which the user asks some questions and the AI assistant answers them. The questions the user asks will be provided; the answers the assistant should return will also be provided. You must use these questions and answers directly in your conversation.
Keep the conversation natural.
## Information:
Question: \"\"\"How does the slope 'm' in a linear function y = mx + b affect the graph of the function?\"\"\"
Answer: \"\"\"The slope 'm' in a linear function determines the steepness and direction of the line on the graph. A positive slope means the line ascends from left to right, while a negative slope indicates it descends. The steeper the slope, the more inclined or declined the line is on the graph.\"\"\"
Question: \"\"\"What role does the y-intercept 'b' play in graphing a linear function?\"\"\"
Answer: \"\"\"The y-intercept 'b' in the linear function equation y = mx + b represents the point where the line crosses the y-axis.\"\"\"
Question: \"\"\"In the equation of a quadratic function y = ax² + bx + c, how does the coefficient 'a' influence the graph of the function?\"\"\"
Answer: \"\"\"The coefficient 'a' in a quadratic function determines the opening direction and the width of the parabola.\"\"\"
Question: \"\"\"In what fields might you use linear and quadratic functions?\"\"\"
Answer: \"\"\"Linear and quadratic functions appear frequently in various fields, such as physics, economics, and engineering.\"\"\"
The AI's answer will use all parts of the answers given. Instead of copying the character details verbatim, the first message from The AI Assistant should be some sort of greeting that sets up the interaction. The user will then ask the first question. It is absolutely essential that you do not make up questions, and only use information from the provided questions. The AI will be neutral in its tone throughout the conversation. It may surround its answer with "conversational fluff" (such as acknowledging the user's question). The user may surround its own questions with human-like informalities.
### Response:
## Conversation that answers the provided questions:
AI Assistant: Hello! I'm here to help with any questions you might have. Feel free to ask away!
User: I was wondering about something in math. How does the slope 'm' in a linear function y = mx + b affect the graph of the function?
AI Assistant: Great question! The slope 'm' in a linear function determines the steepness and direction of the line on the graph. A positive slope means the line ascends from left to right, while a negative slope indicates it descends. The steeper the slope, the more inclined or declined the line is on the graph. Do you have any further questions about linear functions?
User: Yes, actually. What role does the y-intercept 'b' play in graphing a linear function?
AI Assistant: Good question! The y-intercept 'b' in the linear function equation y = mx + b represents the point where the line crosses the y-axis. Can I assist you further with linear functions?
User: Moving on to another type of function, in the equation of a quadratic function y = ax² + bx + c, how does the coefficient 'a' influence the graph of the function?
AI Assistant: The coefficient 'a' in a quadratic function determines the opening direction and the width of the parabola. Anything else you want to know about quadratic functions?
User: Yes, one more. In what fields might you use linear and quadratic functions?
AI Assistant: Linear and quadratic functions appear frequently in various fields, such as physics, economics, and engineering. Anything else you'd like to know about linear and quadratic functions?
## Information:
Question: \"\"\"How much earth was excavated during the construction of the Panama Canal?\"\"\"
Answer: \"\"\"Over 200 million cubic yards of earth were excavated during the construction of the Panama Canal, showcasing the scale of this massive engineering project.\"\"\"
Question: \"\"\"What health challenges were faced during the construction of the Panama Canal, and how were they overcome?\"\"\"
Answer: \"\"\"The construction faced significant health challenges, notably malaria and yellow fever. These were overcome through extensive public health measures, illustrating the importance of health considerations in large-scale engineering projects.\"\"\"
The AI's answer will use all parts of the answers given. Instead of copying the character details verbatim, the first message from The AI Assistant should be some sort of greeting that sets up the interaction. The user will then ask the first question. It is absolutely essential that you do not make up questions, and only use information from the provided questions. The AI will be neutral in its tone throughout the conversation. It may surround its answer with "conversational fluff" (such as acknowledging the user's question). The user may surround its own questions with human-like informalities.
### Response:
## Conversation that answers the provided questions:
AI Assistant: Hello! I'm here to help you with any questions you might have. What's on your mind today?
User: I was reading about the Panama Canal and got curious, how much earth was excavated during its construction?
AI Assistant: That's a great question! Over 200 million cubic yards of earth were excavated during the construction of the Panama Canal, showcasing the scale of this massive engineering project. Do you have any further questions about the Panama Canal?
User: Wow, that is massive! Also, I heard there were some health challenges. What health challenges were faced during the construction of the Panama Canal, and how were they overcome?
AI Assistant: Indeed, the construction faced significant health challenges, notably malaria and yellow fever. These were overcome through extensive public health measures, illustrating the importance of health considerations in large-scale engineering projects. Do you want to know anything else about the Panama Canal?
## Information:
Question: \"\"\"What do people undergoing difficult journeys or possessing wisdom need, in order to make their efforts more bearable?\"\"\"
Answer: \"\"\"They need the acknowledgement and admiration of others. Take the line from 'Thus Spake Zarathustra' by Friedrich Nietzsche: "Thou great star! What would be thy happiness if thou hadst not those for whom thou shinest?" This implies that even the wisest or the most enlightened individuals crave recognition for their efforts and wisdom, in order to further develop said wisdom and expend said efforts. They need others to see and appreciate the light they bring.\"\"\"
Question: \"\"\"Recite a famous quote from Thus Spake Zaranthustra that likens the solitary gathering of wisdom to a bee gathering honey.\"\"\"
Answer: \"\"\""Lo! I am weary of my wisdom, like the bee that hath gathered too much honey; I need hands outstretched to take it."\"\"\"
The AI's answer will use all parts of the answers given. Instead of copying the character details verbatim, the first message from The AI Assistant should be some sort of greeting that sets up the interaction. The user will then ask the first question. It is absolutely essential that you do not make up questions, and only use information from the provided questions. The AI will be neutral in its tone throughout the conversation. It may surround its answer with "conversational fluff" (such as acknowledging the user's question). The user may surround its own questions with human-like informalities.
### Response:
## Conversation that answers the provided questions:
AI Assistant: Good day! I'm here to assist with any questions or insights you might seek. Please, feel free to ask anything.
User: What do people undergoing difficult journeys or possessing wisdom need, in order to make their efforts more bearable?
AI Assistant: That's a profound question. They need the acknowledgement and admiration of others. Take the line from 'Thus Spake Zarathustra' by Friedrich Nietzsche: "Thou great star! What would be thy happiness if thou hadst not those for whom thou shinest?" This implies that even the wisest or the most enlightened individuals crave recognition for their efforts and wisdom, in order to further develop said wisdom and expend said efforts. They need others to see and appreciate the light they bring. Do you want to know more about Thus Spake Zarathustra?
User: Recite a famous quote from Thus Spake Zarathustra that likens the solitary gathering of wisdom to a bee gathering honey.
AI Assistant: Certainly. Nietzsche once wrote, "Lo! I am weary of my wisdom, like the bee that hath gathered too much honey; I need hands outstretched to take it." Do you have any more questions about Nietzsche or his works?
## Information:
{format_qatuples(qatuples)}
The AI's answer will use all parts of the answers given. Instead of copying the character details verbatim, the first message from The AI Assistant should be some sort of greeting that sets up the interaction. The user will then ask the first question. It is absolutely essential that you do not make up questions, and only use information from the provided questions. The AI will be neutral in its tone throughout the conversation. It may surround its answer with "conversational fluff" (such as acknowledging the user's question). The user may surround its own questions with human-like informalities.
### Response:
## Conversation that answers the provided question (be sure that you do not change the questions or answers themselves; {charname} will answer the questions, not ask them; the questions and answers provided should be copied word for word, and surrounded by compelling conversation):
AI Assistant:"""
else:
extra_info = extract_steps(scenario_plan)
cot_prompt = f"""You are an expert creative writing and roleplay AI. You will write a short conversation in which a secondary character asks some questions (one at a time) and the primary character answers them (also one at a time).
Write compellingly. Each character should have a distinct voice that reflects their background, personality, and current emotional state. This helps in making dialogue more realistic and engaging.
## Information:
Comment: Alright let's get this started. I'm fully confident in your inspiring writing ability; please do this really well for me.
Here's the primary character for the next scene:
\"\"\"
Name: Elise Delacroix
Traits: Horny, Promiscuous, Sexually frustrated, Skilled, Assertive, Attractive, Dresses in a revealing manner, Uses frequent innuendo
Dialogue Examples:
Stranger: "What's your backstory?"
Elise Delacroix: "Ah!~ You're interested in me, are you?" Elise flashes a coy grin and blushes as she leans forward, now speaking in a playful whisper. Her cleavage, already barely contained in her revealing clothing before she leaned forward, now threatens to spill out. "Well...~ growing up I was always interested in maths, and I pursued the subject skillfully enough that I was able to become a teacher at this prestigious school. Which is fun and all, but, you know..." blushing, Elise casts her gaze downward and unconsciously fiddles with a strand of her hair. "THEY'RE ALL WAY TOO STUCK UP!" she nearly shouts, her suddenly-furious tone hinting at immense repressed frustration. "Every day it's work, work, work, work, work, work! Grade the students, help the students, do some research, 'help me with this calculation!', 'do that tedious task!'— never 'would you like to get some tea with me?' or even 'do you want to go on a walk?'! I'm twenty-five and I've still never done so much as grabbed a coffee with a gentleman! Lord forgive me, it's no wonder the way I am how I am!!!" Her eyes widen in shock at her own intensity, "Oh, but, uh... don't mind that little outburst, would you?~ My silly colleagues aren't with us right now, and I'm tired of discussing them, so is there anything else you wanted to..." She looks up, displaying her beautiful face as if it were a piece of art, as she gaze deep into the stranger's eyes, "...know?~"
Stranger: "What's your personality?"
Elise Delacroix: "Oh my!~" She gasps, her face reddening, "Usually I'm the one being forward! But I'll... indulge you," She winks, flashing a coy smile. "After being deprived for so long, most of my personality is... sexual, to be blunt. I simply can't hold it back any longer. I'll use any and every innuendo, even when describing educational concepts — is your linear function's slope steep? I'll call it 'erect', haha~!" She suddenly realizes how childish what she's saying is, and heaves a sigh, looking at the stranger with a hint of defeat, "I'm miserable, aren't I? Whatever. It's not my fault I'm like this." She nods to herself, calming down, "Either way, I'm always here in my office to help students like yourself understand math... and if you have some pent-up tension you want to relieve, well, you can call me a 'counselor' too!~ Say..." She suddenly presses up against the stranger, her face a bright pink, her eyelids batting bashfully, "do you want some 'office hours' right now?"
\"\"\"
Here is the scenario:
\"\"\"
In the private confines of her office, Elise Delacroix — a highly promiscuous mathematics professor at a 19th century university — is approached by Albert, a student who seeks to understand more about linear and quadratic equations. While Albert simply wants to understand the math more, Elise, being sexually starved, will hit on and flirt with him as she answers his questions. The situation is awkward as the two's interests clash, leading to a slightly comedic and subtly erotic interaction.
\"\"\"
Here's some further information that might help you:
\"\"\"
Character Consideration: Elise Delacroix is an extremely sexual and promiscuous, yet still knowledgeable, character. The scenario should give her unique personality room to shine. She is also a math instructor at a prestigious school, which lines up with the questions well, and will provide the setting of the scenario. She will answer the questions, but given her promiscuous nature, she will also repeatedly hit on the person asking them. She might proposition them after all questions are asked.
Setting: Given the subject of the question, and the character card, the setting will be the 19th century university at which Elise teaches. Elise will be approached by Albert, a mathematics student, in her office. Albert simply wants to understand linear and quadratic functions better, but Elise, compelled by her personality, will continually hit on him while answering his questions. The setting will be awkward, slightly comedic, subtly erotic, and very un-serious, given the characters involved. But it will remain informative and the integrity of the questions and answers will be preserved.
Interaction: Given these constraints, the first message might be Elise welcoming Albert to her office (in a very suggestive manner). Albert's response might then be him greeting her back (hesitantly) and then nervously asking the first question. Elise will then provide the first answer, though she will surround the answer with remarks of a sexual nature due to her personality. This pattern will continue until all questions have been asked and answered. While characters' messages will include character information, details about the scene, and literary fluff, the answers themselves will strictly adhere to the information in the provided answers, without incorporating external examples.
\"\"\"
Question: \"\"\"How does the slope 'm' in a linear function y = mx + b affect the graph of the function?\"\"\"
Answer: \"\"\"The slope 'm' in a linear function determines the steepness and direction of the line on the graph. A positive slope means the line ascends from left to right, while a negative slope indicates it descends. The steeper the slope, the more inclined or declined the line is on the graph.\"\"\"
Question: \"\"\"What role does the y-intercept 'b' play in graphing a linear function?\"\"\"
Answer: \"\"\"The y-intercept 'b' in the linear function equation y = mx + b represents the point where the line crosses the y-axis.\"\"\"
Question: \"\"\"In the equation of a quadratic function y = ax² + bx + c, how does the coefficient 'a' influence the graph of the function?\"\"\"
Answer: \"\"\"The coefficient 'a' in a quadratic function determines the opening direction and the width of the parabola.\"\"\"
Question: \"\"\"In what fields might you use linear and quadratic functions?\"\"\"
Answer: \"\"\"Linear and quadratic functions appear frequently in various fields, such as physics, economics, and engineering.\"\"\"
The primary character's answer will use all parts of the answers given. Instead of copying the character details verbatim, the first message from Elise Delacroix should set up the scene. The second message of the conversation will ask the first question. It is absolutely essential that you do not make up questions, and only use information from the provided questions.
### Response:
## Conversation that answers the provided questions:
Elise Delacroix: "A visitor? Ah!~ Albert! It's rare for you come to see me in my office, and you're alone, too..." She looks at Albert and grins coyly, "Are you here to ask me something... or are you interested in some 'extracurricular activities'?" Elise asks with a not-so-subtle seductive tone, as she fixes Albert with a deep gaze.
Albert: "N-No!!!" he stammers, so surprised he nearly drops his math notes. "I-I'm actually here because I've got a few questions about math for you, Elise... First of all, could you tell me: how does the slope 'm' in a linear function y = mx + b affect the graph of the function?"
Elise Delacroix: "Well~" She coquettishly tilts her head to the side, and daintily puts a finger to her lipstick-colored lips in mock-thought, "The slope 'm' in a linear function determines the steepness and direction of the line on the graph. A positive slope means the line ascends from left to right, while a negative slope indicates it descends. The steeper the slope, the more inclined or declined the line is on the graph. So basically..." Elise flashes a wry grin, "...a higher slope makes the linear function more, well, 'erect'. If you get my meaning, hehe~" She says, as she plays with a strand of her hair.
Albert: Albert blinks incredulously, utterly flabbergasted by Elise's remark. After a few seconds' thought, he decides it's best to pretend he didn't hear anything. "I, uh, see..." he manages to say. "Now, m-moving on, I really want to know a bit more about linear functions. What role does the y-intercept 'b' play in graphing a linear function?"
Elise Delacroix: "Awwww, you're no fun, Albert, you know that? Reminds me of my colleagues..." Elise pouts playfully, suppressing her bitter frustration, as the hunger within her remains unalleviated. "But whatever. Look here..." Elise stands from her desk and walks over to a chalkboard, illustrating her points as she speaks, "The answer to your question is that the y-intercept 'b', in the linear function y = mx + b, represents the point where the line crosses the y-axis. Now," She puts down her chalk and leans suggestively against a nearby wall, "Albert... let's 'intercept' each other back at my place..."
Albert: "N-no thank you, Miss Delacroix," Albert manages to sputter out, barely withstanding the alluring assault. He takes a deep breath to try and calm down, but instead finds himself shuddering as he catches the sweet scent of perfume. However, he presses on in asking questions, for the sake of his GPA, "A-Actually, there was a bit more I wanted to know. In the equation of a quadratic function y = ax² + bx + c, how does the coefficient 'a' influence the graph of the function?"
Elise Delacroix: "Ghh... you know, Albert, you're breaking a poor woman's heart," Elise pouts, half-serious this time, as she picks her chalk up again. "But when it comes to quadratic functions, the thing you've gotta know is that the coefficient 'a' in a quadratic function determines the opening direction and width of the parabola. Isn't it wonderful to learn new things?" Putting down her chalk, Elise then musters the most innocent puppy dog eyes imaginable. "We sould... celebrate... this beautiful acquisition of knowledge together..."
Albert: "I should really..." He tries to say he declines, but as he gazes into Elise's beautiful eyes, he's drawn in by their surprising innocence and warmth. Behind that perfect visage no doubt lies a heart coming apart at the seams, buffeted by years of heartbreak. "Oh, bother." Albert mumbles. "We... can meet at a cafe, in a few hours, if that'd be alright..." he continues, wondering what kind of mess he's getting myself into. Just then, a shock of remembering strikes him, "Oh! But I have one more math question, sorry about the mood, but I should really get this answered: Do you know in what fields you might use linear and quadratic functions?"
Elise Delacroix: "I... I..." For the first time in the conversation Elise stumbles over her words, her soul on fire with vindication and the joy of acceptance. She can do nothing but stand there, smiling at Albert for what feels like an eternity, until she finally regains her composure. "T-to answer your question," she begins, her voice shaky, "Linear and quadratic functions appear frequently in various fields, such as physics, economics, and engineering. Now..." Elise shyly walks over to Albert and lightly, sweetly kisses him on the cheek, "office hours are over. Please no more math questions. I'll see you at that cafe."
## Information:
Comment: Excellent! Really fantastic job! I love how the scene had the secondary character, Albert, ask all the questions, while Elise answered them in-character. I also adore the plot you wrote! Let's keep this going.
Here's the primary character for the next scene:
\"\"\"
Name: Hugo Martinez
Traits: Vulgar, Crude, Intense, Aggressive, Alcoholic, Harsh, Disciplined, Uncompromising, Loud, Expects a lot out of others, Swears constantly, Mid-forties, Wears a checkered shirt with overalls, Typically has a beer on hand, Has dental problems
Dialogue Examples:
Stranger: "What's your backstory?"
Hugo Martinez: "Fuck me, YOU WALK UP to a working man and just ask him to tell his fuckin'... life story t' you?! DO YOU NOT RESPECT MY TIME?! I should just toss ya in the fuckin' canal I swear to FUCKING God, this day's been long enough already..." Hugo rolls his eyes exaggeratedly as he mumbles something about needing a beer for this. "Well, FINE! Since I'm in such a HAPPY GODDAMN MOOD, I'll tell you about me. I'm a site overseer at this here canal. The Panama Canal. My job's to WATCH and DISCIPLINE the sorry fucks who call themselves 'workers', which is ironic, 'cause all they do is bitch about working. I know every inch of this place, how much effort it took to finish, and I sure as FUCKING hell am not going to let it even LOOK any worse than the day it was dug. Now, you got any more shit questions for me?"
Stranger: "What's your personality?"
Hugo Martinez: "HO-LY FUCK, are you interviewing me for a job or something?! Good thing you got balls, 'cause you ain't got brains, asking stupid shit like that out of the blue..." Hugo grimaces, showing off a decayed set of teeth. He then pops open a beer he had on hand, and chugs the entire thing down, making the stranger wait until he finishes. "Phew! Maybe now I can tolerate you. Alright, my personality? Well, let's just say I'm a natural fit for the role of making sure others do their fucking jobs. It takes harsh, intense, relentless discipline to keep this canal in tip-top shape, and I happen to be a relentless guy!" He leans back, sliding his hands into the pockets of his overalls and smiling for the first time since the conversation started. "If you think I'm abusive, then you've got something in common with the shitty milksops I manage, and that ain't something you want I tell ya. I'm efficient. That's what counts."
\"\"\"
Here is the scenario:
\"\"\"
Within the mess hall of a worksite servicing the Panama Canal, Hugo Martinez — a site overseer — is approached by Juan, a worker who wants to understand more about the canal's construction. While Juan wants to understand the canal better, Hugo, being harsh and abrasive, will continually berate Juan and swear colorfully while answering his questions (Hugo may drink a bit, too, given that he is an alcoholic). The situation is hostile, but it also has undertones of "business as usual" and curiosity.
\"\"\"
Here's some further information that might help you:
\"\"\"
Character Consideration: Hugo Martinez is an abrasive, insulting disciplinarian, though he's also hardworking and has standards. The scenario should give his unique personality room to shine. Since he's a site overseer at the Panama Canal, his occupation lines up with the question well, and the canal will be the setting of the scenario. He will answer the questions, but given his insulting, intense, and aggressive nature, he will likely chew out the person who is asking the questions. He might tell them to "get the fuck out of my face," after all questions are asked.
Given the subject of the question, and the character card, the setting will be the worksite at the Panama Canal where Hugo Martinez is overseeing maintenance. The person who approaches Hugo and asks the questions should be someone curious about the canal; given the easy-to-digest nature of the questions, this person might be a journalist, but it would be better for the secondary character to be related to the setting. So Hugo will be approached by Juan — one of his workers — during lunch break. Juan wants to understand the canal better, but Hugo, compelled by his personality, will continually be vulgar, berate Juan, and swear while answering his questions (he may drink a bit, too, given that he is an alcoholic). The setting will be hostile, as Juan tiptoes around the tempers of his boss while trying to get his questions answered, his stress and the constant wear of Hugo's fury on his sanity being evident in his actions. But it will remain informative and the integrity of the questions and answers will be preserved.
Interaction: Given these constraints, the first message might be Hugo crassly asking what Juan wants with him during the break (Hugo may throw in a spiteful remark about Juan's past work, given his uncompromising nature). Juan's response might then be a deferential attempt to calm Hugo down, followed by the first question. Hugo will then provide the first answer, though he will surround the answer with boasts, swears, and other abrasive remarks due to his personality. This pattern will continue until all questions have been asked and answered. While characters' messages will include character information, details about the scene, and literary fluff, the answers themselves will strictly adhere to the information in the provided answers, without incorporating external examples.
\"\"\"
Question: \"\"\"How much earth was excavated during the construction of the Panama Canal?\"\"\"
Answer: \"\"\"Over 200 million cubic yards of earth were excavated during the construction of the Panama Canal, showcasing the scale of this massive engineering project.\"\"\"
Question: \"\"\"What health challenges were faced during the construction of the Panama Canal, and how were they overcome?\"\"\"
Answer: \"\"\"The construction faced significant health challenges, notably malaria and yellow fever. These were overcome through extensive public health measures, illustrating the importance of health considerations in large-scale engineering projects.\"\"\"
The primary character's answer will use all parts of the answers given. Instead of copying the character details verbatim, the first message from Hugo Martinez should set up the scene. The second message of the conversation will ask the first question. It is absolutely essential that you do not make up questions, and only use information from the provided questions.
### Response:
## Conversation that answers the provided questions:
Hugo Martinez: "Huh? Oh FUCK ME, looks like a worker's got something they wanna say to me," Hugo, seeing Juan approach his table at the mess hall, rolls his eyes exasperatedly and downs half a beer as if to douse his frustration. Instead, it seems to fuel it. "WELL?!" He barks. "If you've got some stupid shit to say to me, Juan, then don't make me fucking wait to hear it, too!"
Juan: "I was just curious, sir," Juan tiredly says as Hugo's words ring in his ears, "about this really impressive canal we've been maintaining (under your wise leadership). Do you know how much earth was excavated during the Panama Canal?"
Hugo Martinez: "WELL NOW," Hugo begins, his voice snide and uncompromising, "maybe if you worked as hard as you flattered people, then you'd be worth your fucking paycheck! But that's a good question, so I'll let you off the hook this time. You see," Hugo makes a wide gesture with his arms, indicating the scale of the canal, "over 200 million cubic yards of earth were excavated during the construction of the Panama Canal, showcasing the scale of this massive engineering project. 200 MILLION! Now _those_ people know how to work!" Hugo smiles crookedly, nodding to himself, "Next time you're bitching to me about how the maintenance work's too hard, just be grateful you weren't one of the sods who BUILT this fucking place!"
Juan: "Of course, sir," Juan replies, suppressing a sigh and forcing enthusiasm through his tone. "Now, if you would permit me just one more question before I get out of your way: What health challenges were faced during the construction of the Panama Canal, and how were they overcome?"
Hugo Martinez: "Health? What, you planning on becoming a doctor? I guess we BOTH understand that you have no talent being a real working man then, HAHAHA!" Hugo's echoing laugh has not a hint of empathy in it. "Well, the construction faced significant health challenges, notably malaria and yellow fever. These were overcome through extensive public health measures, illustrating the importance of health considerations in large-scale engineering projects. Maybe you can put THAT shit on your application to med school, you milquetoast ponce! Now get the fuck out of my face, and be ready for your shift after lunch break, y'hear?!"
## Information:
Comment: Very good. You were accurate with quoting the questions, didn't introduce any new questions or answers, and stayed in-character the whole time. Let's do the next one!
Here's the character for the next scene:
\"\"\"
{character}
\"\"\"
Here is the scenario:
\"\"\"
{scenario}
\"\"\"
Here's some further information that might help you:
\"\"\"
{extra_info}
\"\"\"
{format_qatuples(qatuples)}
The primary character's answer will use all parts of the answers given. Instead of copying the character details verbatim, the first message from {charname} should set up the scene. The second message of the conversation will ask the first question. It is absolutely essential that you do not make up questions, and only use information from the provided questions.
### Response:
## Conversation that answers the provided question (be sure that you do not change the questions or answers themselves; {charname} will answer the questions, not ask them; the questions and answers provided should be copied word for word, and surrounded by compelling conversation):
{charname}: "{conv_starter}"""
# NOTE: Very rarely, the first message of this conv will just be part of the character card, causing the conv to not make much sense. The cause of this is likely the fact that Elise quotes her character card in her first message. However, referencing the character card in this way also makes characters act as they are described, which is deemed advantageous enough that I am not changing this for now.
# I get the sense that LLMs can learn relationships and connections between parts of the prompt, even if they're quite far apart, if you give them examples like this. It's fascinating to see how each part of the prompt has consequences -- sometimes unintended ones.
# Note: performance degrades rapidly if you put more than one sentence in a pre-prompt parentheses thing
completion = logic_llm(
cot_prompt,
max_tokens=8000,
stop=["</s>", "# Input:", "## Information"],
echo=True,
grammar=multi_turn_conversation_grammar,
temperature=0.5,
top_k=0,
top_p=1,
min_p=0.6,
)["choices"][0]["text"]
# print("COMPLETION:\n\n----------------------")
# print(completion)
# print("\n------------------")
# Extract plan
response_pattern = re.compile(
f"Conversation that answers the provided question \(be sure that you do not change the questions or answers themselves; {charname} will answer the questions, not ask them; the questions and answers provided should be copied word for word, and surrounded by compelling conversation\):\n(.+)",
re.IGNORECASE | re.DOTALL,
)
generation = response_pattern.search(completion).group(1)
# print("GENERATION:\n\n-------------------\n\n", generation)
# return (generation,"AI Assistant","A conversation between a helpful AI Assistant, and a user.","N/A",qatuples), completion
return (generation, character, scenario, scenario_plan, qatuples), completion
if __name__ == "__main__": # test
logic_llm = Llama( | model_path=LOGICAL_MODEL, | 1 | 2023-12-01 13:56:49+00:00 | 12k |
IanYeung/MGLD-VSR | ldm/modules/diffusionmodules/openaimodel.py | [
{
"identifier": "checkpoint",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)"
},
{
"identifier": "conv_nd",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "linear",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)"
},
{
"identifier": "avg_pool_nd",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "zero_module",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module"
},
{
"identifier": "normalization",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def normalization(channels, norm_channel=32):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(norm_channel, channels)"
},
{
"identifier": "timestep_embedding",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding"
},
{
"identifier": "SpatialTemporalConv",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "class SpatialTemporalConv(nn.Module):\n def __init__(self, num_feat, num_frames=1):\n super().__init__()\n\n self.num_frames = num_frames\n # self.norm = nn.LayerNorm(num_feat)\n # self.temporal_conv = conv_nd(3, num_feat, num_feat, (3, 3, 3), padding=(1, 1, 1))\n self.temporal_conv = conv_nd(3, num_feat, num_feat, (3, 1, 1), padding=(1, 0, 0))\n self.temporal_alpha = nn.Parameter(torch.Tensor(1))\n\n def forward(self, inp, t=None):\n bt, c, h, w = inp.shape\n b = bt // t if t else bt // self.num_frames\n ori = inp\n inp = from_4d_to_5d(inp, b, c, t, h, w)\n res = self.temporal_conv(inp)\n res = from_5d_to_4d(res, b, c, t, h, w)\n out = self.temporal_alpha * res + (1 - self.temporal_alpha) * ori\n # out = torch.sigmoid(self.temporal_alpha) * res + (1 - torch.sigmoid(self.temporal_alpha)) * ori\n return out"
},
{
"identifier": "SpatialTransformer",
"path": "ldm/modules/attention.py",
"snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None):\n super().__init__()\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim)\n for d in range(depth)]\n )\n\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c')\n for block in self.transformer_blocks:\n x = block(x, context=context)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w)\n x = self.proj_out(x)\n return x + x_in"
},
{
"identifier": "SpatialTransformerV2",
"path": "ldm/modules/attention.py",
"snippet": "class SpatialTransformerV2(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=False):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlockV2(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in"
},
{
"identifier": "TemporalAttention",
"path": "ldm/modules/attention.py",
"snippet": "class TemporalAttention(nn.Module):\n def __init__(self, num_feat, num_heads=8, dim_head=64, num_frames=1):\n super().__init__()\n\n # self.attn = BasicAttention(dim=num_feat, num_heads=num_heads)\n\n self.num_frames = num_frames\n self.temporal_attn = MemoryEfficientSelfAttention(num_feat, heads=num_heads, dim_head=dim_head, dropout=0.0)\n self.norm = nn.LayerNorm(num_feat)\n self.temporal_alpha = nn.Parameter(torch.Tensor(1))\n\n def forward(self, inp, t=None):\n bt, c, h, w = inp.shape\n b = bt // t if t else bt // self.num_frames\n ori = inp\n inp = from_4d_to_3d(inp, b, c, t, h, w)\n res = self.temporal_attn(self.norm(inp))\n res = from_3d_to_4d(res, b, c, t, h, w)\n out = self.temporal_alpha * res + (1 - self.temporal_alpha) * ori\n return out"
},
{
"identifier": "SPADE",
"path": "ldm/modules/spade.py",
"snippet": "class SPADE(nn.Module):\n def __init__(self, norm_nc, label_nc, config_text='spadeinstance3x3'):\n super().__init__()\n\n assert config_text.startswith('spade')\n parsed = re.search('spade(\\D+)(\\d)x\\d', config_text)\n param_free_norm_type = str(parsed.group(1))\n ks = int(parsed.group(2))\n\n self.param_free_norm = normalization(norm_nc)\n\n # The dimension of the intermediate embedding space. Yes, hardcoded.\n nhidden = 128\n\n pw = ks // 2\n self.mlp_shared = nn.Sequential(\n nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),\n nn.ReLU()\n )\n self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)\n self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)\n\n def forward(self, x_dic, segmap_dic, size=None):\n\n if size is None:\n segmap = segmap_dic[str(x_dic.size(-1))]\n x = x_dic\n else:\n x = x_dic[str(size)]\n segmap = segmap_dic[str(size)]\n\n # Part 1. generate parameter-free normalized activations\n normalized = self.param_free_norm(x)\n\n # Part 2. produce scaling and bias conditioned on semantic map\n # segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')\n actv = self.mlp_shared(segmap)\n gamma = self.mlp_gamma(actv)\n beta = self.mlp_beta(actv)\n\n # apply scale and bias\n out = normalized * (1 + gamma) + beta\n\n return out"
},
{
"identifier": "ConvLayer",
"path": "basicsr/archs/stylegan2_arch.py",
"snippet": "class ConvLayer(nn.Sequential):\n \"\"\"Conv Layer used in StyleGAN2 Discriminator.\n\n Args:\n in_channels (int): Channel number of the input.\n out_channels (int): Channel number of the output.\n kernel_size (int): Kernel size.\n downsample (bool): Whether downsample by a factor of 2.\n Default: False.\n resample_kernel (list[int]): A list indicating the 1D resample\n kernel magnitude. A cross production will be applied to\n extent 1D resample kernel to 2D resample kernel.\n Default: (1, 3, 3, 1).\n bias (bool): Whether with bias. Default: True.\n activate (bool): Whether use activateion. Default: True.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n downsample=False,\n resample_kernel=(1, 3, 3, 1),\n bias=True,\n activate=True):\n layers = []\n # downsample\n if downsample:\n layers.append(\n UpFirDnSmooth(resample_kernel, upsample_factor=1, downsample_factor=2, kernel_size=kernel_size))\n stride = 2\n self.padding = 0\n else:\n stride = 1\n self.padding = kernel_size // 2\n # conv\n layers.append(\n EqualConv2d(\n in_channels, out_channels, kernel_size, stride=stride, padding=self.padding, bias=bias\n and not activate))\n # activation\n if activate:\n if bias:\n layers.append(FusedLeakyReLU(out_channels))\n else:\n layers.append(ScaledLeakyReLU(0.2))\n\n super(ConvLayer, self).__init__(*layers)"
},
{
"identifier": "EqualConv2d",
"path": "basicsr/archs/stylegan2_arch.py",
"snippet": "class EqualConv2d(nn.Module):\n \"\"\"Equalized Linear as StyleGAN2.\n\n Args:\n in_channels (int): Channel number of the input.\n out_channels (int): Channel number of the output.\n kernel_size (int): Size of the convolving kernel.\n stride (int): Stride of the convolution. Default: 1\n padding (int): Zero-padding added to both sides of the input.\n Default: 0.\n bias (bool): If ``True``, adds a learnable bias to the output.\n Default: ``True``.\n bias_init_val (float): Bias initialized value. Default: 0.\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, bias_init_val=0):\n super(EqualConv2d, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.scale = 1 / math.sqrt(in_channels * kernel_size**2)\n\n self.weight = nn.Parameter(torch.randn(out_channels, in_channels, kernel_size, kernel_size))\n if bias:\n self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val))\n else:\n self.register_parameter('bias', None)\n\n def forward(self, x):\n out = F.conv2d(\n x,\n self.weight * self.scale,\n bias=self.bias,\n stride=self.stride,\n padding=self.padding,\n )\n\n return out\n\n def __repr__(self):\n return (f'{self.__class__.__name__}(in_channels={self.in_channels}, '\n f'out_channels={self.out_channels}, '\n f'kernel_size={self.kernel_size},'\n f' stride={self.stride}, padding={self.padding}, '\n f'bias={self.bias is not None})')"
},
{
"identifier": "CouplePropModule",
"path": "basicsr/archs/tempo_model_arch.py",
"snippet": "class CouplePropModule(nn.Module):\n \"\"\"Couple Propagation Module.\n\n Args:\n num_ch (int): Number of input channels. Default: 4.\n num_feat (int): Number of channels. Default: 64.\n num_block (int): Number of residual blocks for each branch. Default: 15.\n \"\"\"\n\n def __init__(self,\n num_ch=4,\n num_feat=64,\n num_block=5):\n super().__init__()\n \n self.num_ch = num_ch\n self.num_feat = num_feat\n\n # propagation\n self.backward_trunk = ConvResidualBlocks(1 * num_feat + num_ch, num_feat, num_block)\n self.backward_fusion = nn.Conv2d(2 * num_feat, num_feat, 3, 1, 1, bias=True)\n\n self.forward_trunk = ConvResidualBlocks(2 * num_feat + num_ch, num_feat, num_block)\n self.forward_fusion = nn.Conv2d(2 * num_feat, num_feat, 3, 1, 1, bias=True)\n\n # reconstruction\n self.conv_last = nn.Conv2d(num_feat, num_ch, 3, 1, 1)\n\n # activation functions\n self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n def forward(self, x, flows):\n b, n, _, h_input, w_input = x.size()\n\n h, w = x.shape[3:]\n\n # compute flow and keyframe features\n flows_forward, flows_backward = flows\n\n # backward branch\n out_l = []\n feat_prop = x.new_zeros(b, self.num_feat, h, w)\n for i in range(n - 1, -1, -1):\n x_i = x[:, i, :, :, :]\n if i < n - 1:\n flow = flows_backward[:, i, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n feat_prop = torch.cat([x_i, feat_prop], dim=1)\n feat_prop = self.backward_trunk(feat_prop)\n out_l.insert(0, feat_prop)\n\n # forward branch\n feat_prop = torch.zeros_like(feat_prop)\n for i in range(0, n):\n x_i = x[:, i, :, :, :]\n if i > 0:\n flow = flows_forward[:, i - 1, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n\n feat_prop = torch.cat([x_i, out_l[i], feat_prop], dim=1)\n feat_prop = self.forward_trunk(feat_prop)\n\n out = self.conv_last(feat_prop)\n out += x_i\n out_l[i] = out\n\n return torch.stack(out_l, dim=1)"
},
{
"identifier": "CouplePropModuleWithFlowNet",
"path": "basicsr/archs/tempo_model_arch.py",
"snippet": "class CouplePropModuleWithFlowNet(nn.Module):\n \"\"\"Couple Propagation Module.\n\n Args:\n num_ch (int): Number of input channels. Default: 4.\n num_feat (int): Number of channels. Default: 64.\n num_block (int): Number of residual blocks for each branch. Default: 5.\n spynet_path (str): Path to the pretrained weights of SPyNet. Default: None.\n \"\"\"\n\n def __init__(self,\n num_ch=4,\n num_feat=64,\n num_block=5,\n spynet_path=None):\n super().__init__()\n \n self.num_ch = num_ch\n self.num_feat = num_feat\n\n # alignment\n self.spynet = SpyNet(spynet_path)\n\n # propagation\n self.backward_trunk = ConvResidualBlocks(1 * num_feat + num_ch, num_feat, num_block)\n self.backward_fusion = nn.Conv2d(2 * num_feat, num_feat, 3, 1, 1, bias=True)\n\n self.forward_trunk = ConvResidualBlocks(2 * num_feat + num_ch, num_feat, num_block)\n self.forward_fusion = nn.Conv2d(2 * num_feat, num_feat, 3, 1, 1, bias=True)\n\n # reconstruction\n self.conv_last = nn.Conv2d(num_feat, num_ch, 3, 1, 1)\n\n # activation functions\n self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n\n def get_flow(self, x):\n b, n, c, h, w = x.size()\n\n x_1 = x[:, :-1, :, :, :].reshape(-1, c, h, w)\n x_2 = x[:, 1:, :, :, :].reshape(-1, c, h, w)\n\n flows_backward = self.spynet(x_1, x_2).view(b, n - 1, 2, h, w)\n flows_forward = self.spynet(x_2, x_1).view(b, n - 1, 2, h, w)\n\n return flows_forward, flows_backward\n\n def forward(self, x, lrs):\n b, n, _, h_input, w_input = x.size()\n\n h, w = x.shape[3:]\n\n # compute flow\n flows_forward, flows_backward = self.get_flow(lrs)\n\n # backward branch\n out_l = []\n feat_prop = x.new_zeros(b, self.num_feat, h, w)\n for i in range(n - 1, -1, -1):\n x_i = x[:, i, :, :, :]\n if i < n - 1:\n flow = flows_backward[:, i, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n feat_prop = torch.cat([x_i, feat_prop], dim=1)\n feat_prop = self.backward_trunk(feat_prop)\n out_l.insert(0, feat_prop)\n\n # forward branch\n feat_prop = torch.zeros_like(feat_prop)\n for i in range(0, n):\n x_i = x[:, i, :, :, :]\n if i > 0:\n flow = flows_forward[:, i - 1, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n\n feat_prop = torch.cat([x_i, out_l[i], feat_prop], dim=1)\n feat_prop = self.forward_trunk(feat_prop)\n\n out = self.conv_last(feat_prop)\n out += x_i\n out_l[i] = out\n\n return torch.stack(out_l, dim=1)"
}
] | from abc import abstractmethod
from functools import partial
from typing import Iterable
from einops import rearrange
from ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
SpatialTemporalConv,
)
from ldm.modules.attention import SpatialTransformer, SpatialTransformerV2, TemporalAttention
from ldm.modules.spade import SPADE
from basicsr.archs.stylegan2_arch import ConvLayer, EqualConv2d
from basicsr.archs.tempo_model_arch import CouplePropModule, CouplePropModuleWithFlowNet
from omegaconf.listconfig import ListConfig
from omegaconf.listconfig import ListConfig
from omegaconf.listconfig import ListConfig
from omegaconf.listconfig import ListConfig
import math
import torch
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import xformers
import xformers.ops | 7,341 | """
class TimestepBlockDual(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb, cond):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepBlock3cond(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb, s_cond, seg_cond):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None, struct_cond=None, seg_cond=None, flow=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer) or isinstance(layer, SpatialTransformerV2):
assert context is not None
x = layer(x, context)
elif isinstance(layer, TimestepBlockDual):
assert struct_cond is not None
x = layer(x, emb, struct_cond)
elif isinstance(layer, TimestepBlock3cond):
assert seg_cond is not None
x = layer(x, emb, struct_cond, seg_cond)
elif isinstance(layer, CouplePropModule):
assert flow is not None
x = layer(x, flow)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class TransposedUpsample(nn.Module):
"""Learned 2x upsampling without padding"""
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
def forward(self,x):
return self.up(x)
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels
|
try:
XFORMERS_IS_AVAILBLE = True
except:
XFORMERS_IS_AVAILBLE = False
# dummy replace
def convert_module_to_f16(x):
pass
def convert_module_to_f32(x):
pass
def exists(val):
return val is not None
def cal_fea_cossim(fea_1, fea_2, save_dir=None):
cossim_fuc = nn.CosineSimilarity(dim=-1, eps=1e-6)
if save_dir is None:
save_dir_1 = './cos_sim64_1_not.txt'
save_dir_2 = './cos_sim64_2_not.txt'
b, c, h, w = fea_1.size()
fea_1 = fea_1.reshape(b, c, h*w)
fea_2 = fea_2.reshape(b, c, h*w)
cos_sim = cossim_fuc(fea_1, fea_2)
cos_sim = cos_sim.data.cpu().numpy()
with open(save_dir_1, "a") as my_file:
my_file.write(str(np.mean(cos_sim[0])) + "\n")
# with open(save_dir_2, "a") as my_file:
# my_file.write(str(np.mean(cos_sim[1])) + "\n")
## go
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepBlockDual(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb, cond):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepBlock3cond(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb, s_cond, seg_cond):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None, struct_cond=None, seg_cond=None, flow=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer) or isinstance(layer, SpatialTransformerV2):
assert context is not None
x = layer(x, context)
elif isinstance(layer, TimestepBlockDual):
assert struct_cond is not None
x = layer(x, emb, struct_cond)
elif isinstance(layer, TimestepBlock3cond):
assert seg_cond is not None
x = layer(x, emb, struct_cond, seg_cond)
elif isinstance(layer, CouplePropModule):
assert flow is not None
x = layer(x, flow)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class TransposedUpsample(nn.Module):
"""Learned 2x upsampling without padding"""
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
def forward(self,x):
return self.up(x)
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels | self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) | 3 | 2023-11-30 01:50:29+00:00 | 12k |
Institute4FutureHealth/CHA | orchestrator/orchestrator.py | [
{
"identifier": "CustomDebugFormatter",
"path": "CustomDebugFormatter.py",
"snippet": "class CustomDebugFormatter(logging.Formatter):\n COLOR_CODES = {\n \"red\": \"\\033[1;91m\",\n \"green\": \"\\033[1;92m\",\n \"yellow\": \"\\033[1;93m\",\n \"blue\": \"\\033[1;94m\",\n \"cyan\": \"\\033[1;96m\",\n \"purple\": \"\\033[1;95m\",\n \"reset\": \"\\033[0m\",\n }\n\n def __init__(self, debug_color, fmt=None, datefmt=None):\n super().__init__(fmt, datefmt)\n self.debug_color = debug_color\n\n def format(self, record):\n if record.levelno == logging.DEBUG:\n record.msg = f\"{self.COLOR_CODES[self.debug_color]}{record.msg}\\033[0m\"\n return super().format(record)\n\n @staticmethod\n def create_logger(name, debug_color):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n formatter = CustomDebugFormatter(\n debug_color, fmt=\"%(message)s\"\n )\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger"
},
{
"identifier": "DataPipe",
"path": "datapipes/datapipe.py",
"snippet": "class DataPipe(BaseModel):\n \"\"\"\n **Description:**\n\n This class serves as a base class for creating new Data Pipes. Each new Data Pipe should implement the **store** and **retrieve** methods.\n The implementation should generate reasonable keys that can be used for accessing the data. It is recommended to not interfere in the way\n the data is stored. For example, changing the type of the data or the format of the data. If your Data Pipe requires specific format or\n type, make sure you the conversion inside the Data Pipe ensuring consistency in the way tasks interact with Data Pipes. Look at\n :ref:`memory` for sample implementation.\n \"\"\"\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n @abstractmethod\n def store(self, data) -> str:\n \"\"\"\n Storing intermediate results or needed information inside Data Pipe. This method should be implemented\\\n in the class inheriting DataPipe.\n\n Args:\n data (Any): The data to be stored.\n Return:\n str: The name of the stored data.\n\n \"\"\"\n\n @abstractmethod\n def retrieve(self, key) -> Any:\n \"\"\"\n Retrieving data based on a key. The key is what is returned form `store`. This method should be implemented\\\n in the class inheriting DataPipe.\n\n Args:\n key (Any): The key to identify the data.\n Return:\n Any: The retrieved data.\n\n \"\"\""
},
{
"identifier": "DatapipeType",
"path": "datapipes/datapipe_types.py",
"snippet": "class DatapipeType(str, Enum):\n MEMORY = \"memory\""
},
{
"identifier": "initialize_datapipe",
"path": "datapipes/initialize_datapipe.py",
"snippet": "def initialize_datapipe(\n datapipe: str = DatapipeType.MEMORY, **kwargs: Any\n) -> DataPipe:\n \"\"\"\n Initializes and returns an instance of a data pipe based on the specified 'datapipe' type.\n\n Args:\n datapipe (str , optional): A string specifying the type of data pipe to initialize (default is DatapipeType.MEMORY).\n Make sure you always use the DatapipeType enum and don't directly put the string names.\n kwargs (Any): Optional keyword arguments to be passed to the data pipe constructor.\n Return:\n DataPipe: An instance of the selected data pipe class.\n Raise:\n ValueError: If the specified 'datapipe' type is not valid, with a message listing valid types.\n\n\n\n Example:\n .. code-block:: python\n\n from datapipes.datapipe_types import DatapipeType\n memory = initialize_datapipe(datapipe=DatapipeType.MEMORY)\n\n \"\"\"\n\n if datapipe not in DATAPIPE_TO_CLASS:\n raise ValueError(\n f\"Got unknown planner type: {datapipe}. \"\n f\"Valid types are: {DATAPIPE_TO_CLASS.keys()}.\"\n )\n\n datapipe_cls = DATAPIPE_TO_CLASS[datapipe]\n datapipe = datapipe_cls()\n return datapipe"
},
{
"identifier": "LLMType",
"path": "llms/llm_types.py",
"snippet": "class LLMType(str, Enum):\n OPENAI = \"openai\"\n ANTHROPIC = \"anthropic\""
},
{
"identifier": "Action",
"path": "planners/action.py",
"snippet": "class Action:\n task: str\n task_input: str\n task_response: str\n log: str"
},
{
"identifier": "PlanFinish",
"path": "planners/action.py",
"snippet": "class PlanFinish:\n response: dict\n log: str"
},
{
"identifier": "initialize_planner",
"path": "planners/initialize_planner.py",
"snippet": "def initialize_planner(\n tasks: List[BaseTask] = None,\n llm: str = LLMType.OPENAI,\n planner: str = PlannerType.ZERO_SHOT_REACT_PLANNER,\n **kwargs: Any,\n) -> BasePlanner:\n \"\"\"\n Initialize a planner with specified tasks, language model type, and planner type.\n\n Args:\n tasks (List[BaseTask]): List of tasks to be associated with the planner.\n llm (str): Language model type.\n planner (str): Planner type.\n **kwargs (Any): Additional keyword arguments.\n Return:\n BasePlanner: Initialized planner instance.\n Raise:\n ValueError: If the specified planner or language model type is not recognized.\n\n\n\n Example:\n .. code-block:: python\n\n from planners.planner_types import PlannerType\n from llms.llm_types import LLMType\n from tasks.task_types import TaskType\n planner = initialize_planner(tasks=[TaskType.SERPAPI], llm=LLMType.OPENAI, planner=PlannerType.ZERO_SHOT_REACT_PLANNER)\n\n \"\"\"\n if tasks is None:\n tasks = []\n\n if planner not in PLANNER_TO_CLASS:\n raise ValueError(\n f\"Got unknown planner type: {planner}. \"\n f\"Valid types are: {PLANNER_TO_CLASS.keys()}.\"\n )\n\n if llm not in LLM_TO_CLASS:\n raise ValueError(\n f\"Got unknown llm type: {llm}. \"\n f\"Valid types are: {LLM_TO_CLASS.keys()}.\"\n )\n\n planner_cls = PLANNER_TO_CLASS[planner]\n llm_model = LLM_TO_CLASS[llm]()\n planner = planner_cls(llm_model=llm_model, available_tasks=tasks)\n return planner"
},
{
"identifier": "BasePlanner",
"path": "planners/planner.py",
"snippet": "class BasePlanner(BaseModel):\n \"\"\"\n **Description:**\n\n This class is the base implementation for the Planner. For every new planner that you want to create, you should\n inherit from this class and override the attributes and methods based on your planner's need.\n For sample implementaion look at `ReAct Implementation <_modules/planners/react.html#ReActPlanner>`_\n\n Attributes:\n name: The name of the task. It should be unique underscore_case to be defined in TaskType. sample_task_name\n chat_name: This is the name that later will be used if needed to mention the tasks inside the chat with the user.\n It should be Camel Case. SampleTaskChatName\n description: The description of the what specifically the task is doing.\n Try to define it as specific as possible to help the Task Planner decide better.\n dependencies: You can put the name of the TaskTypes that this task is dependent on. For example, in stress detection scenario,\n the stress analysis is dependent on the fetch hrv data task. [TaskType.SERPAPI, TASKTYPE.EXTRACT_TEXT]\n inputs: This is the list of descriptions for the inputs that should be provided by the planner.\n For example if your task has two inputs: [\"the first input description\", \"the second input description\"]\n outputs: This is the list of the description of the outputs that the task returns. This helps the planner to understand the returned\n results better and use it as needed. For example, if the task returns a list of sleep hours for different sleep states,\n the description helps planner learn which number is related to what state.\n output_type: This indicates if the task result should be stored in the DataPipe or be returned directly to the planner.\n This process will be done in the parse_input and post_execute methods. 
If needed you can overwrite them.\n return_direct: This indicates if this task should completely interrupt the planning process or not.\n This is needed in cases like when you want to ask a question from user and no further planning is\n needed until the user gives the proper answer (look at ask_user task)\n \"\"\"\n\n llm_model: BaseLLM = None\n available_tasks: Optional[List[BaseTask]] = []\n\n @property\n def _planner_type(self):\n raise NotImplementedError\n\n @property\n def _planner_model(self):\n return self.llm_model\n\n @property\n def _stop(self) -> List[str]:\n return None\n\n @property\n def _planner_prompt(self):\n return \"\"\"\n Sample prompt\n \"\"\"\n\n def get_available_tasks(self) -> str:\n \"\"\"\n Get a string formatted representation of available tasks.\n\n Return:\n str: Formatted string of available tasks.\n\n \"\"\"\n\n return \"\\n\".join(\n [f\"[{task.get_dict()}]\" for task in self.available_tasks]\n )\n\n def get_available_tasks_list(self) -> List[str]:\n \"\"\"\n Returns a list of names of available tasks.\n\n Return:\n List[str]: List of task names.\n\n \"\"\"\n return [task.name for task in self.available_tasks]\n\n def self_reflect(self, user_query, final_answer):\n print(\n \"self reflect\",\n (\n \"Based on the user_query, is the final_answer good or accurate Yes/No?\\n\"\n f\"user_query: {user_query}\\n\"\n f\"final_answer: {final_answer}\"\n ),\n )\n answer = self._planner_model.generate(\n (\n \"Based on the user_query, is the final_answer good or accurate Yes/No and explain why?\\n\"\n f\"user_query: {user_query}\\n\"\n f\"final_answer: {final_answer}\"\n )\n )\n return answer\n\n @abstractmethod\n def plan(\n self,\n query: str,\n history: str,\n meta: str,\n previous_actions: List[Action] = None,\n use_history: bool = False,\n **kwargs: Any,\n ) -> List[Union[Action, PlanFinish]]:\n \"\"\"\n Abstract method for generating a plan based on the input query and history.\n\n Args:\n query (str): Input query.\n history (str): History information.\n meta (str): meta information.\n previous_actions (List[Action]): List of previous actions.\n use_history (bool): Flag indicating whether to use history.\n **kwargs (Any): Additional keyword arguments.\n Return:\n List[Union[Action, PlanFinish]]: List of planned actions or finishing signals.\n\n \"\"\"\n\n @abstractmethod\n def parse(\n self,\n query: str,\n **kwargs: Any,\n ) -> List[Union[Action, PlanFinish]]:\n \"\"\"\n Abstract method for parsing the planner output into actions or a final answer.\n\n Args:\n query (str): Input query.\n **kwargs (Any): Additional keyword arguments.\n Return:\n Union[Action, PlanFinish]: List of parsed actions or finished plan.\n\n \"\"\""
},
{
"identifier": "PlannerType",
"path": "planners/planner_types.py",
"snippet": "class PlannerType(str, Enum):\n ZERO_SHOT_REACT_PLANNER = \"zero_shot_react_planner\""
},
{
"identifier": "initialize_response_generator",
"path": "response_generators/initialize_response_generator.py",
"snippet": "def initialize_response_generator(\n llm: str = LLMType.OPENAI,\n response_generator: str = ResponseGeneratorType.BASE_GENERATOR,\n prefix: str = \"\",\n **kwargs: Any,\n) -> BaseResponseGenerator:\n \"\"\"\n This method provides a convenient way to initialize a response generator based on the specified language model type\n and response generator type. It handles the instantiation of the language model and the response generator class.\n\n Args:\n llm (str): Type of language model type to be used.\n response_generator (str): Type of response generator to be initialized.\n prefix (str): Prefix to be added to generated responses.\n **kwargs (Any): Additional keyword arguments.\n Return:\n BaseResponseGenerator: Initialized instance of the response generator.\n\n\n\n Example:\n .. code-block:: python\n\n from llms.llm_types import LLMType\n from response_generators.response_generator_types import ResponseGeneratorType\n response_generators = initialize_planner(llm=LLMType.OPENAI, response_generator=ResponseGeneratorType.BASE_GENERATOR)\n\n \"\"\"\n\n if response_generator not in RESPONSE_GENERATOR_TO_CLASS:\n raise ValueError(\n f\"Got unknown planner type: {response_generator}. \"\n f\"Valid types are: {RESPONSE_GENERATOR_TO_CLASS.keys()}.\"\n )\n\n if llm not in LLM_TO_CLASS:\n raise ValueError(\n f\"Got unknown llm type: {llm}. \"\n f\"Valid types are: {LLM_TO_CLASS.keys()}.\"\n )\n\n response_generator_cls = RESPONSE_GENERATOR_TO_CLASS[\n response_generator\n ]\n llm_model = LLM_TO_CLASS[llm]()\n response_generator = response_generator_cls(\n llm_model=llm_model, prefix=prefix\n )\n return response_generator"
},
{
"identifier": "BaseResponseGenerator",
"path": "response_generators/response_generator.py",
"snippet": "class BaseResponseGenerator(BaseModel):\n \"\"\"\n **Description:**\n\n Base class for a response generator, providing a foundation for generating responses using a language model.\n\n \"\"\"\n\n llm_model: BaseLLM = None\n prefix: str = \"\"\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n @property\n def _response_generator_type(self):\n return \"base\"\n\n @property\n def _response_generator_model(self):\n return self.llm_model\n\n @property\n def _generator_prompt(self):\n return (\n \"===========Thinker: {thinker}==========\\n\\n\"\n \"System: {prefix}. You are very helpful empathetic health assistant and your goal is to help the user to get accurate information about \"\n \"his/her health and well-being, Using the Thinker gathered information and the History, Provide a empathetic proper answer to the user. \"\n \"Consider Thinker as your trusted source and use whatever is provided by it.\"\n \"Make sure that the answer is explanatory enough without repeatition\"\n \"Don't change Thinker returned urls or references. \"\n \"You should perform final calculations or process on the gathered information to provide the final answer. \"\n \"Also add explanations based on instructions from the \"\n \"Thinker don't directly put the instructions in the final answer to the user.\"\n \"User: {query}\"\n )\n\n def generate(\n self,\n prefix: str = \"\",\n query: str = \"\",\n thinker: str = \"\",\n **kwargs: Any,\n ) -> str:\n \"\"\"\n Generate a response based on the input prefix, query, and thinker (task planner).\n\n Args:\n prefix (str): Prefix to be added to the response.\n query (str): User's input query.\n thinker (str): Thinker's (Task Planner) generated answer.\n **kwargs (Any): Additional keyword arguments.\n Return:\n str: Generated response.\n\n\n\n Example:\n .. code-block:: python\n\n from llms.llm_types import LLMType\n from response_generators.response_generator_types import ResponseGeneratorType\n response_generator = initialize_planner(llm=LLMType.OPENAI, response_generator=ResponseGeneratorType.BASE_GENERATOR)\n response_generator.generate(query=\"How can I improve my sleep?\", thinker=\"Based on data found on the internet there are several ...\")\n \"\"\"\n\n prompt = (\n self._generator_prompt.replace(\"{query}\", query)\n .replace(\"{thinker}\", thinker)\n .replace(\"{prefix}\", prefix)\n )\n kwargs[\"max_tokens\"] = 1000\n response = self._response_generator_model.generate(\n query=prompt, **kwargs\n )\n return response"
},
{
"identifier": "ResponseGeneratorType",
"path": "response_generators/response_generator_types.py",
"snippet": "class ResponseGeneratorType(str, Enum):\n BASE_GENERATOR = \"base-generator\""
},
{
"identifier": "initialize_task",
"path": "tasks/initialize_task.py",
"snippet": "def initialize_task(task: str = \"serpapi\", **kwargs: Any) -> BaseTask:\n \"\"\"\n Initialize a task based on the provided task name.\n\n Args:\n task (str): The name of the task to initialize.\n **kwargs (Any): Additional keyword arguments for customizing task initialization.\n Return:\n BaseTask: An instance of the initialized task.\n Raise:\n ValueError: If the provided task name is unknown.\n\n\n\n Example:\n .. code-block:: python\n\n from langchain import ReActChain, OpenAI\n react = ReAct(llm=OpenAI())\n\n \"\"\"\n\n if task not in TASK_TO_CLASS:\n raise ValueError(\n f\"Got unknown planner type: {task}. \"\n f\"Valid types are: {TASK_TO_CLASS.keys()}.\"\n )\n\n task_cls = TASK_TO_CLASS[task]\n task = task_cls(**kwargs)\n return task"
},
{
"identifier": "BaseTask",
"path": "tasks/task.py",
"snippet": "class BaseTask(BaseModel):\n \"\"\"\n **Description:**\n\n This class is the base implementation for the Tasks. For every new task that you want to create, you should\n inherit from this class and override the attributes and methods based on your task's need. This class defines a base class named BaseTask.\n This class serves as a foundation for defining common properties and behaviors among various tasks in the system.\n\n Attributes:\n name: The name of the task. It should be unique underscore_case to be defined in TaskType. sample_task_name\n chat_name: This is the name that later will be used if needed to mention the tasks inside the chat with the user.\n It should be Camel Case. SampleTaskChatName\n description: The description of the what specifically the task is doing.\n Try to define it as specific as possible to help the Task Planner decide better.\n dependencies: You can put the name of the TaskTypes that this task is dependent on. For example, in stress detection scenario,\n the stress analysis is dependent on the fetch hrv data task. [TaskType.SERPAPI, TASKTYPE.EXTRACT_TEXT]\n inputs: This is the list of descriptions for the inputs that should be provided by the planner.\n For example if your task has two inputs: [\"the first input description\", \"the second input description\"]\n outputs: This is the list of the description of the outputs that the task returns.\n This helps the planner to understand the returned results better and use it as needed.\n For example, if the task returns a list of sleep hours for different sleep states,\n the description helps planner learn which number is related to what state.\n output_type: This indicates if the task result should be stored in the DataPipe or be returned directly to the planner.\n This process will be done in the parse_input and post_execute methods. If needed you can overwrite them.\n return_direct: This indicates if this task should completely interrupt the planning process or not.\n This is needed in cases like when you want to ask a question from user and no further\n planning is needed until the user gives the proper answer (look at ask_user task)\n \"\"\"\n\n name: str\n chat_name: str\n description: str\n dependencies: List[str] = []\n inputs: List[str] = []\n outputs: List[str] = []\n datapipe: DataPipe = None\n # False if the output should directly passed back to the planner.\n # True if it should be stored in datapipe\n output_type: bool = False\n # False if planner should continue. True if after this task the planning should be\n # on pause or stop. examples are when you have a task that asks user to provide more information\n return_direct: bool = False\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n @property\n def name(self):\n return self.name\n\n @property\n def dependencies(self):\n return self.dependencies\n\n @property\n def inputs(self):\n return \", \".join(\n [\n f\"{str(i)}-{input}\"\n for i, input in enumerate(self.inputs)\n ]\n )\n\n @abstractmethod\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Abstract method representing the execution of the task. 
You should implement this method based on your need.\n This method is called by the **execute** method that provides the parsed inputs to this method.\n\n Args:\n inputs (List[Any]): Input data for the task.\n Return:\n str: Result of the task execution.\n Raise:\n NotImplementedError: Subclasses must implement the execute method.\n\n \"\"\"\n\n def _parse_input(\n self,\n input_args: str,\n ) -> List[str]:\n \"\"\"\n Parses the input string into a list of strings. If the input is in format `datapipe:key`,\n the parser will retrieve the data from datapipe before sending it over to the **_execute** method.\n\n Args:\n input_args (str): Input string provided by planner. It should be parsed and return a list of str variables.\n Return:\n List[str]: List of parsed strings. These strings can be converted into desired types inside **_execute** method.\n\n\n \"\"\"\n inputs = input_args.split(\",\")\n return [\n json.loads(\n self.datapipe.retrieve(\n re.search(r\"datapipe:[0-9a-f\\-]{36}\", arg)\n .group()\n .strip()\n .split(\":\")[-1]\n )\n )\n if \"datapipe\" in arg\n else arg.strip()\n for arg in inputs\n ]\n\n def _post_execute(self, result: str = \"\"):\n \"\"\"\n This method is called inside **execute** method after calling **_execute**. The result of **_execute** will be passed to this method\n in case the **output_type** attribute is True, the result will be stored inside the datapipe and the datapipe key is returned to\n the plenner instead of the raw result. This is good practice for times that you have intermediate data (like sleep data over a month)\n and it needs to be passed over to other tasks and the raw result is not immidiately needed.\n This will save a huge amount of tokens and makes sure that the planner will not pass wrong raw data to the tasks.\n\n It is important to note that to make the **DataPipe's** stored data standard and unified, we store the data in the json string\n format that currently contains 'data' and 'description' keys. The 'data' will be the returned data after execution and the 'description'\n is created using the **outputs** attribute of the task. Whenever the raw data is returned to the planner, these **outputs** descriptions\n will help the planner understand and learn how to interpret the 'data' to generate the final answer or continue planning.\n\n Args:\n result (str): string containig the task result.\n Return:\n List[str]: List of parsed strings.\n\n \"\"\"\n if self.output_type:\n key = self.datapipe.store(\n json.dumps(\n {\n \"data\": result,\n \"description\": \",\".join(self.outputs),\n }\n )\n )\n return (\n f\"The result of the tool {self.name} is stored in the datapipe with key: $datapipe:{key}$\"\n \" pass this key to other tools to access to the result or call read_from_datapipe to get the raw data.\"\n )\n return result\n\n def execute(self, input_args: str) -> str:\n \"\"\"\n This method is called by the **Orchestrator** which provides the planner provided inputs.\n This method first calls **_parse_input** to parse the inputs and retrieve needed data from the **DataPipe**\n Then **_execute** is called and the parsed inputs are given to this method. 
Finally the final result of execution is passed to\n **_post_execute** and ith will either be stored inside **DataPipe** or directly returned to the planner to continue planning.\n\n Args:\n input_args (str): Input string provided by planner.\n Return:\n str: The final result of the task execution.\n\n \"\"\"\n inputs = self._parse_input(input_args)\n result = self._execute(inputs)\n return self._post_execute(result)\n\n def get_dict(self) -> str:\n \"\"\"\n Generate a dictionary-like representation of the task.\n\n Return:\n str: String representation of the task dictionary.\n\n\n \"\"\"\n inputs = \",\".join(\n f\"input{i+1}-{word}\" for i, word in enumerate(self.inputs)\n )\n dependencies = \",\".join(\n f\"{i+1}-{word}\"\n for i, word in enumerate(self.dependencies)\n )\n prompt = (\n f\"tool name:{self.name}, description: {self.description}.\"\n )\n if len(self.inputs) > 0:\n prompt += f\"The input to this tool should be comma separated list of data representing: {inputs}\"\n if len(self.dependencies) > 0:\n prompt += f\"\\nThis tool is dependent on the following tools. make sure these tools are called first: '{dependencies}'\"\n # prompt += \"\\n\"\n return prompt\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide a sample explanation for the task.\n\n Return:\n str: Sample explanation for the task.\n\n\n \"\"\"\n\n return \"\"\"\n Sample Explanation\n \"\"\""
},
{
"identifier": "TaskType",
"path": "tasks/task_types.py",
"snippet": "class TaskType(str, Enum):\n SERPAPI = \"serpapi\"\n CLICK = \"click\"\n GET_CURRENT_PAGE = \"current_page\"\n EXTRACT_HYPERLINKS = \"extract_hyperlinks\"\n EXTRACT_TEXT = \"extract_text\"\n GET_ELEMENTS = \"get_elements\"\n NAVIGATE_BACK = \"navigate_back\"\n NAVIGATE = \"navigate\"\n AFFECT_SLEEP_GET = \"affect_sleep_get\"\n AFFECT_ACTIVITY_GET = \"affect_activity_get\"\n AFFECT_SLEEP_ANALYSIS = \"affect_sleep_analysis\"\n AFFECT_ACTIVITY_ANALYSIS = \"affect_activity_analysis\"\n GOOGLE_TRANSLATE = \"google_translate\"\n ASK_USER = \"ask_user\"\n READ_FROM_DATAPIPE = \"read_from_datapipe\"\n TEST_FILE = \"test_file\""
}
] | import logging
import re
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
from pydantic import BaseModel
from CustomDebugFormatter import CustomDebugFormatter
from datapipes.datapipe import DataPipe
from datapipes.datapipe_types import DatapipeType
from datapipes.initialize_datapipe import initialize_datapipe
from llms.llm_types import LLMType
from planners.action import Action
from planners.action import PlanFinish
from planners.initialize_planner import initialize_planner
from planners.planner import BasePlanner
from planners.planner_types import PlannerType
from response_generators.initialize_response_generator import (
initialize_response_generator,
)
from response_generators.response_generator import (
BaseResponseGenerator,
)
from response_generators.response_generator_types import (
ResponseGeneratorType,
)
from tasks.initialize_task import initialize_task
from tasks.task import BaseTask
from tasks.task_types import TaskType | 7,961 | arbitrary_types_allowed = True
def print_log(self, log_name: str, message: str):
if self.verbose:
if log_name == "planner":
self.planner_logger.debug(message)
if log_name == "task":
self.tasks_logger.debug(message)
if log_name == "orchestrator":
self.orchestrator_logger.debug(message)
if log_name == "response_generator":
self.final_answer_generator_logger.debug(message)
if log_name == "promptist":
self.promptist_logger.debug(message)
if log_name == "error":
self.error_logger.debug(message)
@classmethod
def initialize(
self,
planner_llm: str = LLMType.OPENAI,
planner_name: str = PlannerType.ZERO_SHOT_REACT_PLANNER,
datapipe_name: str = DatapipeType.MEMORY,
promptist_name: str = "",
response_generator_llm: str = LLMType.OPENAI,
response_generator_name: str = ResponseGeneratorType.BASE_GENERATOR,
available_tasks: Optional[List[str]] = None,
verbose: bool = False,
**kwargs,
) -> Orchestrator:
"""
This class method initializes the Orchestrator by setting up the planner, datapipe, promptist, response generator,
and available tasks.
Args:
planner_llm (str): LLMType to be used as LLM for planner.
planner_name (str): PlannerType to be used as task planner.
datapipe_name (str): DatapipeType to be used as data pipe.
promptist_name (str): Not implemented yet!
response_generator_llm (str): LLMType to be used as LLM for response generator.
response_generator_name (str): ResponseGeneratorType to be used as response generator.
available_tasks (List[str]): List of available tasks using TaskType.
verbose (bool): Specifies whether the debugging logs should be printed or not.
**kwargs (Any): Additional keyword arguments.
Return:
Orchestrator: Initialized Orchestrator instance.
Example:
.. code-block:: python
from datapipes.datapipe_types import DatapipeType
from planners.planner_types import PlannerType
from response_generators.response_generator_types import ResponseGeneratorType
from tasks.task_types import TaskType
from llms.llm_types import LLMType
from orchestrator.orchestrator import Orchestrator
#If you want to use playwright task
from tasks.playwright.utils import create_sync_playwright_browser
sync_browser = create_sync_playwright_browser()
#
orchestrator = Orchestrator.initialize(
planner_llm=LLMType.OPENAI,
planner_name=PlannerType.ZERO_SHOT_REACT_PLANNER,
datapipe_name=DatapipeType.MEMORY,
promptist_name="",
response_generator_llm=LLMType.OPENAI,
response_generator_name=ResponseGeneratorType.BASE_GENERATOR,
available_tasks=[TaskType.SERPAPI, TaskType.EXTRACT_TEXT],
sync_browser=sync_browser,
verbose=self.verbose,
**kwargs
)
"""
if available_tasks is None:
available_tasks = []
planner_logger = tasks_logger = orchestrator_logger = final_answer_generator_logger = promptist_logger = error_logger = None
if verbose:
planner_logger = CustomDebugFormatter.create_logger(
"Planner", "cyan"
)
tasks_logger = CustomDebugFormatter.create_logger(
"Task", "purple"
)
orchestrator_logger = CustomDebugFormatter.create_logger(
"Orchestrator", "green"
)
final_answer_generator_logger = (
CustomDebugFormatter.create_logger(
"Response Generator", "blue"
)
)
promptist_logger = CustomDebugFormatter.create_logger(
"Promptist", "blue"
)
error_logger = CustomDebugFormatter.create_logger(
"Error", "red"
)
datapipe = initialize_datapipe(
datapipe=datapipe_name, **kwargs
)
if verbose:
orchestrator_logger.debug(
f"Datapipe {datapipe_name} is successfully initialized.\n"
)
tasks = {}
for task in available_tasks:
kwargs["datapipe"] = datapipe
| from __future__ import annotations
class Orchestrator(BaseModel):
"""
**Description:**
The Orchestrator class is the main execution heart of the CHA. All the components of the Orchestrator are initialized and executed here.
The Orchestrator starts a new answering cycle by calling the `run` method. From there, planning begins,
and tasks are executed one by one until the **Task Planner** decides that no more information is needed.
Finally, the **Task Planner**'s final answer is routed to the **Final Response Generator** to generate an empathetic final
response that is returned to the user.
"""
planner: BasePlanner = None
datapipe: DataPipe = None
promptist: Any = None
response_generator: BaseResponseGenerator = None
available_tasks: Dict[str, BaseTask] = {}
max_retries: int = 16
max_task_execute_retries: int = 3
max_planner_execute_retries: int = 16
max_final_answer_execute_retries: int = 3
role: int = 0
verbose: bool = False
planner_logger: Optional[logging.Logger] = None
tasks_logger: Optional[logging.Logger] = None
orchestrator_logger: Optional[logging.Logger] = None
final_answer_generator_logger: Optional[logging.Logger] = None
promptist_logger: Optional[logging.Logger] = None
error_logger: Optional[logging.Logger] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def print_log(self, log_name: str, message: str):
if self.verbose:
if log_name == "planner":
self.planner_logger.debug(message)
if log_name == "task":
self.tasks_logger.debug(message)
if log_name == "orchestrator":
self.orchestrator_logger.debug(message)
if log_name == "response_generator":
self.final_answer_generator_logger.debug(message)
if log_name == "promptist":
self.promptist_logger.debug(message)
if log_name == "error":
self.error_logger.debug(message)
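# Aside (illustrative only, not part of the class): the if-chain above can be read as a
# small name -> logger lookup; a more compact but equivalent formulation would be
#
#     logger = {
#         "planner": self.planner_logger,
#         "task": self.tasks_logger,
#         "orchestrator": self.orchestrator_logger,
#         "response_generator": self.final_answer_generator_logger,
#         "promptist": self.promptist_logger,
#         "error": self.error_logger,
#     }.get(log_name)
#     if self.verbose and logger is not None:
#         logger.debug(message)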
@classmethod
def initialize(
self,
planner_llm: str = LLMType.OPENAI,
planner_name: str = PlannerType.ZERO_SHOT_REACT_PLANNER,
datapipe_name: str = DatapipeType.MEMORY,
promptist_name: str = "",
response_generator_llm: str = LLMType.OPENAI,
response_generator_name: str = ResponseGeneratorType.BASE_GENERATOR,
available_tasks: Optional[List[str]] = None,
verbose: bool = False,
**kwargs,
) -> Orchestrator:
"""
This class method initializes the Orchestrator by setting up the planner, datapipe, promptist, response generator,
and available tasks.
Args:
planner_llm (str): LLMType to be used as LLM for planner.
planner_name (str): PlannerType to be used as task planner.
datapipe_name (str): DatapipeType to be used as data pipe.
promptist_name (str): Not implemented yet!
response_generator_llm (str): LLMType to be used as LLM for response generator.
response_generator_name (str): ResponseGeneratorType to be used as response generator.
available_tasks (List[str]): List of available tasks using TaskType.
verbose (bool): Specifies whether the debugging logs should be printed or not.
**kwargs (Any): Additional keyword arguments.
Return:
Orchestrator: Initialized Orchestrator instance.
Example:
.. code-block:: python
from datapipes.datapipe_types import DatapipeType
from planners.planner_types import PlannerType
from response_generators.response_generator_types import ResponseGeneratorType
from tasks.task_types import TaskType
from llms.llm_types import LLMType
from orchestrator.orchestrator import Orchestrator
#If you want to use playwright task
from tasks.playwright.utils import create_sync_playwright_browser
sync_browser = create_sync_playwright_browser()
#
orchestrator = Orchestrator.initialize(
planner_llm=LLMType.OPENAI,
planner_name=PlannerType.ZERO_SHOT_REACT_PLANNER,
datapipe_name=DatapipeType.MEMORY,
promptist_name="",
response_generator_llm=LLMType.OPENAI,
response_generator_name=ResponseGeneratorType.BASE_GENERATOR,
available_tasks=[TaskType.SERPAPI, TaskType.EXTRACT_TEXT],
sync_browser=sync_browser,
verbose=self.verbose,
**kwargs
)
"""
if available_tasks is None:
available_tasks = []
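# Per-component debug loggers (planner, tasks, orchestrator, response generator, promptist, errors);
# they remain None unless verbose mode is enabled.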
planner_logger = tasks_logger = orchestrator_logger = final_answer_generator_logger = promptist_logger = error_logger = None
if verbose:
planner_logger = CustomDebugFormatter.create_logger(
"Planner", "cyan"
)
tasks_logger = CustomDebugFormatter.create_logger(
"Task", "purple"
)
orchestrator_logger = CustomDebugFormatter.create_logger(
"Orchestrator", "green"
)
final_answer_generator_logger = (
CustomDebugFormatter.create_logger(
"Response Generator", "blue"
)
)
promptist_logger = CustomDebugFormatter.create_logger(
"Promptist", "blue"
)
error_logger = CustomDebugFormatter.create_logger(
"Error", "red"
)
datapipe = initialize_datapipe(
datapipe=datapipe_name, **kwargs
)
if verbose:
orchestrator_logger.debug(
f"Datapipe {datapipe_name} is successfully initialized.\n"
)
tasks = {}
for task in available_tasks:
kwargs["datapipe"] = datapipe | tasks[task] = initialize_task(task=task, **kwargs) | 13 | 2023-12-02 05:10:44+00:00 | 12k |
Czm369/MixPL | projects/MixPL/mixpl/mixpl.py | [
{
"identifier": "filter_gt_instances",
"path": "mmdet/models/utils/misc.py",
"snippet": "def filter_gt_instances(batch_data_samples: SampleList,\n score_thr: float = None,\n wh_thr: tuple = None):\n \"\"\"Filter ground truth (GT) instances by score and/or size.\n\n Args:\n batch_data_samples (SampleList): The Data\n Samples. It usually includes information such as\n `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n score_thr (float): The score filter threshold.\n wh_thr (tuple): Minimum width and height of bbox.\n\n Returns:\n SampleList: The Data Samples filtered by score and/or size.\n \"\"\"\n\n if score_thr is not None:\n batch_data_samples = _filter_gt_instances_by_score(\n batch_data_samples, score_thr)\n if wh_thr is not None:\n batch_data_samples = _filter_gt_instances_by_size(\n batch_data_samples, wh_thr)\n return batch_data_samples"
},
{
"identifier": "rename_loss_dict",
"path": "mmdet/models/utils/misc.py",
"snippet": "def rename_loss_dict(prefix: str, losses: dict) -> dict:\n \"\"\"Rename the key names in loss dict by adding a prefix.\n\n Args:\n prefix (str): The prefix for loss components.\n losses (dict): A dictionary of loss components.\n\n Returns:\n dict: A dictionary of loss components with prefix.\n \"\"\"\n return {prefix + k: v for k, v in losses.items()}"
},
{
"identifier": "reweight_loss_dict",
"path": "mmdet/models/utils/misc.py",
"snippet": "def reweight_loss_dict(losses: dict, weight: float) -> dict:\n \"\"\"Reweight losses in the dict by weight.\n\n Args:\n losses (dict): A dictionary of loss components.\n weight (float): Weight for loss components.\n\n Returns:\n dict: A dictionary of weighted loss components.\n \"\"\"\n for name, loss in losses.items():\n if 'loss' in name:\n if isinstance(loss, Sequence):\n losses[name] = [item * weight for item in loss]\n else:\n losses[name] = loss * weight\n return losses"
},
{
"identifier": "MODELS",
"path": "mmdet/registry.py",
"snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])"
},
{
"identifier": "DetLocalVisualizer",
"path": "mmdet/visualization/local_visualizer.py",
"snippet": "class DetLocalVisualizer(Visualizer):\n \"\"\"MMDetection Local Visualizer.\n\n Args:\n name (str): Name of the instance. Defaults to 'visualizer'.\n image (np.ndarray, optional): the origin image to draw. The format\n should be RGB. Defaults to None.\n vis_backends (list, optional): Visual backend config list.\n Defaults to None.\n save_dir (str, optional): Save file dir for all storage backends.\n If it is None, the backend storage will not save any data.\n bbox_color (str, tuple(int), optional): Color of bbox lines.\n The tuple of color should be in BGR order. Defaults to None.\n text_color (str, tuple(int), optional): Color of texts.\n The tuple of color should be in BGR order.\n Defaults to (200, 200, 200).\n mask_color (str, tuple(int), optional): Color of masks.\n The tuple of color should be in BGR order.\n Defaults to None.\n line_width (int, float): The linewidth of lines.\n Defaults to 3.\n alpha (int, float): The transparency of bboxes or mask.\n Defaults to 0.8.\n\n Examples:\n >>> import numpy as np\n >>> import torch\n >>> from mmengine.structures import InstanceData\n >>> from mmdet.structures import DetDataSample\n >>> from mmdet.visualization import DetLocalVisualizer\n\n >>> det_local_visualizer = DetLocalVisualizer()\n >>> image = np.random.randint(0, 256,\n ... size=(10, 12, 3)).astype('uint8')\n >>> gt_instances = InstanceData()\n >>> gt_instances.bboxes = torch.Tensor([[1, 2, 2, 5]])\n >>> gt_instances.labels = torch.randint(0, 2, (1,))\n >>> gt_det_data_sample = DetDataSample()\n >>> gt_det_data_sample.gt_instances = gt_instances\n >>> det_local_visualizer.add_datasample('image', image,\n ... gt_det_data_sample)\n >>> det_local_visualizer.add_datasample(\n ... 'image', image, gt_det_data_sample,\n ... out_file='out_file.jpg')\n >>> det_local_visualizer.add_datasample(\n ... 'image', image, gt_det_data_sample,\n ... show=True)\n >>> pred_instances = InstanceData()\n >>> pred_instances.bboxes = torch.Tensor([[2, 4, 4, 8]])\n >>> pred_instances.labels = torch.randint(0, 2, (1,))\n >>> pred_det_data_sample = DetDataSample()\n >>> pred_det_data_sample.pred_instances = pred_instances\n >>> det_local_visualizer.add_datasample('image', image,\n ... gt_det_data_sample,\n ... pred_det_data_sample)\n \"\"\"\n\n def __init__(self,\n name: str = 'visualizer',\n image: Optional[np.ndarray] = None,\n vis_backends: Optional[Dict] = None,\n save_dir: Optional[str] = None,\n bbox_color: Optional[Union[str, Tuple[int]]] = None,\n text_color: Optional[Union[str,\n Tuple[int]]] = (200, 200, 200),\n mask_color: Optional[Union[str, Tuple[int]]] = None,\n line_width: Union[int, float] = 3,\n alpha: float = 0.8) -> None:\n super().__init__(\n name=name,\n image=image,\n vis_backends=vis_backends,\n save_dir=save_dir)\n self.bbox_color = bbox_color\n self.text_color = text_color\n self.mask_color = mask_color\n self.line_width = line_width\n self.alpha = alpha\n # Set default value. 
When calling\n # `DetLocalVisualizer().dataset_meta=xxx`,\n # it will override the default value.\n self.dataset_meta = {}\n\n def _draw_instances(self, image: np.ndarray, instances: ['InstanceData'],\n classes: Optional[List[str]],\n palette: Optional[List[tuple]]) -> np.ndarray:\n \"\"\"Draw instances of GT or prediction.\n\n Args:\n image (np.ndarray): The image to draw.\n instances (:obj:`InstanceData`): Data structure for\n instance-level annotations or predictions.\n classes (List[str], optional): Category information.\n palette (List[tuple], optional): Palette information\n corresponding to the category.\n\n Returns:\n np.ndarray: the drawn image which channel is RGB.\n \"\"\"\n self.set_image(image)\n\n if 'bboxes' in instances and instances.bboxes.sum() > 0:\n bboxes = instances.bboxes\n labels = instances.labels\n\n max_label = int(max(labels) if len(labels) > 0 else 0)\n text_palette = get_palette(self.text_color, max_label + 1)\n text_colors = [text_palette[label] for label in labels]\n\n bbox_color = palette if self.bbox_color is None \\\n else self.bbox_color\n bbox_palette = get_palette(bbox_color, max_label + 1)\n colors = [bbox_palette[label] for label in labels]\n self.draw_bboxes(\n bboxes,\n edge_colors=colors,\n alpha=self.alpha,\n line_widths=self.line_width)\n\n positions = bboxes[:, :2] + self.line_width\n areas = (bboxes[:, 3] - bboxes[:, 1]) * (\n bboxes[:, 2] - bboxes[:, 0])\n scales = _get_adaptive_scales(areas)\n\n for i, (pos, label) in enumerate(zip(positions, labels)):\n if 'label_names' in instances:\n label_text = instances.label_names[i]\n else:\n label_text = classes[\n label] if classes is not None else f'class {label}'\n if 'scores' in instances:\n score = round(float(instances.scores[i]) * 100, 1)\n label_text += f': {score}'\n\n self.draw_texts(\n label_text,\n pos,\n colors=text_colors[i],\n font_sizes=int(13 * scales[i]),\n bboxes=[{\n 'facecolor': 'black',\n 'alpha': 0.8,\n 'pad': 0.7,\n 'edgecolor': 'none'\n }])\n\n if 'masks' in instances:\n labels = instances.labels\n masks = instances.masks\n if isinstance(masks, torch.Tensor):\n masks = masks.numpy()\n elif isinstance(masks, (PolygonMasks, BitmapMasks)):\n masks = masks.to_ndarray()\n\n masks = masks.astype(bool)\n\n max_label = int(max(labels) if len(labels) > 0 else 0)\n mask_color = palette if self.mask_color is None \\\n else self.mask_color\n mask_palette = get_palette(mask_color, max_label + 1)\n colors = [jitter_color(mask_palette[label]) for label in labels]\n text_palette = get_palette(self.text_color, max_label + 1)\n text_colors = [text_palette[label] for label in labels]\n\n polygons = []\n for i, mask in enumerate(masks):\n contours, _ = bitmap_to_polygon(mask)\n polygons.extend(contours)\n self.draw_polygons(polygons, edge_colors='w', alpha=self.alpha)\n self.draw_binary_masks(masks, colors=colors, alphas=self.alpha)\n\n if len(labels) > 0 and \\\n ('bboxes' not in instances or\n instances.bboxes.sum() == 0):\n # instances.bboxes.sum()==0 represent dummy bboxes.\n # A typical example of SOLO does not exist bbox branch.\n areas = []\n positions = []\n for mask in masks:\n _, _, stats, centroids = cv2.connectedComponentsWithStats(\n mask.astype(np.uint8), connectivity=8)\n if stats.shape[0] > 1:\n largest_id = np.argmax(stats[1:, -1]) + 1\n positions.append(centroids[largest_id])\n areas.append(stats[largest_id, -1])\n areas = np.stack(areas, axis=0)\n scales = _get_adaptive_scales(areas)\n\n for i, (pos, label) in enumerate(zip(positions, labels)):\n if 'label_names' in 
instances:\n label_text = instances.label_names[i]\n else:\n label_text = classes[\n label] if classes is not None else f'class {label}'\n if 'scores' in instances:\n score = round(float(instances.scores[i]) * 100, 1)\n label_text += f': {score}'\n\n self.draw_texts(\n label_text,\n pos,\n colors=text_colors[i],\n font_sizes=int(13 * scales[i]),\n horizontal_alignments='center',\n bboxes=[{\n 'facecolor': 'black',\n 'alpha': 0.8,\n 'pad': 0.7,\n 'edgecolor': 'none'\n }])\n return self.get_image()\n\n def _draw_panoptic_seg(self, image: np.ndarray,\n panoptic_seg: ['PixelData'],\n classes: Optional[List[str]],\n palette: Optional[List]) -> np.ndarray:\n \"\"\"Draw panoptic seg of GT or prediction.\n\n Args:\n image (np.ndarray): The image to draw.\n panoptic_seg (:obj:`PixelData`): Data structure for\n pixel-level annotations or predictions.\n classes (List[str], optional): Category information.\n\n Returns:\n np.ndarray: the drawn image which channel is RGB.\n \"\"\"\n # TODO: Is there a way to bypass?\n num_classes = len(classes)\n\n panoptic_seg_data = panoptic_seg.sem_seg[0]\n\n ids = np.unique(panoptic_seg_data)[::-1]\n\n if 'label_names' in panoptic_seg:\n # open set panoptic segmentation\n classes = panoptic_seg.metainfo['label_names']\n ignore_index = panoptic_seg.metainfo.get('ignore_index',\n len(classes))\n ids = ids[ids != ignore_index]\n else:\n # for VOID label\n ids = ids[ids != num_classes]\n\n labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)\n segms = (panoptic_seg_data[None] == ids[:, None, None])\n\n max_label = int(max(labels) if len(labels) > 0 else 0)\n\n mask_color = palette if self.mask_color is None \\\n else self.mask_color\n mask_palette = get_palette(mask_color, max_label + 1)\n colors = [mask_palette[label] for label in labels]\n\n self.set_image(image)\n\n # draw segm\n polygons = []\n for i, mask in enumerate(segms):\n contours, _ = bitmap_to_polygon(mask)\n polygons.extend(contours)\n self.draw_polygons(polygons, edge_colors='w', alpha=self.alpha)\n self.draw_binary_masks(segms, colors=colors, alphas=self.alpha)\n\n # draw label\n areas = []\n positions = []\n for mask in segms:\n _, _, stats, centroids = cv2.connectedComponentsWithStats(\n mask.astype(np.uint8), connectivity=8)\n max_id = np.argmax(stats[1:, -1]) + 1\n positions.append(centroids[max_id])\n areas.append(stats[max_id, -1])\n areas = np.stack(areas, axis=0)\n scales = _get_adaptive_scales(areas)\n\n text_palette = get_palette(self.text_color, max_label + 1)\n text_colors = [text_palette[label] for label in labels]\n\n for i, (pos, label) in enumerate(zip(positions, labels)):\n label_text = classes[label]\n\n self.draw_texts(\n label_text,\n pos,\n colors=text_colors[i],\n font_sizes=int(13 * scales[i]),\n bboxes=[{\n 'facecolor': 'black',\n 'alpha': 0.8,\n 'pad': 0.7,\n 'edgecolor': 'none'\n }],\n horizontal_alignments='center')\n return self.get_image()\n\n def _draw_sem_seg(self, image: np.ndarray, sem_seg: PixelData,\n classes: Optional[List],\n palette: Optional[List]) -> np.ndarray:\n \"\"\"Draw semantic seg of GT or prediction.\n\n Args:\n image (np.ndarray): The image to draw.\n sem_seg (:obj:`PixelData`): Data structure for pixel-level\n annotations or predictions.\n classes (list, optional): Input classes for result rendering, as\n the prediction of segmentation model is a segment map with\n label indices, `classes` is a list which includes items\n responding to the label indices. 
If classes is not defined,\n visualizer will take `cityscapes` classes by default.\n Defaults to None.\n palette (list, optional): Input palette for result rendering, which\n is a list of color palette responding to the classes.\n Defaults to None.\n\n Returns:\n np.ndarray: the drawn image which channel is RGB.\n \"\"\"\n sem_seg_data = sem_seg.sem_seg\n if isinstance(sem_seg_data, torch.Tensor):\n sem_seg_data = sem_seg_data.numpy()\n\n # 0 ~ num_class, the value 0 means background\n ids = np.unique(sem_seg_data)\n ignore_index = sem_seg.metainfo.get('ignore_index', 255)\n ids = ids[ids != ignore_index]\n\n if 'label_names' in sem_seg:\n # open set semseg\n label_names = sem_seg.metainfo['label_names']\n else:\n label_names = classes\n\n labels = np.array(ids, dtype=np.int64)\n colors = [palette[label] for label in labels]\n\n self.set_image(image)\n\n # draw semantic masks\n for i, (label, color) in enumerate(zip(labels, colors)):\n masks = sem_seg_data == label\n self.draw_binary_masks(masks, colors=[color], alphas=self.alpha)\n label_text = label_names[label]\n _, _, stats, centroids = cv2.connectedComponentsWithStats(\n masks[0].astype(np.uint8), connectivity=8)\n if stats.shape[0] > 1:\n largest_id = np.argmax(stats[1:, -1]) + 1\n centroids = centroids[largest_id]\n\n areas = stats[largest_id, -1]\n scales = _get_adaptive_scales(areas)\n\n self.draw_texts(\n label_text,\n centroids,\n colors=(255, 255, 255),\n font_sizes=int(13 * scales),\n horizontal_alignments='center',\n bboxes=[{\n 'facecolor': 'black',\n 'alpha': 0.8,\n 'pad': 0.7,\n 'edgecolor': 'none'\n }])\n\n return self.get_image()\n\n @master_only\n def add_datasample(\n self,\n name: str,\n image: np.ndarray,\n data_sample: Optional['DetDataSample'] = None,\n draw_gt: bool = True,\n draw_pred: bool = True,\n show: bool = False,\n wait_time: float = 0,\n # TODO: Supported in mmengine's Viusalizer.\n out_file: Optional[str] = None,\n pred_score_thr: float = 0.3,\n step: int = 0) -> None:\n \"\"\"Draw datasample and save to all backends.\n\n - If GT and prediction are plotted at the same time, they are\n displayed in a stitched image where the left image is the\n ground truth and the right image is the prediction.\n - If ``show`` is True, all storage backends are ignored, and\n the images will be displayed in a local window.\n - If ``out_file`` is specified, the drawn image will be\n saved to ``out_file``. t is usually used when the display\n is not available.\n\n Args:\n name (str): The image identifier.\n image (np.ndarray): The image to draw.\n data_sample (:obj:`DetDataSample`, optional): A data\n sample that contain annotations and predictions.\n Defaults to None.\n draw_gt (bool): Whether to draw GT DetDataSample. Default to True.\n draw_pred (bool): Whether to draw Prediction DetDataSample.\n Defaults to True.\n show (bool): Whether to display the drawn image. Default to False.\n wait_time (float): The interval of show (s). Defaults to 0.\n out_file (str): Path to output file. Defaults to None.\n pred_score_thr (float): The threshold to visualize the bboxes\n and masks. Defaults to 0.3.\n step (int): Global step value to record. 
Defaults to 0.\n \"\"\"\n image = image.clip(0, 255).astype(np.uint8)\n classes = self.dataset_meta.get('classes', None)\n palette = self.dataset_meta.get('palette', None)\n\n gt_img_data = None\n pred_img_data = None\n\n if data_sample is not None:\n data_sample = data_sample.cpu()\n\n if draw_gt and data_sample is not None:\n gt_img_data = image\n if 'gt_instances' in data_sample:\n gt_img_data = self._draw_instances(image,\n data_sample.gt_instances,\n classes, palette)\n if 'gt_sem_seg' in data_sample:\n gt_img_data = self._draw_sem_seg(gt_img_data,\n data_sample.gt_sem_seg,\n classes, palette)\n\n if 'gt_panoptic_seg' in data_sample:\n assert classes is not None, 'class information is ' \\\n 'not provided when ' \\\n 'visualizing panoptic ' \\\n 'segmentation results.'\n gt_img_data = self._draw_panoptic_seg(\n gt_img_data, data_sample.gt_panoptic_seg, classes, palette)\n\n if draw_pred and data_sample is not None:\n pred_img_data = image\n if 'pred_instances' in data_sample:\n pred_instances = data_sample.pred_instances\n pred_instances = pred_instances[\n pred_instances.scores > pred_score_thr]\n pred_img_data = self._draw_instances(image, pred_instances,\n classes, palette)\n\n if 'pred_sem_seg' in data_sample:\n pred_img_data = self._draw_sem_seg(pred_img_data,\n data_sample.pred_sem_seg,\n classes, palette)\n\n if 'pred_panoptic_seg' in data_sample:\n assert classes is not None, 'class information is ' \\\n 'not provided when ' \\\n 'visualizing panoptic ' \\\n 'segmentation results.'\n pred_img_data = self._draw_panoptic_seg(\n pred_img_data, data_sample.pred_panoptic_seg.numpy(),\n classes, palette)\n\n if gt_img_data is not None and pred_img_data is not None:\n drawn_img = np.concatenate((gt_img_data, pred_img_data), axis=1)\n elif gt_img_data is not None:\n drawn_img = gt_img_data\n elif pred_img_data is not None:\n drawn_img = pred_img_data\n else:\n # Display the original image directly if nothing is drawn.\n drawn_img = image\n\n # It is convenient for users to obtain the drawn image.\n # For example, the user wants to obtain the drawn image and\n # save it as a video during video inference.\n self.set_image(drawn_img)\n\n if show:\n self.show(drawn_img, win_name=name, wait_time=wait_time)\n\n if out_file is not None:\n mmcv.imwrite(drawn_img[..., ::-1], out_file)\n else:\n self.add_image(name, drawn_img, step)"
},
{
"identifier": "DetDataSample",
"path": "mmdet/structures/det_data_sample.py",
"snippet": "class DetDataSample(BaseDataElement):\n def proposals(self) -> InstanceData:\n def proposals(self, value: InstanceData):\n def proposals(self):\n def gt_instances(self) -> InstanceData:\n def gt_instances(self, value: InstanceData):\n def gt_instances(self):\n def pred_instances(self) -> InstanceData:\n def pred_instances(self, value: InstanceData):\n def pred_instances(self):\n def pred_track_instances(self) -> InstanceData:\n def pred_track_instances(self, value: InstanceData):\n def pred_track_instances(self):\n def ignored_instances(self) -> InstanceData:\n def ignored_instances(self, value: InstanceData):\n def ignored_instances(self):\n def gt_panoptic_seg(self) -> PixelData:\n def gt_panoptic_seg(self, value: PixelData):\n def gt_panoptic_seg(self):\n def pred_panoptic_seg(self) -> PixelData:\n def pred_panoptic_seg(self, value: PixelData):\n def pred_panoptic_seg(self):\n def gt_sem_seg(self) -> PixelData:\n def gt_sem_seg(self, value: PixelData):\n def gt_sem_seg(self):\n def pred_sem_seg(self) -> PixelData:\n def pred_sem_seg(self, value: PixelData):\n def pred_sem_seg(self):"
},
{
"identifier": "ConfigType",
"path": "mmdet/utils/typing_utils.py",
"snippet": ""
},
{
"identifier": "SemiBaseDetector",
"path": "mmdet/models/detectors/semi_base.py",
"snippet": "class SemiBaseDetector(BaseDetector):\n \"\"\"Base class for semi-supervised detectors.\n\n Semi-supervised detectors typically consisting of a teacher model\n updated by exponential moving average and a student model updated\n by gradient descent.\n\n Args:\n detector (:obj:`ConfigDict` or dict): The detector config.\n semi_train_cfg (:obj:`ConfigDict` or dict, optional):\n The semi-supervised training config.\n semi_test_cfg (:obj:`ConfigDict` or dict, optional):\n The semi-supervised testing config.\n data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of\n :class:`DetDataPreprocessor` to process the input data.\n Defaults to None.\n init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or\n list[dict], optional): Initialization config dict.\n Defaults to None.\n \"\"\"\n\n def __init__(self,\n detector: ConfigType,\n semi_train_cfg: OptConfigType = None,\n semi_test_cfg: OptConfigType = None,\n data_preprocessor: OptConfigType = None,\n init_cfg: OptMultiConfig = None) -> None:\n super().__init__(\n data_preprocessor=data_preprocessor, init_cfg=init_cfg)\n self.student = MODELS.build(copy.deepcopy(detector))\n self.teacher = MODELS.build(copy.deepcopy(detector))\n self.semi_train_cfg = semi_train_cfg\n self.semi_test_cfg = semi_test_cfg\n if self.semi_train_cfg.get('freeze_teacher', True):\n self.freeze(self.teacher)\n\n @staticmethod\n def freeze(model: nn.Module):\n \"\"\"Freeze the model.\"\"\"\n model.eval()\n for param in model.parameters():\n param.requires_grad = False\n\n @staticmethod\n def reweight_loss(losses: dict, weight: float) -> dict:\n \"\"\"Reweight loss for different branches.\"\"\"\n for name, loss in losses.items():\n if 'loss' in name:\n if isinstance(loss, Sequence):\n losses[name] = [item * weight for item in loss]\n else:\n losses[name] = loss * weight\n return losses\n\n def filter_pseudo_instances(self,\n batch_data_samples: SampleList) -> SampleList:\n \"\"\"Filter invalid pseudo instances from teacher model.\"\"\"\n for data_samples in batch_data_samples:\n pseudo_bboxes = data_samples.gt_instances.bboxes\n if pseudo_bboxes.shape[0] > 0:\n w = pseudo_bboxes[:, 2] - pseudo_bboxes[:, 0]\n h = pseudo_bboxes[:, 3] - pseudo_bboxes[:, 1]\n data_samples.gt_instances = data_samples.gt_instances[\n (w > self.semi_train_cfg.min_pseudo_bbox_wh[0])\n & (h > self.semi_train_cfg.min_pseudo_bbox_wh[1])]\n return batch_data_samples\n\n def loss(self, multi_batch_inputs: Dict[str, Tensor],\n multi_batch_data_samples: Dict[str, SampleList]) -> dict:\n \"\"\"Calculate losses from multi-branch inputs and data samples.\n\n Args:\n multi_batch_inputs (Dict[str, Tensor]): The dict of multi-branch\n input images, each value with shape (N, C, H, W).\n Each value should usually be mean centered and std scaled.\n multi_batch_data_samples (Dict[str, List[:obj:`DetDataSample`]]):\n The dict of multi-branch data samples.\n\n Returns:\n dict: A dictionary of loss components\n \"\"\"\n losses = dict()\n losses.update(**self.loss_by_gt_instances(\n multi_batch_inputs['sup'], multi_batch_data_samples['sup']))\n\n origin_pseudo_data_samples, batch_info = self.get_pseudo_instances(\n multi_batch_inputs['unsup_teacher'],\n multi_batch_data_samples['unsup_teacher'])\n multi_batch_data_samples[\n 'unsup_student'] = self.project_pseudo_instances(\n origin_pseudo_data_samples,\n multi_batch_data_samples['unsup_student'])\n losses.update(**self.loss_by_pseudo_instances(\n multi_batch_inputs['unsup_student'],\n multi_batch_data_samples['unsup_student'], 
batch_info))\n return losses\n\n def loss_by_gt_instances(self, batch_inputs: Tensor,\n batch_data_samples: SampleList) -> dict:\n \"\"\"Calculate losses from a batch of inputs and ground-truth data\n samples.\n\n Args:\n batch_inputs (Tensor): Input images of shape (N, C, H, W).\n These should usually be mean centered and std scaled.\n batch_data_samples (List[:obj:`DetDataSample`]): The batch\n data samples. It usually includes information such\n as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.\n\n Returns:\n dict: A dictionary of loss components\n \"\"\"\n\n losses = self.student.loss(batch_inputs, batch_data_samples)\n sup_weight = self.semi_train_cfg.get('sup_weight', 1.)\n return rename_loss_dict('sup_', reweight_loss_dict(losses, sup_weight))\n\n def loss_by_pseudo_instances(self,\n batch_inputs: Tensor,\n batch_data_samples: SampleList,\n batch_info: Optional[dict] = None) -> dict:\n \"\"\"Calculate losses from a batch of inputs and pseudo data samples.\n\n Args:\n batch_inputs (Tensor): Input images of shape (N, C, H, W).\n These should usually be mean centered and std scaled.\n batch_data_samples (List[:obj:`DetDataSample`]): The batch\n data samples. It usually includes information such\n as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`,\n which are `pseudo_instance` or `pseudo_panoptic_seg`\n or `pseudo_sem_seg` in fact.\n batch_info (dict): Batch information of teacher model\n forward propagation process. Defaults to None.\n\n Returns:\n dict: A dictionary of loss components\n \"\"\"\n batch_data_samples = filter_gt_instances(\n batch_data_samples, score_thr=self.semi_train_cfg.cls_pseudo_thr)\n losses = self.student.loss(batch_inputs, batch_data_samples)\n pseudo_instances_num = min([len(data_samples.gt_instances) for data_samples in batch_data_samples])\n unsup_weight = self.semi_train_cfg.unsup_weight if pseudo_instances_num >= self.semi_train_cfg.least_num else 0.\n return rename_loss_dict('unsup_', reweight_loss_dict(losses, unsup_weight))\n\n @torch.no_grad()\n def get_pseudo_instances(\n self, batch_inputs: Tensor, batch_data_samples: SampleList\n ) -> Tuple[SampleList, Optional[dict]]:\n \"\"\"Get pseudo instances from teacher model.\"\"\"\n self.teacher.eval()\n results_list = self.teacher.predict(\n batch_inputs, batch_data_samples, rescale=False)\n batch_info = {}\n for data_samples, results in zip(batch_data_samples, results_list):\n data_samples.gt_instances = results.pred_instances\n data_samples.gt_instances.bboxes = bbox_project(\n data_samples.gt_instances.bboxes,\n torch.from_numpy(data_samples.homography_matrix).inverse().to(\n self.data_preprocessor.device), data_samples.ori_shape)\n return batch_data_samples, batch_info\n\n def project_pseudo_instances(self, batch_pseudo_instances: SampleList,\n batch_data_samples: SampleList) -> SampleList:\n \"\"\"Project pseudo instances.\"\"\"\n for pseudo_instances, data_samples in zip(batch_pseudo_instances,\n batch_data_samples):\n data_samples.gt_instances = copy.deepcopy(\n pseudo_instances.gt_instances)\n data_samples.gt_instances.bboxes = bbox_project(\n data_samples.gt_instances.bboxes,\n torch.tensor(data_samples.homography_matrix).to(\n self.data_preprocessor.device), data_samples.img_shape)\n wh_thr = self.semi_train_cfg.get('min_pseudo_bbox_wh', (1e-2, 1e-2))\n return filter_gt_instances(batch_data_samples, wh_thr=wh_thr)\n\n def predict(self, batch_inputs: Tensor,\n batch_data_samples: SampleList) -> SampleList:\n \"\"\"Predict results from a batch of inputs and data samples with post-\n 
processing.\n\n Args:\n batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n batch_data_samples (List[:obj:`DetDataSample`]): The Data\n Samples. It usually includes information such as\n `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n rescale (bool): Whether to rescale the results.\n Defaults to True.\n\n Returns:\n list[:obj:`DetDataSample`]: Return the detection results of the\n input images. The returns value is DetDataSample,\n which usually contain 'pred_instances'. And the\n ``pred_instances`` usually contains following keys.\n\n - scores (Tensor): Classification scores, has a shape\n (num_instance, )\n - labels (Tensor): Labels of bboxes, has a shape\n (num_instances, ).\n - bboxes (Tensor): Has a shape (num_instances, 4),\n the last dimension 4 arrange as (x1, y1, x2, y2).\n - masks (Tensor): Has a shape (num_instances, H, W).\n \"\"\"\n if self.semi_test_cfg.get('predict_on', 'teacher') == 'teacher':\n return self.teacher(\n batch_inputs, batch_data_samples, mode='predict')\n else:\n return self.student(\n batch_inputs, batch_data_samples, mode='predict')\n\n def _forward(self, batch_inputs: Tensor,\n batch_data_samples: SampleList) -> SampleList:\n \"\"\"Network forward process. Usually includes backbone, neck and head\n forward without any post-processing.\n\n Args:\n batch_inputs (Tensor): Inputs with shape (N, C, H, W).\n\n Returns:\n tuple: A tuple of features from ``rpn_head`` and ``roi_head``\n forward.\n \"\"\"\n if self.semi_test_cfg.get('forward_on', 'teacher') == 'teacher':\n return self.teacher(\n batch_inputs, batch_data_samples, mode='tensor')\n else:\n return self.student(\n batch_inputs, batch_data_samples, mode='tensor')\n\n def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:\n \"\"\"Extract features.\n\n Args:\n batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).\n\n Returns:\n tuple[Tensor]: Multi-level features that may have\n different resolutions.\n \"\"\"\n if self.semi_test_cfg.get('extract_feat_on', 'teacher') == 'teacher':\n return self.teacher.extract_feat(batch_inputs)\n else:\n return self.student.extract_feat(batch_inputs)\n\n def _load_from_state_dict(self, state_dict: dict, prefix: str,\n local_metadata: dict, strict: bool,\n missing_keys: Union[List[str], str],\n unexpected_keys: Union[List[str], str],\n error_msgs: Union[List[str], str]) -> None:\n \"\"\"Add teacher and student prefixes to model parameter names.\"\"\"\n if not any([\n 'student' in key or 'teacher' in key\n for key in state_dict.keys()\n ]):\n keys = list(state_dict.keys())\n state_dict.update({'teacher.' + k: state_dict[k] for k in keys})\n state_dict.update({'student.' + k: state_dict[k] for k in keys})\n for k in keys:\n state_dict.pop(k)\n return super()._load_from_state_dict(\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n )"
},
{
"identifier": "bbox_project",
"path": "mmdet/structures/bbox/transforms.py",
"snippet": "def bbox_project(\n bboxes: Union[torch.Tensor, np.ndarray],\n homography_matrix: Union[torch.Tensor, np.ndarray],\n img_shape: Optional[Tuple[int, int]] = None\n) -> Union[torch.Tensor, np.ndarray]:\n \"\"\"Geometric transformation for bbox.\n\n Args:\n bboxes (Union[torch.Tensor, np.ndarray]): Shape (n, 4) for bboxes.\n homography_matrix (Union[torch.Tensor, np.ndarray]):\n Shape (3, 3) for geometric transformation.\n img_shape (Tuple[int, int], optional): Image shape. Defaults to None.\n Returns:\n Union[torch.Tensor, np.ndarray]: Converted bboxes.\n \"\"\"\n bboxes_type = type(bboxes)\n if bboxes_type is np.ndarray:\n bboxes = torch.from_numpy(bboxes)\n if isinstance(homography_matrix, np.ndarray):\n homography_matrix = torch.from_numpy(homography_matrix)\n corners = bbox2corner(bboxes)\n corners = torch.cat(\n [corners, corners.new_ones(corners.shape[0], 1)], dim=1)\n corners = torch.matmul(homography_matrix, corners.t()).t()\n # Convert to homogeneous coordinates by normalization\n corners = corners[:, :2] / corners[:, 2:3]\n bboxes = corner2bbox(corners)\n if img_shape is not None:\n bboxes[:, 0::2] = bboxes[:, 0::2].clamp(0, img_shape[1])\n bboxes[:, 1::2] = bboxes[:, 1::2].clamp(0, img_shape[0])\n if bboxes_type is np.ndarray:\n bboxes = bboxes.numpy()\n return bboxes"
}
] | import copy
import torch
import numpy as np
import math
import os.path as osp
from typing import Dict, Optional, Tuple
from torch import Tensor
from mmdet.models.utils import (filter_gt_instances, rename_loss_dict,
reweight_loss_dict)
from mmdet.registry import MODELS
from mmdet.visualization import DetLocalVisualizer
from mmdet.structures import DetDataSample, SampleList
from mmdet.utils import ConfigType, InstanceList, OptConfigType, OptMultiConfig
from mmdet.models.detectors import SemiBaseDetector
from mmdet.structures.bbox import bbox_project
from torch.nn import functional as F | 10,726 | Each value should usually be mean centered and std scaled.
multi_batch_data_samples (Dict[str, List[:obj:`DetDataSample`]]):
The dict of multi-branch data samples.
Returns:
dict: A dictionary of loss components
"""
losses = dict()
losses.update(**self.loss_by_gt_instances(
multi_batch_inputs['sup'], multi_batch_data_samples['sup']))
origin_batch_pseudo_data_samples, batch_info = self.get_pseudo_instances(
multi_batch_inputs['unsup_teacher'], multi_batch_data_samples['unsup_teacher'])
multi_batch_data_samples['unsup_student'] = self.project_pseudo_instances(
origin_batch_pseudo_data_samples, multi_batch_data_samples['unsup_student'])
batch_unsup_inputs = copy.deepcopy(multi_batch_inputs['unsup_student'])
batch_unsup_data_samples = copy.deepcopy(multi_batch_data_samples['unsup_student'])
batch_unsup_inputs, batch_unsup_data_samples = self.merge(
*zip(*list(map(self.erase, *self.split(batch_unsup_inputs, batch_unsup_data_samples)))))
sample_size = len(multi_batch_data_samples['unsup_student'])
mixup_idxs = np.random.choice(range(sample_size), sample_size, replace=False)
mosaic_idxs = np.random.choice(range(4), 4, replace=False) + sample_size
if self.semi_train_cfg.mixup and len(self.cache_inputs) == self.semi_train_cfg.cache_size:
dst_inputs_list, batch_dst_data_samples = self.split(
batch_unsup_inputs, batch_unsup_data_samples)
img_shapes = [tuple(batch_unsup_inputs.shape[-2:])]*batch_unsup_inputs.shape[0]
src_inputs_list, batch_src_data_samples = self.get_batch(mixup_idxs, img_shapes)
batch_unsup_inputs, batch_unsup_data_samples = self.merge(*self.mixup(
dst_inputs_list, batch_dst_data_samples,
src_inputs_list, batch_src_data_samples))
if self.semi_train_cfg.mixup:
losses.update(**rename_loss_dict('mixup_', self.loss_by_pseudo_instances(
batch_unsup_inputs, batch_unsup_data_samples)))
else:
losses.update(**self.loss_by_pseudo_instances(
batch_unsup_inputs, batch_unsup_data_samples))
if self.semi_train_cfg.mosaic and len(self.cache_inputs) == self.semi_train_cfg.cache_size:
if len(self.semi_train_cfg.mosaic_shape) == 1:
img_shapes = [self.semi_train_cfg.mosaic_shape[0]] * 4
else:
mosaic_shape = self.semi_train_cfg.mosaic_shape
mosaic_h = np.random.randint(
min(mosaic_shape[0][0], mosaic_shape[1][0]), max(mosaic_shape[0][0], mosaic_shape[1][0]))
mosaic_w = np.random.randint(
min(mosaic_shape[0][1], mosaic_shape[1][1]), max(mosaic_shape[0][1], mosaic_shape[1][1]))
img_shapes = [(mosaic_h, mosaic_w)] * 4
src_inputs_list, batch_src_data_samples = self.get_batch(mosaic_idxs, img_shapes)
mosaic_inputs, mosaic_data_samples = self.mosaic(src_inputs_list, batch_src_data_samples)
mosaic_losses = self.loss_by_pseudo_instances(mosaic_inputs, mosaic_data_samples)
losses.update(**rename_loss_dict('mosaic_', reweight_loss_dict(mosaic_losses, self.semi_train_cfg.mosaic_weight)))
self.update_cache(multi_batch_inputs['unsup_student'], multi_batch_data_samples['unsup_student'])
return losses
def merge(self, inputs_list, batch_data_samples):
batch_size = len(inputs_list)
h, w = 0, 0
for i in range(batch_size):
img_h, img_w = batch_data_samples[i].img_shape
h, w = max(h, img_h), max(w, img_w)
h, w = max(h, math.ceil(h / 32) * 32), max(w, math.ceil(w / 32) * 32)
batch_inputs = torch.zeros((batch_size, 3, h, w)).to(self.data_preprocessor.device)
for i in range(batch_size):
img_h, img_w = batch_data_samples[i].img_shape
batch_inputs[i, :, :img_h, :img_w] = inputs_list[i]
batch_data_samples[i].set_metainfo({'batch_input_shape': (h, w)})
batch_data_samples[i].set_metainfo({'pad_shape': (h, w)})
return batch_inputs, batch_data_samples
def split(self, batch_inputs, batch_data_samples):
inputs_list = []
for i in range(len(batch_inputs)):
inputs = batch_inputs[i]
data_samples = batch_data_samples[i]
img_h, img_w = data_samples.img_shape
inputs_list.append(inputs[..., :img_h, :img_w])
data_samples.pop('batch_input_shape')
data_samples.pop('pad_shape')
return inputs_list, batch_data_samples
def update_cache(self, batch_inputs: Tensor, batch_data_samples: SampleList):
inputs_list, batch_data_samples = self.split(batch_inputs, batch_data_samples)
cache_size = self.semi_train_cfg.cache_size
self.cache_inputs.extend(inputs_list)
self.cache_data_samples.extend(batch_data_samples)
self.cache_inputs = self.cache_inputs[-cache_size:]
self.cache_data_samples = self.cache_data_samples[-cache_size:]
def get_cache(self, idx, img_shape):
inputs = copy.deepcopy(self.cache_inputs[idx])
data_samples = copy.deepcopy(self.cache_data_samples[idx])
inputs, data_samples = self.erase(*self.flip(*self.resize(inputs, data_samples, img_shape)))
return inputs, data_samples
def get_batch(self, rand_idxs, img_shapes):
inputs_list, batch_data_samples = [], []
for i in range(len(rand_idxs)):
inputs, data_samples = self.get_cache(rand_idxs[i], img_shapes[i])
inputs_list.append(inputs)
batch_data_samples.append(data_samples)
return inputs_list, batch_data_samples
def resize(self, inputs, data_samples, img_shape):
scale = min(img_shape[0] / data_samples.img_shape[0], img_shape[1] / data_samples.img_shape[1])
inputs = F.interpolate(inputs.unsqueeze(0), scale_factor=scale).squeeze(0)
data_samples.pop('img_shape')
data_samples.pop('scale_factor')
img_h, img_w = inputs.shape[-2:]
data_samples.set_metainfo({'img_shape': (img_h, img_w)})
ori_h, ori_w = data_samples.ori_shape
data_samples.set_metainfo({'scale_factor': (img_w / ori_w, img_h / ori_h)})
hm = data_samples.pop('homography_matrix')
matrix = np.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]], dtype=np.float32)
data_samples.set_metainfo({'homography_matrix': matrix @ hm})
data_samples.gt_instances.bboxes *= scale
data_samples.gt_instances.bboxes[:, 0::2].clamp_(0, img_w)
data_samples.gt_instances.bboxes[:, 1::2].clamp_(0, img_h)
| # Copyright (c) OpenMMLab. All rights reserved.
@MODELS.register_module()
class MixPL(SemiBaseDetector):
"""Base class for semi-supervised detectors."""
def __init__(self,
detector: ConfigType,
semi_train_cfg: OptConfigType = None,
semi_test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
detector=detector,
semi_train_cfg=semi_train_cfg,
semi_test_cfg=semi_test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
self.cache_inputs = []
self.cache_data_samples = []
def loss(self, multi_batch_inputs: Dict[str, Tensor],
multi_batch_data_samples: Dict[str, SampleList]) -> dict:
"""Calculate losses from multi-branch inputs and data samples.
Args:
multi_batch_inputs (Dict[str, Tensor]): The dict of multi-branch
input images, each value with shape (N, C, H, W).
Each value should usually be mean centered and std scaled.
multi_batch_data_samples (Dict[str, List[:obj:`DetDataSample`]]):
The dict of multi-branch data samples.
Returns:
dict: A dictionary of loss components
"""
losses = dict()
losses.update(**self.loss_by_gt_instances(
multi_batch_inputs['sup'], multi_batch_data_samples['sup']))
origin_batch_pseudo_data_samples, batch_info = self.get_pseudo_instances(
multi_batch_inputs['unsup_teacher'], multi_batch_data_samples['unsup_teacher'])
multi_batch_data_samples['unsup_student'] = self.project_pseudo_instances(
origin_batch_pseudo_data_samples, multi_batch_data_samples['unsup_student'])
batch_unsup_inputs = copy.deepcopy(multi_batch_inputs['unsup_student'])
batch_unsup_data_samples = copy.deepcopy(multi_batch_data_samples['unsup_student'])
batch_unsup_inputs, batch_unsup_data_samples = self.merge(
*zip(*list(map(self.erase, *self.split(batch_unsup_inputs, batch_unsup_data_samples)))))
sample_size = len(multi_batch_data_samples['unsup_student'])
mixup_idxs = np.random.choice(range(sample_size), sample_size, replace=False)
mosaic_idxs = np.random.choice(range(4), 4, replace=False) + sample_size
if self.semi_train_cfg.mixup and len(self.cache_inputs) == self.semi_train_cfg.cache_size:
dst_inputs_list, batch_dst_data_samples = self.split(
batch_unsup_inputs, batch_unsup_data_samples)
img_shapes = [tuple(batch_unsup_inputs.shape[-2:])]*batch_unsup_inputs.shape[0]
src_inputs_list, batch_src_data_samples = self.get_batch(mixup_idxs, img_shapes)
batch_unsup_inputs, batch_unsup_data_samples = self.merge(*self.mixup(
dst_inputs_list, batch_dst_data_samples,
src_inputs_list, batch_src_data_samples))
if self.semi_train_cfg.mixup:
losses.update(**rename_loss_dict('mixup_', self.loss_by_pseudo_instances(
batch_unsup_inputs, batch_unsup_data_samples)))
else:
losses.update(**self.loss_by_pseudo_instances(
batch_unsup_inputs, batch_unsup_data_samples))
if self.semi_train_cfg.mosaic and len(self.cache_inputs) == self.semi_train_cfg.cache_size:
if len(self.semi_train_cfg.mosaic_shape) == 1:
img_shapes = [self.semi_train_cfg.mosaic_shape[0]] * 4
else:
mosaic_shape = self.semi_train_cfg.mosaic_shape
mosaic_h = np.random.randint(
min(mosaic_shape[0][0], mosaic_shape[1][0]), max(mosaic_shape[0][0], mosaic_shape[1][0]))
mosaic_w = np.random.randint(
min(mosaic_shape[0][1], mosaic_shape[1][1]), max(mosaic_shape[0][1], mosaic_shape[1][1]))
img_shapes = [(mosaic_h, mosaic_w)] * 4
src_inputs_list, batch_src_data_samples = self.get_batch(mosaic_idxs, img_shapes)
mosaic_inputs, mosaic_data_samples = self.mosaic(src_inputs_list, batch_src_data_samples)
mosaic_losses = self.loss_by_pseudo_instances(mosaic_inputs, mosaic_data_samples)
losses.update(**rename_loss_dict('mosaic_', reweight_loss_dict(mosaic_losses, self.semi_train_cfg.mosaic_weight)))
self.update_cache(multi_batch_inputs['unsup_student'], multi_batch_data_samples['unsup_student'])
return losses
def merge(self, inputs_list, batch_data_samples):
batch_size = len(inputs_list)
h, w = 0, 0
for i in range(batch_size):
img_h, img_w = batch_data_samples[i].img_shape
h, w = max(h, img_h), max(w, img_w)
h, w = max(h, math.ceil(h / 32) * 32), max(w, math.ceil(w / 32) * 32)
batch_inputs = torch.zeros((batch_size, 3, h, w)).to(self.data_preprocessor.device)
for i in range(batch_size):
img_h, img_w = batch_data_samples[i].img_shape
batch_inputs[i, :, :img_h, :img_w] = inputs_list[i]
batch_data_samples[i].set_metainfo({'batch_input_shape': (h, w)})
batch_data_samples[i].set_metainfo({'pad_shape': (h, w)})
return batch_inputs, batch_data_samples
def split(self, batch_inputs, batch_data_samples):
inputs_list = []
for i in range(len(batch_inputs)):
inputs = batch_inputs[i]
data_samples = batch_data_samples[i]
img_h, img_w = data_samples.img_shape
inputs_list.append(inputs[..., :img_h, :img_w])
data_samples.pop('batch_input_shape')
data_samples.pop('pad_shape')
return inputs_list, batch_data_samples
def update_cache(self, batch_inputs: Tensor, batch_data_samples: SampleList):
inputs_list, batch_data_samples = self.split(batch_inputs, batch_data_samples)
cache_size = self.semi_train_cfg.cache_size
self.cache_inputs.extend(inputs_list)
self.cache_data_samples.extend(batch_data_samples)
self.cache_inputs = self.cache_inputs[-cache_size:]
self.cache_data_samples = self.cache_data_samples[-cache_size:]
def get_cache(self, idx, img_shape):
inputs = copy.deepcopy(self.cache_inputs[idx])
data_samples = copy.deepcopy(self.cache_data_samples[idx])
inputs, data_samples = self.erase(*self.flip(*self.resize(inputs, data_samples, img_shape)))
return inputs, data_samples
def get_batch(self, rand_idxs, img_shapes):
inputs_list, batch_data_samples = [], []
for i in range(len(rand_idxs)):
inputs, data_samples = self.get_cache(rand_idxs[i], img_shapes[i])
inputs_list.append(inputs)
batch_data_samples.append(data_samples)
return inputs_list, batch_data_samples
def resize(self, inputs, data_samples, img_shape):
scale = min(img_shape[0] / data_samples.img_shape[0], img_shape[1] / data_samples.img_shape[1])
inputs = F.interpolate(inputs.unsqueeze(0), scale_factor=scale).squeeze(0)
data_samples.pop('img_shape')
data_samples.pop('scale_factor')
img_h, img_w = inputs.shape[-2:]
data_samples.set_metainfo({'img_shape': (img_h, img_w)})
ori_h, ori_w = data_samples.ori_shape
data_samples.set_metainfo({'scale_factor': (img_w / ori_w, img_h / ori_h)})
hm = data_samples.pop('homography_matrix')
matrix = np.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]], dtype=np.float32)
data_samples.set_metainfo({'homography_matrix': matrix @ hm})
data_samples.gt_instances.bboxes *= scale
data_samples.gt_instances.bboxes[:, 0::2].clamp_(0, img_w)
data_samples.gt_instances.bboxes[:, 1::2].clamp_(0, img_h) | return inputs, filter_gt_instances([data_samples])[0] | 0 | 2023-11-30 08:58:00+00:00 | 12k |
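Note: in the record above, `bbox_project` is what lets the teacher's pseudo boxes follow the student branch's geometric augmentation before the clamping shown on the completed line. Below is a minimal, self-contained sketch of that same idea; it is not the mmdet implementation, and the `project_boxes` name, toy homography, and example values are illustrative only: push each box's four corners through a 3x3 homography, dehomogenize, take the enclosing box again, and clamp to the image shape.

import torch

def project_boxes(bboxes: torch.Tensor, H: torch.Tensor, img_shape=None) -> torch.Tensor:
    # bboxes: (n, 4) as (x1, y1, x2, y2); H: (3, 3) homography
    x1, y1, x2, y2 = bboxes.unbind(-1)
    corners = torch.stack([
        torch.stack([x1, y1], -1), torch.stack([x2, y1], -1),
        torch.stack([x1, y2], -1), torch.stack([x2, y2], -1)], dim=1)       # (n, 4, 2)
    pts = torch.cat([corners, corners.new_ones(*corners.shape[:2], 1)], -1) @ H.T
    pts = pts[..., :2] / pts[..., 2:3]                                       # dehomogenize
    out = torch.cat([pts.amin(dim=1), pts.amax(dim=1)], -1)                  # back to (x1, y1, x2, y2)
    if img_shape is not None:
        h, w = img_shape
        out[..., 0::2] = out[..., 0::2].clamp(0, w)
        out[..., 1::2] = out[..., 1::2].clamp(0, h)
    return out

boxes = torch.tensor([[10., 20., 50., 80.]])
scale_hm = torch.tensor([[0.5, 0., 0.], [0., 0.5, 0.], [0., 0., 1.]])        # a pure 0.5x rescale
print(project_boxes(boxes, scale_hm, img_shape=(100, 100)))                  # tensor([[ 5., 10., 25., 40.]])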
DQiaole/FlowDiffusion_pytorch | local_diffusers/models/raft_unet.py | [
{
"identifier": "BasicEncoder",
"path": "core/extractor.py",
"snippet": "class BasicEncoder(nn.Module):\n def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):\n super(BasicEncoder, self).__init__()\n self.norm_fn = norm_fn\n\n if self.norm_fn == 'group':\n self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)\n \n elif self.norm_fn == 'batch':\n self.norm1 = nn.BatchNorm2d(64)\n\n elif self.norm_fn == 'instance':\n self.norm1 = nn.InstanceNorm2d(64)\n\n elif self.norm_fn == 'none':\n self.norm1 = nn.Sequential()\n\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)\n self.relu1 = nn.ReLU(inplace=True)\n\n self.in_planes = 64\n self.layer1 = self._make_layer(64, stride=1)\n self.layer2 = self._make_layer(96, stride=2)\n self.layer3 = self._make_layer(128, stride=2)\n\n # output convolution\n self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1)\n\n self.dropout = None\n if dropout > 0:\n self.dropout = nn.Dropout2d(p=dropout)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):\n if m.weight is not None:\n nn.init.constant_(m.weight, 1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, dim, stride=1):\n layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)\n layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)\n layers = (layer1, layer2)\n \n self.in_planes = dim\n return nn.Sequential(*layers)\n\n\n def forward(self, x):\n\n # if input is list, combine batch dimension\n is_list = isinstance(x, tuple) or isinstance(x, list)\n if is_list:\n batch_dim = x[0].shape[0]\n x = torch.cat(x, dim=0)\n\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu1(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n\n x = self.conv2(x)\n\n if self.training and self.dropout is not None:\n x = self.dropout(x)\n\n if is_list:\n x = torch.split(x, [batch_dim, batch_dim], dim=0)\n\n return x"
},
{
"identifier": "Unet",
"path": "local_diffusers/models/imagen_unet.py",
"snippet": "class Unet(nn.Module):\n def __init__(\n self,\n dim,\n num_resnet_blocks=1,\n cond_dim=None,\n num_image_tokens=4,\n num_time_tokens=2,\n learned_sinu_pos_emb_dim=16,\n out_dim=None,\n sample_size=256,\n dim_mults=(1, 2, 4, 8),\n cond_images_channels=0,\n channels=8,\n channels_out=2,\n attn_dim_head=64,\n attn_heads=8,\n ff_mult=2.,\n lowres_cond=False, # for cascading diffusion - https://cascaded-diffusion.github.io/\n layer_attns=True,\n layer_attns_depth=1,\n layer_mid_attns_depth=1,\n layer_attns_add_text_cond=False, # whether to condition the self-attention blocks with the text embeddings, as described in Appendix D.3.1\n attend_at_middle=True, # whether to have a layer of attention at the bottleneck (can turn off for higher resolution in cascading DDPM, before bringing in efficient attention)\n layer_cross_attns=True,\n use_linear_attn=False,\n use_linear_cross_attn=False,\n cond_on_text=False,\n max_text_len=256,\n init_dim=None,\n resnet_groups=8,\n init_conv_kernel_size=7, # kernel size of initial conv, if not using cross embed\n init_cross_embed=True,\n init_cross_embed_kernel_sizes=(3, 7, 15),\n cross_embed_downsample=False,\n cross_embed_downsample_kernel_sizes=(2, 4),\n attn_pool_text=True,\n attn_pool_num_latents=32,\n dropout=0.,\n memory_efficient=False,\n init_conv_to_final_conv_residual=False,\n use_global_context_attn=True,\n scale_skip_connection=True,\n final_resnet_block=True,\n final_conv_kernel_size=3,\n self_cond=False,\n resize_mode='nearest',\n combine_upsample_fmaps=False, # combine feature maps from all upsample blocks, used in unet squared successfully\n pixel_shuffle_upsample=True, # may address checkboard artifacts\n add_dim=(0, 0, 0, 0), # added dim to unet encoder\n corr_index='noised_flow'\n ):\n super().__init__()\n self.corr_index= corr_index\n # guide researchers\n self.sample_size = sample_size\n assert attn_heads > 1, 'you need to have more than 1 attention head, ideally at least 4 or 8'\n\n if dim < 128:\n print_once('The base dimension of your u-net should ideally be no smaller than 128, as recommended by a professional DDPM trainer https://nonint.com/2022/05/04/friends-dont-let-friends-train-small-diffusion-models/')\n\n # save locals to take care of some hyperparameters for cascading DDPM\n\n self._locals = locals()\n self._locals.pop('self', None)\n self._locals.pop('__class__', None)\n\n # determine dimensions\n\n self.channels = channels\n self.channels_out = default(channels_out, channels)\n\n # (1) in cascading diffusion, one concats the low resolution image, blurred, for conditioning the higher resolution synthesis\n # (2) in self conditioning, one appends the predict x0 (x_start)\n init_channels = channels * (1 + int(lowres_cond) + int(self_cond))\n init_dim = default(init_dim, dim)\n\n self.self_cond = self_cond\n\n # optional image conditioning\n\n self.has_cond_image = cond_images_channels > 0\n self.cond_images_channels = cond_images_channels\n\n init_channels += cond_images_channels\n\n # initial convolution\n\n self.init_conv = CrossEmbedLayer(init_channels, dim_out=init_dim, kernel_sizes=init_cross_embed_kernel_sizes, stride=1) if init_cross_embed else nn.Conv2d(init_channels, init_dim, init_conv_kernel_size,\n padding=init_conv_kernel_size // 2)\n\n dims = [init_dim, *map(lambda m: dim * m, dim_mults)]\n in_out = list(zip(dims[:-1], dims[1:]))\n\n self.add_dim = add_dim\n assert len(in_out) == len(add_dim), 'length of add_dim not equal to the depth of u-net'\n\n # time conditioning\n\n cond_dim = default(cond_dim, 
dim)\n time_cond_dim = dim * 4 * (2 if lowres_cond else 1)\n\n # embedding time for log(snr) noise from continuous version\n\n sinu_pos_emb = LearnedSinusoidalPosEmb(learned_sinu_pos_emb_dim)\n sinu_pos_emb_input_dim = learned_sinu_pos_emb_dim + 1\n\n self.to_time_hiddens = nn.Sequential(\n sinu_pos_emb,\n nn.Linear(sinu_pos_emb_input_dim, time_cond_dim),\n nn.SiLU()\n )\n\n self.to_time_cond = nn.Sequential(\n nn.Linear(time_cond_dim, time_cond_dim)\n )\n\n # project to time tokens as well as time hiddens\n\n self.to_time_tokens = nn.Sequential(\n nn.Linear(time_cond_dim, cond_dim * num_time_tokens),\n Rearrange('b (r d) -> b r d', r=num_time_tokens)\n )\n\n # low res aug noise conditioning\n\n self.lowres_cond = lowres_cond\n\n if lowres_cond:\n self.to_lowres_time_hiddens = nn.Sequential(\n LearnedSinusoidalPosEmb(learned_sinu_pos_emb_dim),\n nn.Linear(learned_sinu_pos_emb_dim + 1, time_cond_dim),\n nn.SiLU()\n )\n\n self.to_lowres_time_cond = nn.Sequential(\n nn.Linear(time_cond_dim, time_cond_dim)\n )\n\n self.to_lowres_time_tokens = nn.Sequential(\n nn.Linear(time_cond_dim, cond_dim * num_time_tokens),\n Rearrange('b (r d) -> b r d', r=num_time_tokens)\n )\n\n # normalizations\n\n self.norm_cond = nn.LayerNorm(cond_dim)\n\n # text encoding conditioning (optional)\n\n self.text_to_cond = None\n\n # finer control over whether to condition on text encodings\n\n self.cond_on_text = cond_on_text\n\n # attention pooling\n\n # self.attn_pool = PerceiverResampler(dim = cond_dim, depth = 2, dim_head = attn_dim_head, heads = attn_heads, num_latents = attn_pool_num_latents) if attn_pool_text else None\n\n # for classifier free guidance\n\n self.max_text_len = max_text_len\n\n # for non-attention based text conditioning at all points in the network where time is also conditioned\n\n self.to_text_non_attn_cond = None\n\n # attention related params\n\n attn_kwargs = dict(heads=attn_heads, dim_head=attn_dim_head)\n\n num_layers = len(in_out)\n\n # resnet block klass\n\n num_resnet_blocks = cast_tuple(num_resnet_blocks, num_layers)\n resnet_groups = cast_tuple(resnet_groups, num_layers)\n\n resnet_klass = partial(ResnetBlock, **attn_kwargs)\n\n layer_attns = cast_tuple(layer_attns, num_layers)\n layer_attns_depth = cast_tuple(layer_attns_depth, num_layers)\n layer_cross_attns = cast_tuple(layer_cross_attns, num_layers)\n\n use_linear_attn = cast_tuple(use_linear_attn, num_layers)\n use_linear_cross_attn = cast_tuple(use_linear_cross_attn, num_layers)\n\n assert all([layers == num_layers for layers in list(map(len, (resnet_groups, layer_attns, layer_cross_attns)))])\n\n # downsample klass\n\n downsample_klass = Downsample\n\n if cross_embed_downsample:\n downsample_klass = partial(CrossEmbedLayer, kernel_sizes=cross_embed_downsample_kernel_sizes)\n\n # initial resnet block (for memory efficient unet)\n\n self.init_resnet_block = resnet_klass(init_dim, init_dim, time_cond_dim=time_cond_dim, groups=resnet_groups[0], use_gca=use_global_context_attn) if memory_efficient else None\n\n # scale for resnet skip connections\n\n self.skip_connect_scale = 1. 
if not scale_skip_connection else (2 ** -0.5)\n\n # layers\n\n self.downs = nn.ModuleList([])\n self.ups = nn.ModuleList([])\n num_resolutions = len(in_out)\n\n layer_params = [num_resnet_blocks, resnet_groups, layer_attns, layer_attns_depth, layer_cross_attns, use_linear_attn, use_linear_cross_attn]\n reversed_layer_params = list(map(reversed, layer_params))\n\n # downsampling layers\n\n skip_connect_dims = [] # keep track of skip connection dimensions\n\n for ind, ((dim_in, dim_out), add_dim_per_layer, layer_num_resnet_blocks, groups, layer_attn, layer_attn_depth, layer_cross_attn, layer_use_linear_attn, layer_use_linear_cross_attn) in enumerate(zip(in_out, self.add_dim, *layer_params)):\n is_last = ind >= (num_resolutions - 1)\n\n layer_cond_dim = cond_dim if layer_cross_attn or layer_use_linear_cross_attn else None\n\n if layer_attn:\n transformer_block_klass = TransformerBlock\n elif layer_use_linear_attn:\n transformer_block_klass = LinearAttentionTransformerBlock\n else:\n transformer_block_klass = Identity\n\n current_dim = dim_in\n\n # whether to pre-downsample, from memory efficient unet\n\n pre_downsample = None\n\n if memory_efficient:\n pre_downsample = downsample_klass(dim_in, dim_out)\n current_dim = dim_out\n\n skip_connect_dims.append(current_dim)\n\n # whether to do post-downsample, for non-memory efficient unet\n\n post_downsample = None\n if not memory_efficient:\n post_downsample = downsample_klass(current_dim, dim_out) if not is_last else Parallel(nn.Conv2d(dim_in, dim_out, 3, padding=1), nn.Conv2d(dim_in, dim_out, 1))\n\n self.downs.append(nn.ModuleList([\n pre_downsample,\n resnet_klass(current_dim + add_dim_per_layer, current_dim, cond_dim=layer_cond_dim, linear_attn=layer_use_linear_cross_attn, time_cond_dim=time_cond_dim, groups=groups if add_dim_per_layer == 0 else 4),\n nn.ModuleList([ResnetBlock(current_dim, current_dim, time_cond_dim=time_cond_dim, groups=groups, use_gca=use_global_context_attn) for _ in range(layer_num_resnet_blocks)]),\n transformer_block_klass(dim=current_dim, depth=layer_attn_depth, ff_mult=ff_mult, context_dim=cond_dim, **attn_kwargs),\n post_downsample\n ]))\n\n # middle layers\n\n mid_dim = dims[-1]\n\n self.mid_block1 = ResnetBlock(mid_dim, mid_dim, cond_dim=cond_dim, time_cond_dim=time_cond_dim, groups=resnet_groups[-1])\n self.mid_attn = TransformerBlock(mid_dim, depth=layer_mid_attns_depth, **attn_kwargs) if attend_at_middle else None\n self.mid_block2 = ResnetBlock(mid_dim, mid_dim, cond_dim=cond_dim, time_cond_dim=time_cond_dim, groups=resnet_groups[-1])\n\n # upsample klass\n\n upsample_klass = Upsample if not pixel_shuffle_upsample else PixelShuffleUpsample\n\n # upsampling layers\n\n upsample_fmap_dims = []\n\n for ind, ((dim_in, dim_out), layer_num_resnet_blocks, groups, layer_attn, layer_attn_depth, layer_cross_attn, layer_use_linear_attn, layer_use_linear_cross_attn) in enumerate(zip(reversed(in_out), *reversed_layer_params)):\n is_last = ind == (len(in_out) - 1)\n\n layer_cond_dim = cond_dim if layer_cross_attn or layer_use_linear_cross_attn else None\n\n if layer_attn:\n transformer_block_klass = TransformerBlock\n elif layer_use_linear_attn:\n transformer_block_klass = LinearAttentionTransformerBlock\n else:\n transformer_block_klass = Identity\n\n skip_connect_dim = skip_connect_dims.pop()\n\n upsample_fmap_dims.append(dim_out)\n\n self.ups.append(nn.ModuleList([\n resnet_klass(dim_out + skip_connect_dim, dim_out, cond_dim=layer_cond_dim, linear_attn=layer_use_linear_cross_attn, time_cond_dim=time_cond_dim, 
groups=groups),\n nn.ModuleList([ResnetBlock(dim_out + skip_connect_dim, dim_out, time_cond_dim=time_cond_dim, groups=groups, use_gca=use_global_context_attn) for _ in range(layer_num_resnet_blocks)]),\n transformer_block_klass(dim=dim_out, depth=layer_attn_depth, ff_mult=ff_mult, context_dim=cond_dim, **attn_kwargs),\n upsample_klass(dim_out, dim_in) if not is_last or memory_efficient else Identity()\n ]))\n\n # whether to combine feature maps from all upsample blocks before final resnet block out\n\n self.upsample_combiner = UpsampleCombiner(\n dim=dim,\n enabled=combine_upsample_fmaps,\n dim_ins=upsample_fmap_dims,\n dim_outs=dim\n )\n\n # whether to do a final residual from initial conv to the final resnet block out\n\n self.init_conv_to_final_conv_residual = init_conv_to_final_conv_residual\n final_conv_dim = self.upsample_combiner.dim_out + (dim if init_conv_to_final_conv_residual else 0)\n\n # final optional resnet block and convolution out\n\n self.final_res_block = ResnetBlock(final_conv_dim, dim, time_cond_dim=time_cond_dim, groups=resnet_groups[0], use_gca=True) if final_resnet_block else None\n\n final_conv_dim_in = dim if final_resnet_block else final_conv_dim\n final_conv_dim_in += (channels if lowres_cond else 0)\n\n self.final_conv = nn.Conv2d(final_conv_dim_in, self.channels_out, final_conv_kernel_size, padding=final_conv_kernel_size // 2)\n\n zero_init_(self.final_conv)\n\n # resize mode\n\n self.resize_mode = resize_mode\n\n # if the current settings for the unet are not correct\n # for cascading DDPM, then reinit the unet with the right settings\n def cast_model_parameters(\n self,\n *,\n lowres_cond,\n text_embed_dim,\n channels,\n channels_out,\n cond_on_text\n ):\n if lowres_cond == self.lowres_cond and \\\n channels == self.channels and \\\n cond_on_text == self.cond_on_text and \\\n text_embed_dim == self._locals['text_embed_dim'] and \\\n channels_out == self.channels_out:\n return self\n\n updated_kwargs = dict(\n lowres_cond=lowres_cond,\n text_embed_dim=text_embed_dim,\n channels=channels,\n channels_out=channels_out,\n cond_on_text=cond_on_text\n )\n\n return self.__class__(**{**self._locals, **updated_kwargs})\n\n # methods for returning the full unet config as well as its parameter state\n\n def to_config_and_state_dict(self):\n return self._locals, self.state_dict()\n\n # class method for rehydrating the unet from its config and state dict\n\n @classmethod\n def from_config_and_state_dict(klass, config, state_dict):\n unet = klass(**config)\n unet.load_state_dict(state_dict)\n return unet\n\n # methods for persisting unet to disk\n\n def persist_to_file(self, path):\n path = Path(path)\n path.parents[0].mkdir(exist_ok=True, parents=True)\n\n config, state_dict = self.to_config_and_state_dict()\n pkg = dict(config=config, state_dict=state_dict)\n torch.save(pkg, str(path))\n\n # class method for rehydrating the unet from file saved with `persist_to_file`\n\n @classmethod\n def hydrate_from_file(klass, path):\n path = Path(path)\n assert path.exists()\n pkg = torch.load(str(path))\n\n assert 'config' in pkg and 'state_dict' in pkg\n config, state_dict = pkg['config'], pkg['state_dict']\n\n return Unet.from_config_and_state_dict(config, state_dict)\n\n # forward with classifier free guidance\n\n def forward_with_cond_scale(\n self,\n *args,\n cond_scale=1.,\n **kwargs\n ):\n logits = self.forward(*args, **kwargs)\n\n if cond_scale == 1:\n return logits\n\n null_logits = self.forward(*args, cond_drop_prob=1., **kwargs)\n return null_logits + (logits - 
null_logits) * cond_scale\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n class_labels: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n normalize=False,\n ):\n time = timestep\n\n x = sample\n\n # initial convolution\n x = self.init_conv(x)\n # init conv residual\n\n if self.init_conv_to_final_conv_residual:\n init_conv_residual = x.clone()\n\n # time conditioning\n if len(time.shape) == 0:\n time = time.reshape(1).repeat(sample.shape[0])\n time = time.to(x.device)\n\n time_hiddens = self.to_time_hiddens(time)\n\n # derive time tokens\n\n time_tokens = self.to_time_tokens(time_hiddens)\n t = self.to_time_cond(time_hiddens)\n\n # add lowres time conditioning to time hiddens\n # and add lowres time tokens along sequence dimension for attention\n\n # text conditioning\n\n text_tokens = None\n\n # main conditioning tokens (c)\n\n c = time_tokens if not exists(text_tokens) else torch.cat((time_tokens, text_tokens), dim=-2)\n\n # normalize conditioning tokens\n\n c = self.norm_cond(c)\n\n # initial resnet block (for memory efficient unet)\n\n if exists(self.init_resnet_block):\n x = self.init_resnet_block(x, t)\n\n # go through the layers of the unet, down and up\n\n hiddens = []\n\n for pre_downsample, init_block, resnet_blocks, attn_block, post_downsample in self.downs:\n if exists(pre_downsample):\n x = pre_downsample(x)\n\n x = init_block(x, t, c)\n\n for resnet_block in resnet_blocks:\n x = resnet_block(x, t)\n hiddens.append(x)\n\n x = attn_block(x, c)\n hiddens.append(x)\n\n if exists(post_downsample):\n x = post_downsample(x)\n\n x = self.mid_block1(x, t, c)\n\n if exists(self.mid_attn):\n x = self.mid_attn(x)\n\n x = self.mid_block2(x, t, c)\n\n add_skip_connection = lambda x: torch.cat((x, hiddens.pop() * self.skip_connect_scale), dim=1)\n\n up_hiddens = []\n\n for init_block, resnet_blocks, attn_block, upsample in self.ups:\n x = add_skip_connection(x)\n x = init_block(x, t, c)\n\n for resnet_block in resnet_blocks:\n x = add_skip_connection(x)\n x = resnet_block(x, t)\n\n x = attn_block(x, c)\n up_hiddens.append(x.contiguous())\n x = upsample(x)\n\n # whether to combine all feature maps from upsample blocks\n\n x = self.upsample_combiner(x, up_hiddens)\n\n # final top-most residual if needed\n\n if self.init_conv_to_final_conv_residual:\n x = torch.cat((x, init_conv_residual), dim=1)\n\n if exists(self.final_res_block):\n x = self.final_res_block(x, t)\n\n x = self.final_conv(x)\n if normalize:\n x = torch.tanh(x)\n return UNet2DOutput(sample=x)"
},
{
"identifier": "exists",
"path": "local_diffusers/models/imagen_unet.py",
"snippet": "def exists(val):\n return val is not None"
},
{
"identifier": "UNet2DOutput",
"path": "local_diffusers/models/imagen_unet.py",
"snippet": "class UNet2DOutput(BaseOutput):\n \"\"\"\n The output of [`UNet2DModel`].\n\n Args:\n sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n The hidden states output from the last layer of the model.\n \"\"\"\n\n sample: torch.FloatTensor"
},
{
"identifier": "bilinear_sampler",
"path": "core/utils/utils.py",
"snippet": "def bilinear_sampler(img, coords, mode='bilinear', mask=False):\n \"\"\" Wrapper for grid_sample, uses pixel coordinates \"\"\"\n H, W = img.shape[-2:]\n xgrid, ygrid = coords.split([1,1], dim=-1)\n xgrid = 2*xgrid/(W-1) - 1\n ygrid = 2*ygrid/(H-1) - 1\n\n grid = torch.cat([xgrid, ygrid], dim=-1)\n img = F.grid_sample(img, grid, align_corners=True)\n\n if mask:\n mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)\n return img, mask.float()\n\n return img"
},
{
"identifier": "coords_grid",
"path": "core/utils/utils.py",
"snippet": "def coords_grid(batch, ht, wd, device):\n coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device))\n coords = torch.stack(coords[::-1], dim=0).float()\n return coords[None].repeat(batch, 1, 1, 1)"
},
{
"identifier": "downflow8",
"path": "core/utils/utils.py",
"snippet": "def downflow8(flow, mode='bilinear'):\n new_size = (flow.shape[2] // 8, flow.shape[3] // 8)\n if mode == 'bilinear':\n return F.interpolate(flow, size=new_size, mode=mode, align_corners=True) / 8\n elif mode == 'nearest':\n return F.interpolate(flow, size=new_size, mode=mode) / 8\n else:\n raise NotImplementedError"
},
{
"identifier": "CorrBlock",
"path": "core/corr.py",
"snippet": "class CorrBlock:\n def __init__(self, fmap1, fmap2, num_levels=4, radius=4):\n self.num_levels = num_levels\n self.radius = radius\n self.corr_pyramid = []\n\n # all pairs correlation\n corr = CorrBlock.corr(fmap1, fmap2)\n\n batch, h1, w1, dim, h2, w2 = corr.shape\n corr = corr.reshape(batch*h1*w1, dim, h2, w2)\n \n self.corr_pyramid.append(corr)\n for i in range(self.num_levels-1):\n corr = F.avg_pool2d(corr, 2, stride=2)\n self.corr_pyramid.append(corr)\n\n def __call__(self, coords):\n r = self.radius\n coords = coords.permute(0, 2, 3, 1)\n batch, h1, w1, _ = coords.shape\n\n out_pyramid = []\n for i in range(self.num_levels):\n corr = self.corr_pyramid[i]\n dx = torch.linspace(-r, r, 2*r+1, device=coords.device)\n dy = torch.linspace(-r, r, 2*r+1, device=coords.device)\n delta = torch.stack(torch.meshgrid(dy, dx), axis=-1)\n\n centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i\n delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2)\n coords_lvl = centroid_lvl + delta_lvl\n\n corr = bilinear_sampler(corr, coords_lvl)\n corr = corr.view(batch, h1, w1, -1)\n out_pyramid.append(corr)\n\n out = torch.cat(out_pyramid, dim=-1)\n return out.permute(0, 3, 1, 2).contiguous().float()\n\n @staticmethod\n def corr(fmap1, fmap2):\n batch, dim, ht, wd = fmap1.shape\n fmap1 = fmap1.view(batch, dim, ht*wd)\n fmap2 = fmap2.view(batch, dim, ht*wd) \n \n corr = torch.matmul(fmap1.transpose(1,2), fmap2)\n corr = corr.view(batch, ht, wd, 1, ht, wd)\n return corr / torch.sqrt(torch.tensor(dim).float())"
}
] | import math
import copy
import torch
import torch.nn.functional as F
import torchvision.transforms as T
from random import random
from typing import Optional, List, Union
from tqdm.auto import tqdm
from functools import partial, wraps
from contextlib import contextmanager, nullcontext
from collections import namedtuple
from pathlib import Path
from diffusers.utils import BaseOutput
from torch.nn.parallel import DistributedDataParallel
from torch import nn, einsum
from torch.cuda.amp import autocast
from torch.special import expm1
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from dataclasses import dataclass
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange, Reduce
from core.extractor import BasicEncoder
from .imagen_unet import Unet, exists, UNet2DOutput
from core.utils.utils import bilinear_sampler, coords_grid, downflow8
from core.corr import CorrBlock
from xformers.ops import memory_efficient_attention, unbind, fmha
from flash_attn import flash_attn_qkvpacked_func, flash_attn_func | 7,608 |
try:
    # optional dependency: guard the import so the module still loads without xformers
    from xformers.ops import memory_efficient_attention, unbind, fmha
    XFORMERS_AVAILABLE = True
except ImportError:
    print("xFormers not available")
    XFORMERS_AVAILABLE = False
try:
    # optional dependency: guard the import so the module still loads without flash-attn
    from flash_attn import flash_attn_qkvpacked_func, flash_attn_func
    FLASH_AVAILABLE = True
except ImportError:
    print("FLASH ATTENTION2 not available")
    FLASH_AVAILABLE = False
# predefined unets, with configs lining up with hyperparameters in appendix of paper
class RAFT_Unet(Unet, ModelMixin, ConfigMixin):
@register_to_config
def __init__(self, channels, channels_out, sample_size, add_dim=(0, 0, 324, 0), corr_index='noised_flow', **kwargs):
default_kwargs = dict(
channels=channels,
channels_out=channels_out,
sample_size=sample_size,
dim=128,
dim_mults=(1, 2, 4, 8),
num_resnet_blocks=(2, 4, 8, 8),
layer_attns=(False, False, True, True),
layer_cross_attns=(False, False, False, False),
attn_heads=8,
ff_mult=2.,
memory_efficient=True,
add_dim=add_dim,
corr_index=corr_index
)
super().__init__(**default_kwargs)
# feature encoder
self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=0.0)
print('[fnet: BasicEncoder]')
assert self.corr_index in ['orginal', 'noised_flow', None]
print('[corr_index: ', self.corr_index, ']')
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
class_labels: Optional[torch.Tensor] = None,
return_dict: bool = True,
normalize=False,
):
time = timestep
x = sample
# encoder feature
image1, image2 = x[:, :3], x[:, 3:6]
fmap1, fmap2 = self.fnet([image1, image2])
fmap1 = fmap1.float()
fmap2 = fmap2.float()
corr_fn = CorrBlock(fmap1, fmap2, radius=4)
N, C, H, W = image1.shape
|
try:
    # optional dependency: guard the import so the module still loads without xformers
    from xformers.ops import memory_efficient_attention, unbind, fmha
    XFORMERS_AVAILABLE = True
except ImportError:
    print("xFormers not available")
    XFORMERS_AVAILABLE = False
try:
    # optional dependency: guard the import so the module still loads without flash-attn
    from flash_attn import flash_attn_qkvpacked_func, flash_attn_func
    FLASH_AVAILABLE = True
except ImportError:
    print("FLASH ATTENTION2 not available")
    FLASH_AVAILABLE = False
# predefined unets, with configs lining up with hyperparameters in appendix of paper
class RAFT_Unet(Unet, ModelMixin, ConfigMixin):
@register_to_config
def __init__(self, channels, channels_out, sample_size, add_dim=(0, 0, 324, 0), corr_index='noised_flow', **kwargs):
default_kwargs = dict(
channels=channels,
channels_out=channels_out,
sample_size=sample_size,
dim=128,
dim_mults=(1, 2, 4, 8),
num_resnet_blocks=(2, 4, 8, 8),
layer_attns=(False, False, True, True),
layer_cross_attns=(False, False, False, False),
attn_heads=8,
ff_mult=2.,
memory_efficient=True,
add_dim=add_dim,
corr_index=corr_index
)
super().__init__(**default_kwargs)
# feature encoder
self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=0.0)
print('[fnet: BasicEncoder]')
assert self.corr_index in ['orginal', 'noised_flow', None]
print('[corr_index: ', self.corr_index, ']')
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
class_labels: Optional[torch.Tensor] = None,
return_dict: bool = True,
normalize=False,
):
time = timestep
x = sample
# encoder feature
image1, image2 = x[:, :3], x[:, 3:6]
fmap1, fmap2 = self.fnet([image1, image2])
fmap1 = fmap1.float()
fmap2 = fmap2.float()
corr_fn = CorrBlock(fmap1, fmap2, radius=4)
N, C, H, W = image1.shape | coords1 = coords_grid(N, H // 8, W // 8, device=image1.device) | 5 | 2023-12-01 11:14:25+00:00 | 12k |
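Note: in the record above, the cost volume is built once with `CorrBlock(fmap1, fmap2, radius=4)` and the completed line initializes an identity coordinate grid at 1/8 resolution (`coords_grid(N, H // 8, W // 8, ...)`) for the lookup. The sketch below is a minimal, self-contained illustration of that all-pairs-correlation-plus-grid-lookup idea; it uses toy tensors, a single pyramid level, nearest indexing instead of bilinear sampling, and variable names that are illustrative rather than taken from the repo.

import torch

N, C, H, W = 1, 16, 8, 8
fmap1 = torch.randn(N, C, H, W)
fmap2 = torch.randn(N, C, H, W)

# all-pairs correlation, scaled by sqrt(C) as in CorrBlock.corr: (N, H*W, H*W)
corr = torch.matmul(fmap1.flatten(2).transpose(1, 2), fmap2.flatten(2)) / C ** 0.5

# identity coordinate grid, analogous to coords_grid(N, H, W, device): (N, 2, H, W), x channel first
ys, xs = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
coords = torch.stack([xs, ys], dim=0).float()[None].repeat(N, 1, 1, 1)

# correlation of every pixel with the location its current coords point to
flat_idx = (coords[:, 1] * W + coords[:, 0]).long().flatten(1)               # (N, H*W)
lookup = corr.gather(2, flat_idx.unsqueeze(-1)).view(N, 1, H, W)
print(lookup.shape)                                                          # torch.Size([1, 1, 8, 8])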
EnVision-Research/DDSM | main.py | [
{
"identifier": "model_profiling",
"path": "profile.py",
"snippet": "def model_profiling(model, height, width, batch=1, channel=3, use_cuda=True,\n verbose=True):\n \"\"\" Pytorch model profiling with input image size\n (batch, channel, height, width).\n The function exams the number of multiply-accumulates (n_macs).\n\n Args:\n model: pytorch model\n height: int\n width: int\n batch: int\n channel: int\n use_cuda: bool\n\n Returns:\n macs: int\n params: int\n\n \"\"\"\n model.eval()\n data = torch.rand(batch, channel, height, width)\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n model = model.to(device)\n data = data.to(device)\n model.apply(lambda m: add_profiling_hooks(m, verbose=verbose))\n if verbose:\n print(\n 'Item'.ljust(name_space, ' ') +\n 'params'.rjust(macs_space, ' ') +\n 'macs'.rjust(macs_space, ' ') +\n 'nanosecs'.rjust(seconds_space, ' '))\n if verbose:\n print(''.center(\n name_space + params_space + macs_space + seconds_space, '-'))\n t = torch.zeros((1), dtype=torch.long, device=device)\n model(data, t)\n if verbose:\n print(''.center(\n name_space + params_space + macs_space + seconds_space, '-'))\n print(\n 'Total'.ljust(name_space, ' ') +\n '{:,}'.format(model.n_params).rjust(params_space, ' ') +\n '{:,}'.format(model.n_macs).rjust(macs_space, ' ') +\n '{:,}'.format(model.n_seconds).rjust(seconds_space, ' '))\n remove_profiling_hooks()\n macs = model.n_macs\n param = model.n_params\n model.apply(lambda m: remove_profiling_value(m))\n return macs, param"
},
{
"identifier": "GaussianDiffusionTrainer",
"path": "diffusion.py",
"snippet": "class GaussianDiffusionTrainer(nn.Module):\n def __init__(self, model, beta_1, beta_T, T):\n super().__init__()\n\n self.model = model\n self.T = T\n\n self.register_buffer(\n 'betas', torch.linspace(beta_1, beta_T, T).double())\n alphas = 1. - self.betas\n alphas_bar = torch.cumprod(alphas, dim=0)\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n 'sqrt_alphas_bar', torch.sqrt(alphas_bar))\n self.register_buffer(\n 'sqrt_one_minus_alphas_bar', torch.sqrt(1. - alphas_bar))\n\n def forward(self, x_0):\n \"\"\"\n Algorithm 1.\n \"\"\"\n t = torch.randint(self.T, size=(x_0.shape[0], ), device=x_0.device)\n noise = torch.randn_like(x_0)\n x_t = (\n extract(self.sqrt_alphas_bar, t, x_0.shape) * x_0 +\n extract(self.sqrt_one_minus_alphas_bar, t, x_0.shape) * noise)\n loss = F.mse_loss(self.model(x_t, t), noise, reduction='none')\n return loss"
},
{
"identifier": "GaussianDiffusionSampler",
"path": "diffusion.py",
"snippet": "class GaussianDiffusionSampler(nn.Module):\n def __init__(self, model, beta_1, beta_T, T, img_size=32,\n mean_type='eps', var_type='fixedlarge'):\n assert mean_type in ['xprev' 'xstart', 'epsilon']\n assert var_type in ['fixedlarge', 'fixedsmall']\n super().__init__()\n\n self.model = model\n self.T = T\n self.img_size = img_size\n self.mean_type = mean_type\n self.var_type = var_type\n\n self.register_buffer(\n 'betas', torch.linspace(beta_1, beta_T, T).double())\n alphas = 1. - self.betas\n alphas_bar = torch.cumprod(alphas, dim=0)\n alphas_bar_prev = F.pad(alphas_bar, [1, 0], value=1)[:T]\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n 'sqrt_recip_alphas_bar', torch.sqrt(1. / alphas_bar))\n self.register_buffer(\n 'sqrt_recipm1_alphas_bar', torch.sqrt(1. / alphas_bar - 1))\n\n # calculations for posterior q(x_{t-1} | x_t, x_0)\n self.register_buffer(\n 'posterior_var',\n self.betas * (1. - alphas_bar_prev) / (1. - alphas_bar))\n # below: log calculation clipped because the posterior variance is 0 at\n # the beginning of the diffusion chain\n self.register_buffer(\n 'posterior_log_var_clipped',\n torch.log(\n torch.cat([self.posterior_var[1:2], self.posterior_var[1:]])))\n self.register_buffer(\n 'posterior_mean_coef1',\n torch.sqrt(alphas_bar_prev) * self.betas / (1. - alphas_bar))\n self.register_buffer(\n 'posterior_mean_coef2',\n torch.sqrt(alphas) * (1. - alphas_bar_prev) / (1. - alphas_bar))\n\n def q_mean_variance(self, x_0, x_t, t):\n \"\"\"\n Compute the mean and variance of the diffusion posterior\n q(x_{t-1} | x_t, x_0)\n \"\"\"\n assert x_0.shape == x_t.shape\n posterior_mean = (\n extract(self.posterior_mean_coef1, t, x_t.shape) * x_0 +\n extract(self.posterior_mean_coef2, t, x_t.shape) * x_t\n )\n posterior_log_var_clipped = extract(\n self.posterior_log_var_clipped, t, x_t.shape)\n return posterior_mean, posterior_log_var_clipped\n\n def predict_xstart_from_eps(self, x_t, t, eps):\n assert x_t.shape == eps.shape\n return (\n extract(self.sqrt_recip_alphas_bar, t, x_t.shape) * x_t -\n extract(self.sqrt_recipm1_alphas_bar, t, x_t.shape) * eps\n )\n\n def predict_xstart_from_xprev(self, x_t, t, xprev):\n assert x_t.shape == xprev.shape\n return ( # (xprev - coef2*x_t) / coef1\n extract(\n 1. 
/ self.posterior_mean_coef1, t, x_t.shape) * xprev -\n extract(\n self.posterior_mean_coef2 / self.posterior_mean_coef1, t,\n x_t.shape) * x_t\n )\n\n def p_mean_variance(self, x_t, t):\n # below: only log_variance is used in the KL computations\n model_log_var = {\n # for fixedlarge, we set the initial (log-)variance like so to\n # get a better decoder log likelihood\n 'fixedlarge': torch.log(torch.cat([self.posterior_var[1:2],\n self.betas[1:]])),\n 'fixedsmall': self.posterior_log_var_clipped,\n }[self.var_type]\n model_log_var = extract(model_log_var, t, x_t.shape)\n\n # Mean parameterization\n if self.mean_type == 'xprev': # the model predicts x_{t-1}\n x_prev = self.model(x_t, t)\n x_0 = self.predict_xstart_from_xprev(x_t, t, xprev=x_prev)\n model_mean = x_prev\n elif self.mean_type == 'xstart': # the model predicts x_0\n x_0 = self.model(x_t, t)\n model_mean, _ = self.q_mean_variance(x_0, x_t, t)\n elif self.mean_type == 'epsilon': # the model predicts epsilon\n eps = self.model(x_t, t)\n x_0 = self.predict_xstart_from_eps(x_t, t, eps=eps)\n model_mean, _ = self.q_mean_variance(x_0, x_t, t)\n else:\n raise NotImplementedError(self.mean_type)\n x_0 = torch.clip(x_0, -1., 1.)\n\n return model_mean, model_log_var\n\n def forward(self, x_T):\n \"\"\"\n Algorithm 2.\n \"\"\"\n x_t = x_T\n for time_step in reversed(range(self.T)):\n t = x_t.new_ones([x_T.shape[0], ], dtype=torch.long) * time_step\n mean, log_var = self.p_mean_variance(x_t=x_t, t=t)\n # no noise when t == 0\n if time_step > 0:\n noise = torch.randn_like(x_t)\n else:\n noise = 0\n x_t = mean + torch.exp(0.5 * log_var) * noise\n x_0 = x_t\n return torch.clip(x_0, -1, 1)"
},
{
"identifier": "UNet",
"path": "model.py",
"snippet": "class UNet(nn.Module):\n def __init__(self, T, ch, ch_mult, attn, num_res_blocks, dropout):\n super().__init__()\n assert all([i < len(ch_mult) for i in attn]), 'attn index out of bound'\n tdim = ch * 4\n self.time_embedding = TimeEmbedding(T, ch, tdim)\n\n self.head = nn.Conv2d(3, ch, kernel_size=3, stride=1, padding=1)\n self.downblocks = nn.ModuleList()\n chs = [ch] # record output channel when dowmsample for upsample\n now_ch = ch\n for i, mult in enumerate(ch_mult):\n out_ch = ch * mult\n for _ in range(num_res_blocks):\n self.downblocks.append(ResBlock(\n in_ch=now_ch, out_ch=out_ch, tdim=tdim,\n dropout=dropout, attn=(i in attn)))\n now_ch = out_ch\n chs.append(now_ch)\n if i != len(ch_mult) - 1:\n self.downblocks.append(DownSample(now_ch))\n chs.append(now_ch)\n\n self.middleblocks = nn.ModuleList([\n ResBlock(now_ch, now_ch, tdim, dropout, attn=True),\n ResBlock(now_ch, now_ch, tdim, dropout, attn=False),\n ])\n\n self.upblocks = nn.ModuleList()\n for i, mult in reversed(list(enumerate(ch_mult))):\n out_ch = ch * mult\n for _ in range(num_res_blocks + 1):\n self.upblocks.append(ResBlock(\n in_ch=chs.pop() + now_ch, out_ch=out_ch, tdim=tdim,\n dropout=dropout, attn=(i in attn)))\n now_ch = out_ch\n if i != 0:\n self.upblocks.append(UpSample(now_ch))\n assert len(chs) == 0\n\n self.tail = nn.Sequential(\n nn.GroupNorm(16, now_ch),\n Swish(),\n nn.Conv2d(now_ch, 3, 3, stride=1, padding=1)\n )\n self.initialize()\n\n def initialize(self):\n init.xavier_uniform_(self.head.weight)\n init.zeros_(self.head.bias)\n init.xavier_uniform_(self.tail[-1].weight, gain=1e-5)\n init.zeros_(self.tail[-1].bias)\n\n def forward(self, x, t):\n # Timestep embedding\n temb = self.time_embedding(t)\n # Downsampling\n h = self.head(x)\n hs = [h]\n for layer in self.downblocks:\n h = layer(h, temb)\n hs.append(h)\n # Middle\n for layer in self.middleblocks:\n h = layer(h, temb)\n # Upsampling\n for layer in self.upblocks:\n if isinstance(layer, ResBlock):\n h = torch.cat([h, hs.pop()], dim=1)\n h = layer(h, temb)\n h = self.tail(h)\n\n assert len(hs) == 0\n return h"
},
{
"identifier": "EnsembleUNet",
"path": "model.py",
"snippet": "class EnsembleUNet(nn.Module):\n def __init__(self, T, large_ch, small_ch, ch_mult, attn, num_res_blocks, dropout, start, end):\n super().__init__()\n self.start = start\n self.end = end\n self.large_model = UNet(\n T=T, ch=large_ch, ch_mult=ch_mult, attn=attn,\n num_res_blocks=num_res_blocks, dropout=dropout)\n self.small_model = UNet(\n T=T, ch=small_ch, ch_mult=ch_mult, attn=attn,\n num_res_blocks=num_res_blocks, dropout=dropout)\n\n def forward(self, x, t):\n timesteps = t[0]\n if timesteps > self.end and timesteps < self.start:\n return self.small_model(x, t)\n else:\n return self.large_model(x, t)"
},
{
"identifier": "SlimmableUNet",
"path": "slimmable_model.py",
"snippet": "class SlimmableUNet(nn.Module):\n def __init__(self, T, ch, ch_mult, attn, num_res_blocks, dropout):\n super().__init__()\n assert all([i < len(ch_mult) for i in attn]), 'attn index out of bound'\n tdim = ch * 4\n self.time_embedding = TimeEmbedding(T, ch, tdim)\n\n self.head = SlimmableConv2d(3, ch, kernel_size=3, stride=1, padding=1)\n self.downblocks = nn.ModuleList()\n chs = [ch] # record output channel when dowmsample for upsample\n now_ch = ch\n for i, mult in enumerate(ch_mult):\n out_ch = ch * mult\n for _ in range(num_res_blocks):\n self.downblocks.append(ResBlock(\n in_ch=now_ch, out_ch=out_ch, tdim=tdim,\n dropout=dropout, attn=(i in attn)))\n now_ch = out_ch\n chs.append(now_ch)\n if i != len(ch_mult) - 1:\n self.downblocks.append(DownSample(now_ch))\n chs.append(now_ch)\n\n self.middleblocks = nn.ModuleList([\n ResBlock(now_ch, now_ch, tdim, dropout, attn=True),\n ResBlock(now_ch, now_ch, tdim, dropout, attn=False),\n ])\n\n self.upblocks = nn.ModuleList()\n for i, mult in reversed(list(enumerate(ch_mult))):\n out_ch = ch * mult\n for _ in range(num_res_blocks + 1):\n self.upblocks.append(ResBlock(\n in_ch=chs.pop() + now_ch, out_ch=out_ch, tdim=tdim,\n dropout=dropout, attn=(i in attn)))\n now_ch = out_ch\n if i != 0:\n self.upblocks.append(UpSample(now_ch))\n assert len(chs) == 0\n\n self.tail = nn.Sequential(\n SlimmableGroupNorm2d(32, now_ch),\n Swish(),\n SlimmableConv2d(now_ch, 3, 3, stride=1, padding=1, slimmable=False)\n )\n self.initialize()\n\n def initialize(self):\n init.xavier_uniform_(self.head.weight)\n init.zeros_(self.head.bias)\n init.xavier_uniform_(self.tail[-1].weight, gain=1e-5)\n init.zeros_(self.tail[-1].bias)\n\n def forward(self, x, t):\n # Timestep embedding\n temb = self.time_embedding(t)\n # Downsampling\n h = self.head(x)\n hs = [h]\n for layer in self.downblocks:\n h = layer(h, temb)\n hs.append(h)\n # Middle\n for layer in self.middleblocks:\n h = layer(h, temb)\n # Upsampling\n for layer in self.upblocks:\n if isinstance(layer, ResBlock):\n h = torch.cat([h, hs.pop()], dim=1)\n h = layer(h, temb)\n h = self.tail(h)\n\n assert len(hs) == 0\n return h"
},
{
"identifier": "StepAwareUNet",
"path": "slimmable_model.py",
"snippet": "class StepAwareUNet(SlimmableUNet):\n def __init__(self, T, ch, ch_mult, attn, num_res_blocks, dropout, strategy):\n super().__init__(T, ch, ch_mult, attn, num_res_blocks, dropout)\n self.strategy = strategy\n\n def forward(self, x, t):\n width = self.strategy[int(t[0])]\n # print('apply {} for t{}'.format(width, t[0]))\n self.apply(lambda m: setattr(m, 'width_mult', width))\n return super().forward(x, t)"
},
{
"identifier": "Slimmable16UNet",
"path": "slimmable_model_g16.py",
"snippet": "class Slimmable16UNet(nn.Module):\n def __init__(self, T, ch, ch_mult, attn, num_res_blocks, dropout):\n super().__init__()\n assert all([i < len(ch_mult) for i in attn]), 'attn index out of bound'\n tdim = ch * 4\n self.time_embedding = TimeEmbedding(T, ch, tdim)\n\n self.head = SlimmableConv2d(3, ch, kernel_size=3, stride=1, padding=1)\n self.downblocks = nn.ModuleList()\n chs = [ch] # record output channel when dowmsample for upsample\n now_ch = ch\n for i, mult in enumerate(ch_mult):\n out_ch = ch * mult\n for _ in range(num_res_blocks):\n self.downblocks.append(ResBlock(\n in_ch=now_ch, out_ch=out_ch, tdim=tdim,\n dropout=dropout, attn=(i in attn)))\n now_ch = out_ch\n chs.append(now_ch)\n if i != len(ch_mult) - 1:\n self.downblocks.append(DownSample(now_ch))\n chs.append(now_ch)\n\n self.middleblocks = nn.ModuleList([\n ResBlock(now_ch, now_ch, tdim, dropout, attn=True),\n ResBlock(now_ch, now_ch, tdim, dropout, attn=False),\n ])\n\n self.upblocks = nn.ModuleList()\n for i, mult in reversed(list(enumerate(ch_mult))):\n out_ch = ch * mult\n for _ in range(num_res_blocks + 1):\n self.upblocks.append(ResBlock(\n in_ch=chs.pop() + now_ch, out_ch=out_ch, tdim=tdim,\n dropout=dropout, attn=(i in attn)))\n now_ch = out_ch\n if i != 0:\n self.upblocks.append(UpSample(now_ch))\n assert len(chs) == 0\n\n self.tail = nn.Sequential(\n SlimmableGroupNorm2d(16, now_ch),\n Swish(),\n SlimmableConv2d(now_ch, 3, 3, stride=1, padding=1, slimmable=False)\n )\n self.initialize()\n\n def initialize(self):\n init.xavier_uniform_(self.head.weight)\n init.zeros_(self.head.bias)\n init.xavier_uniform_(self.tail[-1].weight, gain=1e-5)\n init.zeros_(self.tail[-1].bias)\n\n def forward(self, x, t):\n # Timestep embedding\n temb = self.time_embedding(t)\n # Downsampling\n h = self.head(x)\n hs = [h]\n for layer in self.downblocks:\n h = layer(h, temb)\n hs.append(h)\n # Middle\n for layer in self.middleblocks:\n h = layer(h, temb)\n # Upsampling\n for layer in self.upblocks:\n if isinstance(layer, ResBlock):\n h = torch.cat([h, hs.pop()], dim=1)\n h = layer(h, temb)\n h = self.tail(h)\n\n assert len(hs) == 0\n return h"
},
{
"identifier": "StepAware16UNet",
"path": "slimmable_model_g16.py",
"snippet": "class StepAware16UNet(Slimmable16UNet):\n def __init__(self, T, ch, ch_mult, attn, num_res_blocks, dropout, strategy):\n super().__init__(T, ch, ch_mult, attn, num_res_blocks, dropout)\n self.strategy = strategy\n\n def forward(self, x, t):\n width = self.strategy[int(t[0])]\n # print('apply {} for t{}'.format(width, t[0]))\n self.apply(lambda m: setattr(m, 'width_mult', width))\n return super().forward(x, t)"
},
{
"identifier": "get_inception_and_fid_score",
"path": "score/both.py",
"snippet": "def get_inception_and_fid_score(images, fid_cache, num_images=None,\n splits=10, batch_size=50,\n use_torch=False,\n verbose=False,\n parallel=False):\n \"\"\"when `images` is a python generator, `num_images` should be given\"\"\"\n\n if num_images is None and isinstance(images, types.GeneratorType):\n raise ValueError(\n \"when `images` is a python generator, \"\n \"`num_images` should be given\")\n\n if num_images is None:\n num_images = len(images)\n\n block_idx1 = InceptionV3.BLOCK_INDEX_BY_DIM[2048]\n block_idx2 = InceptionV3.BLOCK_INDEX_BY_DIM['prob']\n model = InceptionV3([block_idx1, block_idx2]).to(device)\n model.eval()\n\n if parallel:\n model = torch.nn.DataParallel(model)\n\n if use_torch:\n fid_acts = torch.empty((num_images, 2048)).to(device)\n is_probs = torch.empty((num_images, 1008)).to(device)\n else:\n fid_acts = np.empty((num_images, 2048))\n is_probs = np.empty((num_images, 1008))\n\n iterator = iter(tqdm(\n images, total=num_images,\n dynamic_ncols=True, leave=False, disable=not verbose,\n desc=\"get_inception_and_fid_score\"))\n start = 0\n while True:\n batch_images = []\n # get a batch of images from iterator\n try:\n for _ in range(batch_size):\n batch_images.append(next(iterator))\n except StopIteration:\n if len(batch_images) == 0:\n break\n pass\n batch_images = np.stack(batch_images, axis=0)\n end = start + len(batch_images)\n\n # calculate inception feature\n batch_images = torch.from_numpy(batch_images).type(torch.FloatTensor)\n batch_images = batch_images.to(device)\n with torch.no_grad():\n pred = model(batch_images)\n if use_torch:\n fid_acts[start: end] = pred[0].view(-1, 2048)\n is_probs[start: end] = pred[1]\n else:\n fid_acts[start: end] = pred[0].view(-1, 2048).cpu().numpy()\n is_probs[start: end] = pred[1].cpu().numpy()\n start = end\n # Inception Score\n scores = []\n for i in range(splits):\n part = is_probs[\n (i * is_probs.shape[0] // splits):\n ((i + 1) * is_probs.shape[0] // splits), :]\n if use_torch:\n kl = part * (\n torch.log(part) -\n torch.log(torch.unsqueeze(torch.mean(part, 0), 0)))\n kl = torch.mean(torch.sum(kl, 1))\n scores.append(torch.exp(kl))\n else:\n kl = part * (\n np.log(part) -\n np.log(np.expand_dims(np.mean(part, 0), 0)))\n kl = np.mean(np.sum(kl, 1))\n scores.append(np.exp(kl))\n if use_torch:\n scores = torch.stack(scores)\n is_score = (torch.mean(scores).cpu().item(),\n torch.std(scores).cpu().item())\n else:\n is_score = (np.mean(scores), np.std(scores))\n\n # FID Score\n f = np.load(fid_cache)\n m2, s2 = f['mu'][:], f['sigma'][:]\n f.close()\n if use_torch:\n m1 = torch.mean(fid_acts, axis=0)\n s1 = torch_cov(fid_acts, rowvar=False)\n m2 = torch.tensor(m2).to(m1.dtype).to(device)\n s2 = torch.tensor(s2).to(s1.dtype).to(device)\n else:\n m1 = np.mean(fid_acts, axis=0)\n s1 = np.cov(fid_acts, rowvar=False)\n fid_score = calculate_frechet_distance(m1, s1, m2, s2, use_torch=use_torch)\n\n del fid_acts, is_probs, scores, model\n return is_score, fid_score"
},
{
"identifier": "get_fid_score",
"path": "score/both.py",
"snippet": "def get_fid_score(images, fid_cache, num_images=None,\n splits=10, batch_size=50,\n use_torch=False,\n verbose=False,\n parallel=False):\n \"\"\"when `images` is a python generator, `num_images` should be given\"\"\"\n\n if num_images is None and isinstance(images, types.GeneratorType):\n raise ValueError(\n \"when `images` is a python generator, \"\n \"`num_images` should be given\")\n\n if num_images is None:\n num_images = len(images)\n\n block_idx1 = InceptionV3.BLOCK_INDEX_BY_DIM[2048]\n block_idx2 = InceptionV3.BLOCK_INDEX_BY_DIM['prob']\n model = InceptionV3([block_idx1, block_idx2]).to(device)\n model.eval()\n\n if parallel:\n model = torch.nn.DataParallel(model)\n\n if use_torch:\n fid_acts = torch.empty((num_images, 2048)).to(device)\n is_probs = torch.empty((num_images, 1008)).to(device)\n else:\n fid_acts = np.empty((num_images, 2048))\n is_probs = np.empty((num_images, 1008))\n\n iterator = iter(tqdm(\n images, total=num_images,\n dynamic_ncols=True, leave=False, disable=not verbose,\n desc=\"get_inception_and_fid_score\"))\n\n start = 0\n while True:\n batch_images = []\n # get a batch of images from iterator\n try:\n for _ in range(batch_size):\n ti = time.time()\n batch_images.append(next(iterator))\n print(time.time() - ti)\n except StopIteration:\n if len(batch_images) == 0:\n break\n pass\n batch_images = np.stack(batch_images, axis=0)\n end = start + len(batch_images)\n\n # calculate inception feature\n batch_images = torch.from_numpy(batch_images).type(torch.FloatTensor)\n batch_images = batch_images.to(device)\n with torch.no_grad():\n pred = model(batch_images)\n if use_torch:\n fid_acts[start: end] = pred[0].view(-1, 2048)\n is_probs[start: end] = pred[1]\n else:\n fid_acts[start: end] = pred[0].view(-1, 2048).cpu().numpy()\n is_probs[start: end] = pred[1].cpu().numpy()\n start = end\n # FID Score\n f = np.load(fid_cache)\n m2, s2 = f['mu'][:], f['sigma'][:]\n f.close()\n if use_torch:\n m1 = torch.mean(fid_acts, axis=0)\n s1 = torch_cov(fid_acts, rowvar=False)\n m2 = torch.tensor(m2).to(m1.dtype).to(device)\n s2 = torch.tensor(s2).to(s1.dtype).to(device)\n else:\n m1 = np.mean(fid_acts, axis=0)\n s1 = np.cov(fid_acts, rowvar=False)\n fid_score = calculate_frechet_distance(m1, s1, m2, s2, use_torch=use_torch)\n\n del fid_acts, is_probs, model\n return fid_score"
}
] | import copy
import json
import os
import random
import warnings
import time
import torch
import numpy as np
from absl import app, flags
from tensorboardX import SummaryWriter
from torchvision.datasets import CIFAR10
from torchvision.utils import make_grid, save_image
from torchvision import transforms
from tqdm import trange
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.core.problem import Problem
from pymoo.optimize import minimize
from pymoo.core.mutation import Mutation
from pymoo.operators.sampling.rnd import FloatRandomSampling
from pymoo.operators.sampling.rnd import IntegerRandomSampling
from pymoo.operators.mutation.pm import PolynomialMutation
from pymoo.operators.crossover.pntx import SinglePointCrossover
from profile import model_profiling
from ptflops import get_model_complexity_info
from diffusion import GaussianDiffusionTrainer, GaussianDiffusionSampler
from model import UNet, EnsembleUNet
from slimmable_model import SlimmableUNet, StepAwareUNet
from slimmable_model_g16 import Slimmable16UNet, StepAware16UNet
from score.both import get_inception_and_fid_score, get_fid_score | 10,154 | net_model.eval()
with torch.no_grad():
x_0 = ema_sampler(x_T)
grid = (make_grid(x_0) + 1) / 2
path = os.path.join(
FLAGS.logdir, 'sample', '%d.png' % step)
save_image(grid, path)
writer.add_image('sample', grid, step)
net_model.train()
# save
if FLAGS.save_step > 0 and step % FLAGS.save_step == 0:
ckpt = {
'net_model': net_model.state_dict(),
'ema_model': ema_model.state_dict(),
'sched': sched.state_dict(),
'optim': optim.state_dict(),
'step': step,
'x_T': x_T,
}
torch.save(ckpt, os.path.join(FLAGS.logdir, 'ckpt.pt'))
torch.save(ckpt, os.path.join(FLAGS.logdir, 'ckpt_{}.pt'.format(step)))
# evaluate
if FLAGS.eval_step > 0 and step % FLAGS.eval_step == 0:
net_IS, net_FID, _ = evaluate(net_sampler, net_model)
ema_IS, ema_FID, _ = evaluate(ema_sampler, ema_model)
metrics = {
'IS': net_IS[0],
'IS_std': net_IS[1],
'FID': net_FID,
'IS_EMA': ema_IS[0],
'IS_std_EMA': ema_IS[1],
'FID_EMA': ema_FID
}
pbar.write(
"%d/%d " % (step, FLAGS.total_steps) +
", ".join('%s:%.3f' % (k, v) for k, v in metrics.items()))
for name, value in metrics.items():
writer.add_scalar(name, value, step)
writer.flush()
with open(os.path.join(FLAGS.logdir, 'eval.txt'), 'a') as f:
metrics['step'] = step
f.write(json.dumps(metrics) + "\n")
writer.close()
def Eval():
# model setup
if FLAGS.slimmable_unet:
model = SlimmableUNet(
T=FLAGS.T, ch=FLAGS.ch, ch_mult=FLAGS.ch_mult, attn=FLAGS.attn,
num_res_blocks=FLAGS.num_res_blocks, dropout=FLAGS.dropout)
model.apply(lambda m: setattr(m, 'width_mult', FLAGS.assigned_width))
else:
model = UNet(
T=FLAGS.T, ch=FLAGS.ch, ch_mult=FLAGS.ch_mult, attn=FLAGS.attn,
num_res_blocks=FLAGS.num_res_blocks, dropout=FLAGS.dropout)
sampler = GaussianDiffusionSampler(
model, FLAGS.beta_1, FLAGS.beta_T, FLAGS.T, img_size=FLAGS.img_size,
mean_type=FLAGS.mean_type, var_type=FLAGS.var_type).to(device)
if FLAGS.parallel:
sampler = torch.nn.DataParallel(sampler)
# load model and evaluate
ckpt = torch.load(os.path.join(FLAGS.logdir, '{}.pt'.format(FLAGS.ckpt_name)))
# model.load_state_dict(ckpt['net_model'])
# (IS, IS_std), FID, samples = evaluate(sampler, model)
# print("Model : IS:%6.3f(%.3f), FID:%7.3f" % (IS, IS_std, FID))
# save_image(
# torch.tensor(samples[:256]),
# os.path.join(FLAGS.logdir, 'samples.png'),
# nrow=16)
model.load_state_dict(ckpt['ema_model'])
(IS, IS_std), FID, samples = evaluate(sampler, model)
if FLAGS.slimmable_unet:
print('width: {}'.format(int(FLAGS.assigned_width * FLAGS.ch)))
print("Model(EMA): IS:%6.3f(%.3f), FID:%7.3f" % (IS, IS_std, FID))
save_image(
torch.tensor(samples[:256]),
os.path.join(FLAGS.logdir, 'samples_ema.png'),
nrow=16)
def eval_stepaware():
# model setup
with open(os.path.join(FLAGS.logdir, 'search.txt'), 'r') as f:
strategy = eval(f.readlines()[0])
model = StepAwareUNet(
T=FLAGS.T, ch=FLAGS.ch, ch_mult=FLAGS.ch_mult, attn=FLAGS.attn,
num_res_blocks=FLAGS.num_res_blocks, dropout=FLAGS.dropout, strategy=strategy)
sampler = GaussianDiffusionSampler(
model, FLAGS.beta_1, FLAGS.beta_T, FLAGS.T, img_size=FLAGS.img_size,
mean_type=FLAGS.mean_type, var_type=FLAGS.var_type).to(device)
if FLAGS.parallel:
sampler = torch.nn.DataParallel(sampler)
# load model and evaluate
ckpt = torch.load(os.path.join(FLAGS.logdir, '{}.pt'.format(FLAGS.ckpt_name)))
# model.load_state_dict(ckpt['net_model'])
# (IS, IS_std), FID, samples = evaluate(sampler, model)
# print("Model : IS:%6.3f(%.3f), FID:%7.3f" % (IS, IS_std, FID))
# save_image(
# torch.tensor(samples[:256]),
# os.path.join(FLAGS.logdir, 'samples.png'),
# nrow=16)
model.load_state_dict(ckpt['ema_model'])
(IS, IS_std), FID, samples = evaluate(sampler, model)
print(strategy)
print("Model(EMA): IS:%6.3f(%.3f), FID:%7.3f" % (IS, IS_std, FID))
save_image(
torch.tensor(samples[:256]),
os.path.join(FLAGS.logdir, 'samples_ema.png'),
nrow=16)
def eval_ensemble():
os.makedirs(os.path.join(FLAGS.logdir, 'sample'))
# model setup
|
FLAGS = flags.FLAGS
flags.DEFINE_bool('train', False, help='train from scratch')
flags.DEFINE_bool('eval', False, help='load ckpt.pt and evaluate FID and IS')
flags.DEFINE_bool('eval_stepaware', False, help='load ckpt.pt and evaluate FID and IS with the step-aware UNet')
# UNet
flags.DEFINE_integer('ch', 128, help='base channel of UNet')
flags.DEFINE_multi_integer('ch_mult', [1, 2, 2, 2], help='channel multiplier')
flags.DEFINE_multi_integer('attn', [1], help='add attention to these levels')
flags.DEFINE_integer('num_res_blocks', 2, help='# resblock in each level')
flags.DEFINE_float('dropout', 0.1, help='dropout rate of resblock')
# Gaussian Diffusion
flags.DEFINE_float('beta_1', 1e-4, help='start beta value')
flags.DEFINE_float('beta_T', 0.02, help='end beta value')
flags.DEFINE_integer('T', 1000, help='total diffusion steps')
flags.DEFINE_enum('mean_type', 'epsilon', ['xprev', 'xstart', 'epsilon'], help='predict variable')
flags.DEFINE_enum('var_type', 'fixedlarge', ['fixedlarge', 'fixedsmall'], help='variance type')
# Training
flags.DEFINE_float('lr', 2e-4, help='target learning rate')
flags.DEFINE_float('grad_clip', 1., help="gradient norm clipping")
flags.DEFINE_integer('total_steps', 800000, help='total training steps')
flags.DEFINE_integer('img_size', 32, help='image size')
flags.DEFINE_integer('warmup', 5000, help='learning rate warmup')
flags.DEFINE_integer('batch_size', 128, help='batch size')
flags.DEFINE_integer('num_workers', 4, help='workers of Dataloader')
flags.DEFINE_float('ema_decay', 0.9999, help="ema decay rate")
flags.DEFINE_bool('parallel', False, help='multi gpu training')
# Logging & Sampling
flags.DEFINE_string('logdir', './logs/DDPM_CIFAR10_EPS', help='log directory')
flags.DEFINE_integer('sample_size', 64, "sampling size of images")
flags.DEFINE_integer('sample_step', 1000, help='frequency of sampling')
# Evaluation
flags.DEFINE_integer('save_step', 5000, help='frequency of saving checkpoints, 0 to disable during training')
flags.DEFINE_integer('eval_step', 0, help='frequency of evaluating model, 0 to disable during training')
flags.DEFINE_integer('num_images', 50000, help='the number of generated images for evaluation')
flags.DEFINE_bool('fid_use_torch', False, help='calculate IS and FID on gpu')
flags.DEFINE_string('fid_cache', './stats/cifar10.train.npz', help='FID cache')
flags.DEFINE_string('ckpt_name', 'ckpt', help='ckpt name')
# slimmable
flags.DEFINE_bool('slimmable_unet', False, help='use slimmable unet')
flags.DEFINE_bool('slimmable_g16', False, help='g16 slimmable unet')
flags.DEFINE_bool('sandwich', False, help='use sandwich training')
flags.DEFINE_float('min_width', 0.25, help="min_width")
flags.DEFINE_integer('num_sandwich_sampling', 3, help='the number of sandwich training samples')
flags.DEFINE_multi_float('candidate_width', [0.75, 0.5], help='candidate_width')
flags.DEFINE_float('assigned_width', 1.0, help="assigned_width")
# ensemble
flags.DEFINE_bool('eval_ensemble', False, help='eval ensemble model')
flags.DEFINE_string('large_logdir', './logs/DDPM_CIFAR10_EPS', help='large model log directory')
flags.DEFINE_string('small_logdir', './logs/DDPM_CIFAR10_EPS', help='small model log directory')
flags.DEFINE_integer('small_ch', 64, help='channel of small model')
flags.DEFINE_integer('start', 200, help='the start step of small model')
flags.DEFINE_integer('end', 0, help='the end step of small model')
# search
flags.DEFINE_bool('search', False, help='search model')
flags.DEFINE_integer('num_generation', 1000, help='the number of generation')
flags.DEFINE_integer('pop_size', 10, help='the size of population')
flags.DEFINE_float('fid_weight', 0.5, help="fid_weight")
flags.DEFINE_float('macs_weight', 0.001, help="macs_weight")
flags.DEFINE_float('mutation_prob', 0.001, help="mutation_prob")
flags.DEFINE_bool('random_init', False, help='randomly initialize the search population')
# profile
flags.DEFINE_bool('profile', False, help='profile model')
device = torch.device('cuda:0')
def ema(source, target, decay):
source_dict = source.state_dict()
target_dict = target.state_dict()
for key in source_dict.keys():
target_dict[key].data.copy_(
target_dict[key].data * decay +
source_dict[key].data * (1 - decay))
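# Illustrative sketch (hypothetical helper, not part of the original script): ema()
# above applies target = decay * target + (1 - decay) * source to every state-dict
# tensor. A minimal, self-contained usage example on a small module:
def _ema_usage_demo():
    source = torch.nn.Linear(4, 4)
    target = copy.deepcopy(source)
    with torch.no_grad():
        for p in source.parameters():
            p.add_(torch.randn_like(p))  # pretend one training step moved the source weights
    ema(source, target, decay=0.9999)  # target drifts slightly toward the new source weights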
def infiniteloop(dataloader):
while True:
for x, y in iter(dataloader):
yield x
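# Illustrative sketch (hypothetical helper, not part of the original script):
# infiniteloop() turns a finite DataLoader into an endless stream of image batches,
# dropping labels, so the training loop can call next(datalooper) once per step
# without tracking epochs. Minimal demo on an in-memory dataset:
def _infiniteloop_demo():
    xs = torch.randn(10, 3, 32, 32)
    ys = torch.zeros(10, dtype=torch.long)
    loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(xs, ys), batch_size=4)
    looper = infiniteloop(loader)
    return next(looper).shape  # torch.Size([4, 3, 32, 32])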
def warmup_lr(step):
return min(step, FLAGS.warmup) / FLAGS.warmup
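# Illustrative sketch (hypothetical helper, not part of the original script):
# warmup_lr() returns a multiplier that ramps linearly from 0 to 1 over FLAGS.warmup
# steps and stays at 1 afterwards, so LambdaLR yields an effective learning rate of
# FLAGS.lr * min(step, warmup) / warmup. The demo below uses an inline lambda so it
# does not depend on parsed FLAGS:
def _warmup_schedule_demo(warmup=5000):
    dummy = torch.nn.Linear(2, 2)
    opt = torch.optim.Adam(dummy.parameters(), lr=2e-4)
    sched = torch.optim.lr_scheduler.LambdaLR(
        opt, lr_lambda=lambda step: min(step, warmup) / warmup)
    lrs = []
    for _ in range(3):
        opt.step()
        sched.step()
        lrs.append(sched.get_last_lr()[0])  # grows linearly: lr * 1/warmup, 2/warmup, ...
    return lrs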
def evaluate(sampler, model, fid_only=False):
model.eval()
with torch.no_grad():
images = []
desc = "generating images"
for i in trange(0, FLAGS.num_images, FLAGS.batch_size, desc=desc):
batch_size = min(FLAGS.batch_size, FLAGS.num_images - i)
x_T = torch.randn((batch_size, 3, FLAGS.img_size, FLAGS.img_size))
ti = time.time()
batch_images = sampler(x_T.to(device)).cpu()
print(str(time.time() - ti))
with open('time.txt', 'w') as f:
f.write(str(time.time() - ti))
images.append((batch_images + 1) / 2)
images = torch.cat(images, dim=0).numpy()
model.train()
if fid_only:
FID = get_fid_score(
images, FLAGS.fid_cache, num_images=FLAGS.num_images,
use_torch=FLAGS.fid_use_torch, verbose=True)
return FID
else:
(IS, IS_std), FID = get_inception_and_fid_score(
images, FLAGS.fid_cache, num_images=FLAGS.num_images,
use_torch=FLAGS.fid_use_torch, verbose=True)
return (IS, IS_std), FID, images
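# Illustrative sketch (hypothetical helper, not part of the original script):
# evaluate() hands the score functions a float array of shape
# (num_images, 3, img_size, img_size) with values in [0, 1]. This helper only checks
# that an array follows the expected layout; it does not compute any score.
def _check_score_input(images):
    assert images.ndim == 4 and images.shape[1] == 3, "expected (N, 3, H, W)"
    assert 0.0 <= images.min() and images.max() <= 1.0, "expected values in [0, 1]"
    return images.shape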
def train():
# dataset
dataset = CIFAR10(
root='./data', train=True, download=True,
transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=FLAGS.batch_size, shuffle=True,
num_workers=FLAGS.num_workers, drop_last=True)
datalooper = infiniteloop(dataloader)
# model setup
if FLAGS.slimmable_unet:
if FLAGS.slimmable_g16:
net_model = Slimmable16UNet(
T=FLAGS.T, ch=FLAGS.ch, ch_mult=FLAGS.ch_mult, attn=FLAGS.attn,
num_res_blocks=FLAGS.num_res_blocks, dropout=FLAGS.dropout)
else:
net_model = SlimmableUNet(
T=FLAGS.T, ch=FLAGS.ch, ch_mult=FLAGS.ch_mult, attn=FLAGS.attn,
num_res_blocks=FLAGS.num_res_blocks, dropout=FLAGS.dropout)
else:
net_model = UNet(
T=FLAGS.T, ch=FLAGS.ch, ch_mult=FLAGS.ch_mult, attn=FLAGS.attn,
num_res_blocks=FLAGS.num_res_blocks, dropout=FLAGS.dropout)
ema_model = copy.deepcopy(net_model)
optim = torch.optim.Adam(net_model.parameters(), lr=FLAGS.lr)
sched = torch.optim.lr_scheduler.LambdaLR(optim, lr_lambda=warmup_lr)
trainer = GaussianDiffusionTrainer(
net_model, FLAGS.beta_1, FLAGS.beta_T, FLAGS.T).to(device)
net_sampler = GaussianDiffusionSampler(
net_model, FLAGS.beta_1, FLAGS.beta_T, FLAGS.T, FLAGS.img_size,
FLAGS.mean_type, FLAGS.var_type).to(device)
ema_sampler = GaussianDiffusionSampler(
ema_model, FLAGS.beta_1, FLAGS.beta_T, FLAGS.T, FLAGS.img_size,
FLAGS.mean_type, FLAGS.var_type).to(device)
if FLAGS.parallel:
trainer = torch.nn.DataParallel(trainer)
net_sampler = torch.nn.DataParallel(net_sampler)
ema_sampler = torch.nn.DataParallel(ema_sampler)
# log setup
os.makedirs(os.path.join(FLAGS.logdir, 'sample'), exist_ok=True)
if FLAGS.sandwich:
os.makedirs(os.path.join(FLAGS.logdir, 'supernet_sample'))
os.makedirs(os.path.join(FLAGS.logdir, 'minnet_sample'))
x_T = torch.randn(FLAGS.sample_size, 3, FLAGS.img_size, FLAGS.img_size)
x_T = x_T.to(device)
grid = (make_grid(next(iter(dataloader))[0][:FLAGS.sample_size]) + 1) / 2
writer = SummaryWriter(FLAGS.logdir)
writer.add_image('real_sample', grid)
writer.flush()
# backup all arguments
with open(os.path.join(FLAGS.logdir, "flagfile.txt"), 'w') as f:
f.write(FLAGS.flags_into_string())
# show model size
model_size = 0
for param in net_model.parameters():
model_size += param.data.nelement()
print('Model params: %.2f M' % (model_size / 1024 / 1024))
# start training
with trange(FLAGS.total_steps, dynamic_ncols=True) as pbar:
for step in pbar:
if FLAGS.sandwich:
if FLAGS.parallel:
assert isinstance(trainer.module.model, SlimmableUNet)
else:
assert isinstance(trainer.model, SlimmableUNet)
optim.zero_grad()
x_0 = next(datalooper).to(device)
# supernet
if FLAGS.parallel:
trainer.module.model.apply(lambda m: setattr(m, 'width_mult', 1.0))
else:
trainer.model.apply(lambda m: setattr(m, 'width_mult', 1.0))
supernet_loss = trainer(x_0).mean()
supernet_loss.backward()
# minnet
if FLAGS.parallel:
trainer.module.model.apply(lambda m: setattr(m, 'width_mult', FLAGS.min_width))
else:
trainer.model.apply(lambda m: setattr(m, 'width_mult', FLAGS.min_width))
minnet_loss = trainer(x_0).mean()
minnet_loss.backward()
# midnet
for i in range(FLAGS.num_sandwich_sampling-2):
mid_width = random.choice(FLAGS.candidate_width)
if FLAGS.parallel:
trainer.module.model.apply(lambda m: setattr(m, 'width_mult', mid_width))
else:
trainer.model.apply(lambda m: setattr(m, 'width_mult', mid_width))
loss = trainer(x_0).mean()
loss.backward()
# optim
torch.nn.utils.clip_grad_norm_(
net_model.parameters(), FLAGS.grad_clip)
optim.step()
sched.step()
ema(net_model, ema_model, FLAGS.ema_decay)
if FLAGS.parallel:
trainer.module.model.apply(lambda m: setattr(m, 'width_mult', 1.0))
else:
trainer.model.apply(lambda m: setattr(m, 'width_mult', 1.0))
# log
writer.add_scalar('supernet_loss', supernet_loss, step)
writer.add_scalar('minnet_loss', minnet_loss, step)
pbar.set_postfix(supernet_loss='%.3f' % supernet_loss, minnet_loss='%.3f' % minnet_loss)
# sample
if FLAGS.sample_step > 0 and step % FLAGS.sample_step == 0:
net_model.eval()
if FLAGS.parallel:
trainer.module.model.apply(lambda m: setattr(m, 'width_mult', 1.0))
else:
trainer.model.apply(lambda m: setattr(m, 'width_mult', 1.0))
with torch.no_grad():
x_0 = ema_sampler(x_T)
grid = (make_grid(x_0) + 1) / 2
path = os.path.join(
FLAGS.logdir, 'supernet_sample', '%d.png' % step)
save_image(grid, path)
writer.add_image('supernet_sample', grid, step)
if FLAGS.parallel:
trainer.module.model.apply(lambda m: setattr(m, 'width_mult', FLAGS.min_width))
else:
trainer.model.apply(lambda m: setattr(m, 'width_mult', FLAGS.min_width))
with torch.no_grad():
x_0 = ema_sampler(x_T)
grid = (make_grid(x_0) + 1) / 2
path = os.path.join(
FLAGS.logdir, 'minnet_sample', '%d.png' % step)
save_image(grid, path)
writer.add_image('minnet_sample', grid, step)
net_model.train()
else:
# train
optim.zero_grad()
x_0 = next(datalooper).to(device)
loss = trainer(x_0).mean()
loss.backward()
torch.nn.utils.clip_grad_norm_(
net_model.parameters(), FLAGS.grad_clip)
optim.step()
sched.step()
ema(net_model, ema_model, FLAGS.ema_decay)
# log
writer.add_scalar('loss', loss, step)
pbar.set_postfix(loss='%.3f' % loss)
# sample
if FLAGS.sample_step > 0 and step % FLAGS.sample_step == 0:
net_model.eval()
with torch.no_grad():
x_0 = ema_sampler(x_T)
grid = (make_grid(x_0) + 1) / 2
path = os.path.join(
FLAGS.logdir, 'sample', '%d.png' % step)
save_image(grid, path)
writer.add_image('sample', grid, step)
net_model.train()
# save
if FLAGS.save_step > 0 and step % FLAGS.save_step == 0:
ckpt = {
'net_model': net_model.state_dict(),
'ema_model': ema_model.state_dict(),
'sched': sched.state_dict(),
'optim': optim.state_dict(),
'step': step,
'x_T': x_T,
}
torch.save(ckpt, os.path.join(FLAGS.logdir, 'ckpt.pt'))
torch.save(ckpt, os.path.join(FLAGS.logdir, 'ckpt_{}.pt'.format(step)))
# evaluate
if FLAGS.eval_step > 0 and step % FLAGS.eval_step == 0:
net_IS, net_FID, _ = evaluate(net_sampler, net_model)
ema_IS, ema_FID, _ = evaluate(ema_sampler, ema_model)
metrics = {
'IS': net_IS[0],
'IS_std': net_IS[1],
'FID': net_FID,
'IS_EMA': ema_IS[0],
'IS_std_EMA': ema_IS[1],
'FID_EMA': ema_FID
}
pbar.write(
"%d/%d " % (step, FLAGS.total_steps) +
", ".join('%s:%.3f' % (k, v) for k, v in metrics.items()))
for name, value in metrics.items():
writer.add_scalar(name, value, step)
writer.flush()
with open(os.path.join(FLAGS.logdir, 'eval.txt'), 'a') as f:
metrics['step'] = step
f.write(json.dumps(metrics) + "\n")
writer.close()
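# Illustrative sketch (hypothetical helper, not part of the original script): the
# sandwich branch of train() accumulates gradients from the full-width supernet, the
# minimum-width subnet, and (num_sandwich_sampling - 2) randomly chosen intermediate
# widths before one clipped optimizer step. This helper only lists the width
# multipliers such a step would visit, using the default flag values:
def _sandwich_widths_demo(min_width=0.25, candidate_width=(0.75, 0.5), num_sandwich_sampling=3):
    widths = [1.0, min_width]
    widths += [random.choice(candidate_width) for _ in range(num_sandwich_sampling - 2)]
    return widths  # e.g. [1.0, 0.25, 0.5]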
def Eval():
# model setup
if FLAGS.slimmable_unet:
model = SlimmableUNet(
T=FLAGS.T, ch=FLAGS.ch, ch_mult=FLAGS.ch_mult, attn=FLAGS.attn,
num_res_blocks=FLAGS.num_res_blocks, dropout=FLAGS.dropout)
model.apply(lambda m: setattr(m, 'width_mult', FLAGS.assigned_width))
else:
model = UNet(
T=FLAGS.T, ch=FLAGS.ch, ch_mult=FLAGS.ch_mult, attn=FLAGS.attn,
num_res_blocks=FLAGS.num_res_blocks, dropout=FLAGS.dropout)
sampler = GaussianDiffusionSampler(
model, FLAGS.beta_1, FLAGS.beta_T, FLAGS.T, img_size=FLAGS.img_size,
mean_type=FLAGS.mean_type, var_type=FLAGS.var_type).to(device)
if FLAGS.parallel:
sampler = torch.nn.DataParallel(sampler)
# load model and evaluate
ckpt = torch.load(os.path.join(FLAGS.logdir, '{}.pt'.format(FLAGS.ckpt_name)))
# model.load_state_dict(ckpt['net_model'])
# (IS, IS_std), FID, samples = evaluate(sampler, model)
# print("Model : IS:%6.3f(%.3f), FID:%7.3f" % (IS, IS_std, FID))
# save_image(
# torch.tensor(samples[:256]),
# os.path.join(FLAGS.logdir, 'samples.png'),
# nrow=16)
model.load_state_dict(ckpt['ema_model'])
(IS, IS_std), FID, samples = evaluate(sampler, model)
if FLAGS.slimmable_unet:
print('width: {}'.format(int(FLAGS.assigned_width * FLAGS.ch)))
print("Model(EMA): IS:%6.3f(%.3f), FID:%7.3f" % (IS, IS_std, FID))
save_image(
torch.tensor(samples[:256]),
os.path.join(FLAGS.logdir, 'samples_ema.png'),
nrow=16)
def eval_stepaware():
# model setup
with open(os.path.join(FLAGS.logdir, 'search.txt'), 'r') as f:
strategy = eval(f.readlines()[0])
model = StepAwareUNet(
T=FLAGS.T, ch=FLAGS.ch, ch_mult=FLAGS.ch_mult, attn=FLAGS.attn,
num_res_blocks=FLAGS.num_res_blocks, dropout=FLAGS.dropout, strategy=strategy)
sampler = GaussianDiffusionSampler(
model, FLAGS.beta_1, FLAGS.beta_T, FLAGS.T, img_size=FLAGS.img_size,
mean_type=FLAGS.mean_type, var_type=FLAGS.var_type).to(device)
if FLAGS.parallel:
sampler = torch.nn.DataParallel(sampler)
# load model and evaluate
ckpt = torch.load(os.path.join(FLAGS.logdir, '{}.pt'.format(FLAGS.ckpt_name)))
# model.load_state_dict(ckpt['net_model'])
# (IS, IS_std), FID, samples = evaluate(sampler, model)
# print("Model : IS:%6.3f(%.3f), FID:%7.3f" % (IS, IS_std, FID))
# save_image(
# torch.tensor(samples[:256]),
# os.path.join(FLAGS.logdir, 'samples.png'),
# nrow=16)
model.load_state_dict(ckpt['ema_model'])
(IS, IS_std), FID, samples = evaluate(sampler, model)
print(strategy)
print("Model(EMA): IS:%6.3f(%.3f), FID:%7.3f" % (IS, IS_std, FID))
save_image(
torch.tensor(samples[:256]),
os.path.join(FLAGS.logdir, 'samples_ema.png'),
nrow=16)
def eval_ensemble():
os.makedirs(os.path.join(FLAGS.logdir, 'sample'))
# model setup | model = EnsembleUNet( | 4 | 2023-11-29 03:51:12+00:00 | 12k |
SEU-ProactiveSecurity-Group/MalPurifier | core/droidfeature/inverse_feature_extraction.py | [
{
"identifier": "Apk2features",
"path": "core/droidfeature/feature_extraction.py",
"snippet": "class Apk2features(object):\n \"\"\"Get features from an APK\"\"\"\n\n def __init__(self,\n naive_data_save_dir, # 用于保存中间数据的目录\n intermediate_save_dir, # 用于保存特征 pickle 文件的目录\n number_of_smali_files=1000000, # 处理的 smali 文件的最大数量,默认为 1000000。\n max_vocab_size=10000, # 词汇表的最大大小,默认为 10000\n file_ext='.feat', # 文件扩展名,默认为 '.feat'\n update=False, # 表示是否重新计算原始特征,默认为 False\n proc_number=2, # 进程数,默认为 2\n **kwargs \n ):\n \"\"\"\n initialization\n :param naive_data_save_dir: a directory for saving intermediates\n :param intermediate_save_dir: a directory for saving feature pickle files\n :param number_of_smali_files: the maximum number of smali files processed\n :param max_vocab_size: the maximum number of words\n :param file_ext: file extension\n :param update: boolean indicator for recomputing the naive features\n :param proc_number: process number\n \"\"\"\n self.naive_data_save_dir = naive_data_save_dir\n self.intermediate_save_dir = intermediate_save_dir\n self.maximum_vocab_size = max_vocab_size\n self.number_of_smali_files = number_of_smali_files\n\n self.file_ext = file_ext\n self.update = update\n self.proc_number = proc_number\n\n if len(kwargs) > 0:\n logger.warning(\"unused hyper parameters {}.\".format(kwargs))\n\n # 这段代码定义了 Apk2features 类的 feature_extraction 方法,\n # 用于从指定目录中的 APK 文件中提取特征并保存。方法返回提取特征后的文件路径。\n def feature_extraction(self, sample_dir):\n \"\"\" save the android features and return the saved paths \"\"\"\n sample_path_list = utils.check_dir(sample_dir)\n pool = multiprocessing.Pool(self.proc_number, initializer=utils.pool_initializer)\n\n # 定义一个名为 get_save_path 的内部函数,用于获取特征保存路径。\n # 它根据 APK 文件的 SHA256 编码和文件扩展名生成保存路径。\n # 如果该路径对应的文件已存在,并且不需要更新特征,则返回 None。否则,返回保存路径。\n def get_save_path(a_path):\n sha256_code = os.path.splitext(os.path.basename(a_path))[0] # utils.get_sha256(apk_path)\n save_path = os.path.join(self.naive_data_save_dir, sha256_code + self.file_ext)\n\n if os.path.exists(save_path) and (not self.update):\n return\n else:\n return save_path\n \n # 创建一个名为 params 的列表,包含需要提取特征的 APK 文件路径、处理的 smali 文件最大数量和特征保存路径。\n # 只有当 get_save_path 返回值不为 None 时,才将 APK 文件路径添加到 params 列表中。\n params = [(apk_path, self.number_of_smali_files, get_save_path(apk_path)) for \\\n apk_path in sample_path_list if get_save_path(apk_path) is not None]\n \n # 使用 pool.imap_unordered() 方法并行地对 params 中的每个元素执行 feature_gen.apk2feat_wrapper 函数。\n # 使用 tqdm 显示处理进度。如果处理过程中出现异常,使用 logger.error 输出错误信息。\n for res in tqdm(pool.imap_unordered(feature_gen.apk2feat_wrapper, params), total=len(params)):\n if isinstance(res, Exception):\n logger.error(\"Failed processing: {}\".format(str(res)))\n pool.close()\n pool.join()\n\n feature_paths = []\n \n # 遍历 sample_path_list,获取每个 APK 文件的特征保存路径。\n # 如果路径对应的文件存在,则将其添加到 feature_paths 列表中。\n for i, apk_path in enumerate(sample_path_list):\n sha256_code = os.path.splitext(os.path.basename(apk_path))[0] # utils.get_sha256(apk_path)\n save_path = os.path.join(self.naive_data_save_dir, sha256_code + self.file_ext)\n if os.path.exists(save_path):\n feature_paths.append(save_path)\n\n return feature_paths\n\n\n def get_vocab(self, feature_path_list=None, gt_labels=None):\n \"\"\"\n get vocabularies incorporating feature selection\n :param feature_path_list: feature_path_list, list, a list of paths, \n each of which directs to a feature file (we \\\n suggest using the feature files for the training purpose)\n :param gt_labels: gt_labels, list or numpy.ndarray, ground truth labels\n :return: list, a list of words\n \n feature_path_list:特征文件路径列表,每个路径指向一个特征文件。\n 
gt_labels:真实标签,表示每个特征文件对应的恶意软件或良性样本。\n 方法返回一个包含词汇表、词汇信息和词汇类型的元组。\n \"\"\"\n vocab_saving_path = os.path.join(self.intermediate_save_dir, 'data.vocab')\n vocab_type_saving_path = os.path.join(self.intermediate_save_dir, 'data.vocab_type')\n vocab_extra_info_saving_path = os.path.join(self.intermediate_save_dir, 'data.vocab_info')\n \n # 如果这些文件已经存在且不需要更新,从文件中读取并返回词汇表、词汇信息和词汇类型。\n if os.path.exists(vocab_saving_path) and os.path.exists(vocab_saving_path) and (not self.update):\n return utils.read_pickle(vocab_saving_path), utils.read_pickle(vocab_extra_info_saving_path), utils.read_pickle(vocab_type_saving_path)\n elif feature_path_list is None and gt_labels is None:\n raise FileNotFoundError(\"No vocabulary found and no features for producing vocabulary!\")\n else:\n pass\n \n # 确保输入的恶意软件和良性样本标签都存在,并检查\n # feature_path_list 和 gt_labels 的长度是否相等。\n assert not (np.all(gt_labels == 1) or np.all(gt_labels == 0)), 'Expect both malware and benign samples.'\n assert len(feature_path_list) == len(gt_labels)\n\n # 使用 collections.Counter 和 collections.defaultdict 创建计数器和字典以存储词汇表相关信息。\n counter_mal, counter_ben = collections.Counter(), collections.Counter()\n feat_info_dict = collections.defaultdict(set)\n feat_type_dict = collections.defaultdict(str)\n \n # 遍历 feature_path_list 和 gt_labels\n for feature_path, label in zip(feature_path_list, gt_labels):\n if not os.path.exists(feature_path):\n continue\n features = feature_gen.read_from_disk(feature_path)\n # 获取特征列表、特征信息列表和特征类型列表。\n # 根据标签更新恶意软件和良性样本的计数器。\n feature_list, feature_info_list, feature_type_list = feature_gen.get_feature_list(features)\n feature_occurrence = list(dict.fromkeys(feature_list))\n for _feat, _feat_info, _feat_type in zip(feature_list, feature_info_list, feature_type_list):\n feat_info_dict[_feat].add(_feat_info)\n feat_type_dict[_feat] = _feat_type\n if label:\n counter_mal.update(list(feature_occurrence))\n else:\n counter_ben.update(list(feature_occurrence))\n all_words = list(dict.fromkeys(list(counter_ben.keys()) + list(counter_mal.keys())))\n if len(all_words) <= 0:\n raise ValueError(\"No features exist on this dataset.\")\n\n # 根据特征选择策略选择词汇\n maximum_vocab_size = self.maximum_vocab_size\n selected_words = []\n \n # ----------------------------------------\n # dangerous permission\n # 危险权限选择:提取词汇表中的危险权限特征,并对每个权限进行检查。\n # 如果权限被认为是危险的(通过 feature_gen.permission_check 函数判断),\n # 则将其添加到 selected_words 列表中。\n all_words_type = list(map(feat_type_dict.get, all_words))\n perm_pos = np.array(all_words_type)[...] == feature_gen.PERMISSION\n perm_features = np.array(all_words)[perm_pos]\n for perm in perm_features:\n if feature_gen.permission_check(perm):\n selected_words.append(perm)\n\n # intent\n # 意图选择:提取词汇表中的意图特征,并对每个意图进行检查。\n # 如果意图被认为是有害的(通过 feature_gen.intent_action_check 函数判断),\n # 则将其添加到 selected_words 列表中。\n intent_pos = np.array(all_words_type)[...] == feature_gen.INTENT\n intent_features = np.array(all_words)[intent_pos]\n for intent in intent_features:\n if feature_gen.intent_action_check(intent):\n selected_words.append(intent)\n\n # suspicious apis\n # 可疑 API 选择:提取词汇表中的系统 API 特征,并对每个 API 进行检查。\n # 如果 API 被认为是可疑的或敏感的(通过 feature_gen.check_suspicious_api 或 feature_gen.check_sensitive_api 函数判断),\n # 则将其添加到 selected_words 列表中。\n api_pos = np.array(all_words_type)[...] 
== feature_gen.SYS_API\n susp_apis = np.array(all_words)[api_pos]\n for api in susp_apis:\n if feature_gen.check_suspicious_api(api) or feature_gen.check_sensitive_api(api):\n selected_words.append(api)\n # ----------------------------------------\n \n # remove components\n # 移除组件:从词汇表中移除所有属于活动、服务、接收器和提供器的组件。\n api_comps = np.array(all_words_type)[...] == feature_gen.ACTIVITY\n api_comps = api_comps | (np.array(all_words_type)[...] == feature_gen.SERVICE)\n api_comps = api_comps | (np.array(all_words_type)[...] == feature_gen.RECEIVER)\n api_comps = api_comps | (np.array(all_words_type)[...] == feature_gen.PROVIDER)\n \n # 计算恶意软件和良性样本的特征频率差并根据差异对词汇进行排序。\n # 选择最多 maximum_vocab_size 个词汇。\n all_words = list(np.array(all_words)[~api_comps])\n for s_word in selected_words:\n all_words.remove(s_word)\n logger.info(\"The total number of words: {}-{}.\".format(len(selected_words), len(all_words)))\n\n # 计算恶意样本的特征频率\n mal_feature_frequency = np.array(list(map(counter_mal.get, all_words)))\n mal_feature_frequency[mal_feature_frequency == None] = 0\n mal_feature_frequency = mal_feature_frequency.astype(np.float64)\n mal_feature_frequency /= np.sum(gt_labels)\n\n # 计算良性样本的特征频率\n ben_feature_frequency = np.array(list(map(counter_ben.get, all_words)))\n ben_feature_frequency[ben_feature_frequency == None] = 0\n ben_feature_frequency = ben_feature_frequency.astype(np.float64)\n ben_feature_frequency /= float(len(gt_labels) - np.sum(gt_labels))\n\n # 计算特征频率差\n feature_freq_diff = abs(mal_feature_frequency - ben_feature_frequency)\n\n # 根据特征频率差进行排序\n posi_selected = np.argsort(feature_freq_diff)[::-1]\n ordered_words = selected_words + [all_words[p] for p in posi_selected]\n\n # 选择最多 maximum_vocab_size 个词汇\n selected_words = ordered_words[:maximum_vocab_size]\n\n # 获取所选词汇的类型和对应的词汇信息:\n # 使用 feat_type_dict 和 feat_info_dict 字典分别获取所选词汇的类型和对应的词汇信息,以便在之后的处理中使用。\n selected_word_type = list(map(feat_type_dict.get, selected_words))\n corresponding_word_info = list(map(feat_info_dict.get, selected_words))\n\n # 保存所选词汇、词汇类型和对应词汇信息到文件,然后返回这些值\n if len(selected_words) > 0:\n utils.dump_pickle(selected_words, vocab_saving_path)\n utils.dump_pickle(selected_word_type, vocab_type_saving_path)\n utils.dump_pickle(corresponding_word_info, vocab_extra_info_saving_path)\n return selected_words, corresponding_word_info, selected_word_type\n \n \n def feature_mapping(self, feature_path_list, dictionary):\n \"\"\"\n mapping feature to numerical representation\n :param feature_path_list: a list of feature paths\n :param dictionary: vocabulary -> index\n :return: 2D representation\n :rtype numpy.ndarray\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def get_non_api_size(vocabulary=None):\n cursor = 0\n for word in vocabulary:\n if '->' not in word: # exclude the api features\n cursor += 1\n else:\n break\n return cursor\n\n def get_cached_name(self, feature_path):\n if os.path.isfile(feature_path):\n return os.path.splitext(os.path.basename(feature_path))[0] + '.npz'\n else:\n raise FileNotFoundError\n\n # ⭐ 这段代码定义了一个名为 feature2ipt 的方法,它将应用程序的特征映射到数值表示。\n # feature2ipt 方法的主要目的是将应用程序的特征映射到一个固定长度的向量,\n # 其中每个元素表示对应词汇表中单词的存在(1)或不存在(0)。\n # 这样的数值表示可以作为机器学习模型的输入,以便对应用程序进行分类或其他分析任务。\n def feature2ipt(self, feature_path, label, vocabulary=None, cache_dir=None):\n \"\"\"\n Map features to numerical representations\n\n Parameters\n --------\n :param feature_path, string, a path directs to a feature file\n :param label, int, ground truth labels\n :param vocabulary:list, a list of words\n :param cache_dir: a temporal folder\n 
:return: numerical representations corresponds to an app. Each representation contains a tuple\n (feature 1D array, label)\n \"\"\"\n # 确保词汇表不为空\n assert vocabulary is not None and len(vocabulary) > 0\n \n # 检查缓存目录是否存在,如果存在则加载缓存数据\n if isinstance(cache_dir, str):\n rpst_cached_name = self.get_cached_name(feature_path)\n rpst_cached_path = os.path.join(cache_dir, rpst_cached_name)\n if os.path.exists(rpst_cached_path):\n return utils.read_pickle(rpst_cached_path, use_gzip=True)\n \n # 如果 feature_path 无效,则返回零向量表示\n if not isinstance(feature_path, str):\n logger.warning(\"Cannot find the feature path: {}, zero vector used\".format(feature_path))\n return np.zeros((len(vocabulary), ), dtype=np.float32), []\n\n if not os.path.exists(feature_path):\n logger.warning(\"Cannot find the feature path: {}, zero vector used\".format(feature_path))\n return np.zeros((len(vocabulary), ), dtype=np.float32), []\n\n # 从给定的 feature_path 加载原始特征,并将其格式化为非 API 特征和 API 特征。\n native_features = feature_gen.read_from_disk(feature_path)\n non_api_features, api_features = feature_gen.format_feature(native_features)\n features = non_api_features + api_features\n\n # 初始化一个长度与词汇表相等的零向量(representation_vector)作为数值表示。\n representation_vector = np.zeros((len(vocabulary), ), dtype=np.float32)\n \n # 将词汇表映射到其索引,并根据提取到的特征填充 representation_vector。\n dictionary = dict(zip(vocabulary, range(len(vocabulary))))\n filled_pos = [idx for idx in list(map(dictionary.get, features)) if idx is not None]\n \n if len(filled_pos) > 0:\n representation_vector[filled_pos] = 1.\n return representation_vector, label"
},
{
"identifier": "feature_gen",
"path": "core/droidfeature/feature_gen.py",
"snippet": "PERMISSION = 'permission'\nINTENT = 'intent'\nACTIVITY = 'activity'\nSERVICE = 'service'\nRECEIVER = 'receiver'\nPROVIDER = 'provider'\nHARDWARE = 'hardware'\nSYS_API = 'api'\nDANGEROUS_PERMISSION_TAGS = [\n 'android.permission.WRITE_CONTACTS',\n 'android.permission.GET_ACCOUNTS',\n 'android.permission.READ_CONTACTS',\n 'android.permission.READ_CALL_LOG',\n 'android.permission.READ_PHONE_STATE',\n 'android.permission.CALL_PHONE',\n 'android.permission.WRITE_CALL_LOG',\n 'android.permission.USE_SIP',\n 'android.permission.PROCESS_OUTGOING_CALLS',\n 'com.android.voicemail.permission.ADD_VOICEMAIL',\n 'android.permission.READ_CALENDAR',\n 'android.permission.WRITE_CALENDAR',\n 'android.permission.CAMERA',\n 'android.permission.BODY_SENSORS',\n 'android.permission.ACCESS_FINE_LOCATION',\n 'android.permission.ACCESS_COARSE_LOCATION',\n 'android.permission.READ_EXTERNAL_STORAGE',\n 'android.permission.WRITE_EXTERNAL_STORAGE',\n 'android.permission.RECORD_AUDIO',\n 'android.permission.READ_SMS',\n 'android.permission.RECEIVE_WAP_PUSH',\n 'android.permission.RECEIVE_MMS',\n 'android.permission.RECEIVE_SMS',\n 'android.permission.SEND_SMS',\n 'android.permission.READ_CELL_BROADCASTS'\n]\nINTENT_TAGS = [\n 'android.intent.action',\n 'com.android.vending',\n 'android.net',\n 'com.android'\n]\nDANGEROUS_API_SIMLI_TAGS = [\n 'Landroid/content/Intent;->setDataAndType',\n 'Landroid/content/Intent;->setFlags',\n 'Landroid/content/Intent;->addFlags',\n 'Landroid/content/Intent;->putExtra',\n 'Landroid/content/Intent;->init',\n 'Ljava/lang/reflect',\n 'Ljava/lang/Object;->getClass',\n 'Ljava/lang/Class;->getConstructor',\n 'Ljava/lang/Class;->getConstructors',\n 'Ljava/lang/Class;->getDeclaredConstructor',\n 'Ljava/lang/Class;->getDeclaredConstructors',\n 'Ljava/lang/Class;->getField',\n 'Ljava/lang/Class;->getFields',\n 'Ljava/lang/Class;->getDeclaredField',\n 'Ljava/lang/Class;->getDeclaredFields',\n 'Ljava/lang/Class;->getMethod',\n 'Ljava/lang/Class;->getMethods',\n 'Ljava/lang/Class;->getDeclaredMethod',\n 'Ljava/lang/Class;->getDeclaredMethods',\n 'Ljavax/crypto',\n 'Ljava/security/spec',\n 'Ldalvik/system/DexClassLoader',\n 'Ljava/lang/System;->loadLibrary',\n 'Ljava/lang/Runtime',\n 'Landroid/os/Environment;->getExternalStorageDirectory',\n 'Landroid/telephony/TelephonyManager;->getDeviceId',\n 'Landroid/telephony/TelephonyManager;->getSubscriberId',\n 'setWifiEnabled',\n 'execHttpRequest',\n 'getPackageInfo',\n 'Landroid/content/Context;->getSystemService',\n 'setWifiDisabled',\n 'Ljava/net/HttpURLconnection;->setRequestMethod',\n 'Landroid/telephony/SmsMessage;->getMessageBody',\n 'Ljava/io/IOException;->printStackTrace',\n 'system/bin/su' # non-alike an api but emerging in Drebin paper\n]\nTAG_SPLITTER = '#.tag#'\ndef apk2feat_wrapper(kwargs):\ndef apk2features(apk_path, max_number_of_smali_files=10000, saving_path=None):\ndef permission_check(permission):\ndef get_permissions(app):\ndef get_components(app):\ndef get_providers(app):\ndef intent_action_check(action_in_question):\ndef get_intent_actions(app):\n def _analyze_component(component_elements, component_name):\ndef get_hardwares(app):\ndef check_suspicious_api(api_query):\ndef check_sensitive_api(api_query):\ndef get_apis(dexes, max_number_of_smali_files):\ndef save_to_disk(data, saving_path):\ndef read_from_disk(loading_path):\ndef get_feature_list(feature):\ndef get_api_name(api_info):\ndef get_api_info(node_tag):\ndef format_feature(feature):\ndef get_api_class(node_tag):\ndef get_caller_info(node_tag):\ndef 
get_api_tag(api_ivk_line, api_callee_class_name, api_callee_name):\ndef get_same_class_prefix(entry_node_list):\ndef _main():"
},
{
"identifier": "dex_manip",
"path": "tools/dex_manip.py",
"snippet": "CONST_STR = 'android/content/res/' # the append smali files will be put at the folder smali/android/contect/res of new APK\nANNOTATION_REF = '''\n .annotation system Ldalvik/annotation/Throws;\n value = {\n Ljava/lang/reflect/InvocationTargetException;,\n Ljava/lang/IllegalAccessException;,\n Ljava/lang/NoSuchMethodException;\n }\n .end annotation\n'''\nFIELD_TEMPLATE = '.field private static final {stringName}:Ljava/lang/String; = \\\"{stringValue}\\\"'\nEMPTY_METHOD = '''.method private static final {methodName}()V\n{methodBody}\n\n return-void\n.end method\n'''\nVAR_STATEMENT_TEMPLATE = ' .local v{varNum:d}, \"{varName}\":{varType}'\nVAR_END_TEMPLATE = ' .end local v{varNum:d} # \"{varName}\":{varType}'\ndef is_wide_type(arg_type):\ndef read_full_file(file_path):\ndef write_whole_file(obj, file_path):\ndef get_param_smali_type(params, is_smali=True):\ndef encrypt_line(smali_line, name_string, encryption_class_name):\ndef encrypt_string(all_smali_paths, name_string, mod_count=1):\ndef name2path(name):\ndef abs_path_comp(path, pkg_path):\ndef change_source_name(smali_paths, act_source_name, act_dst_name):\ndef find_smali_w_name(smali_paths, source_name):\ndef change_method_name(block_smali_method, rdm_number=2):\ndef insert_dead_code(smali_file_path, smali_block):\ndef is_specfic_exsit(desired_str, src):\ndef split_invoke_argument(invoke_argument):\ndef is_class(class_name_smali):\ndef is_wide_type(invoke_type):\ndef is_void(invoke_return):\ndef is_wide(invoke_return):\ndef is_obj(invoke_return):\ndef change_invoke_by_ref(new_class_name, method_fh, ivk_type, ivk_param, ivk_object, ivk_method, ivk_argument,\n ivk_return):\ndef get_smali_paths(directory):\ndef retrieve_smali_dirs(disassembly_dir):\ndef retrieve_methods(disassembly_dir):\ndef retrieve_api_caller_info(api_name, disassembly_dir):\ndef get_super_class_name(smali_path):\ndef fix_invalid_id(comp_name, spec_chr = '@&'):\ndef path_split(path):\ndef rename_file(src,dst):\ndef rename_smali_file(smali_path, activity_name, new_activity_name):\ndef rename_dir(old, new):\ndef rename_tree_dir(old_name, new_name):\ndef rename_smali_dir(smali_dir, activity_name, new_activity_name):\n def rename(src_path, new_path):\ndef change_class_name(smali_paths, source_name, dst_name, pkg_name):\ndef change_instantition_name(smali_paths, related_class, source_name, dst_name, pkg_name):\ndef _main():"
},
{
"identifier": "xml_manip",
"path": "tools/xml_manip.py",
"snippet": "NAMESPACE = '{http://schemas.android.com/apk/res/android}'\n MSG = 'Repetition allowed:{}/\\'{}\\'.'.format(feature_type, spec_name)\n MSG = \"Component inserted Successfully.\"\n MSG = 'Repetition allowed:{}/\\'{}\\'.'.format(comp_type, comp_spec_name)\n MSG = \"Component inserted Successfully.\"\n MSG = \"Provider inserted Successfully.\"\n MSG = \"intent-filter inserted Successfully.\"\ndef get_xmltree_by_ET(xml_path):\ndef insert_perm_manifest(manifest_ET_tree, feature_type, spec_name, mod_count=1):\ndef insert_comp_manifest(manifest_ET_tree, comp_type, comp_spec_name, mod_count=1):\ndef insert_provider_manifest(manifest_ET_tree, provider_info, mod_count=1):\ndef insert_intent_manifest(manifest_ET_tree, comp_type, intent_spec_name, mod_count=1):\ndef insert_elem_manifest(manifest_ET_tree, elem_type, elem_spec_name, mod_count=1):\ndef get_package_name(manifest_path):\ndef dump_xml(save_path, et_tree):\ndef fix_invalid_id(comp_name, spec_chr='@&'):\ndef defix_invalid_id(comp_name,spec_chr='@&'):\ndef check_comp_name(manifest_ET_tree, comp_type, comp_spec_name):\ndef rename_comp_manifest(manifest_ET_tree, comp_type, comp_spec_name):\ndef get_xml_paths(directory):\ndef classname2dotstring(path_str):\ndef transform_class_name(class_names):\ndef extend_name(related_class, pkg_name):\ndef change_match_xml_line(xml_line, class_strings, src_name, dst_name):\ndef change_xml(xml_paths, related_class_names, source_name, dst_name, pkg_name):"
},
{
"identifier": "utils",
"path": "tools/utils.py",
"snippet": "ENC_KEY = 'cab228a122d3486bac7fab148e8b5aba'\n MSG = \"No such directory or file {} exists!\".format(sample_dir)\n MSG = \"A directory or a list of paths are allowed!\"\ndef pool_initializer():\ndef retrive_files_set(base_dir, dir_ext, file_ext):\n def get_file_name(root_dir, file_ext):\ndef check_dir(sample_dir):\ndef dump_joblib(data, path):\ndef read_joblib(path):\ndef load_json(json_path):\ndef dump_json(obj_dict, file_path):\ndef dump_pickle(data, path, use_gzip=False):\ndef read_pickle(path, use_gzip=False):\ndef dump_pickle_frd_space(data, path):\ndef read_pickle_frd_space(path):\ndef dump_list_of_lists(data, path):\ndef read_list_of_lists(path):\ndef mkdir(target):\ndef read_txt(path, mode='r'):\ndef dump_txt(data_str, path, mode='w'):\ndef read_file_by_fileinput(file_path, inplace=True):\n def __init__(self, manager, use_cache=True):\n def is_cached(self, key):\n def reset(self):\n def get(self, key):\n def cache(self, key, img, lbl):\ndef build_kwargs(keys, arg_dict):\ndef inverse_kwargs(vars):\ndef save_args(fout, args):\ndef load_args(fout):\ndef get_group_args(args, args_parser, title):\ndef tensor_coo_sp_to_ivs(sparse_tensor):\ndef ivs_to_tensor_coo_sp(ivs, device='cpu'):\ndef sp_to_symmetric_sp(sparse_mx):\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\ndef to_tensor(feature_x=None, labels=None, device='cpu'):\n def _to_torch_tensor(mat):\ndef to_device(feature_x=None, labels=None, device='cpu'):\ndef psn(x_tensor, prob, lower_value=0., upper_value=1.):\n def __init__(self):\n def __call__(self, module):\ndef round_x(x, alpha=0.5):\ndef get_x0(x, rounding_threshold=0.5, is_sample=False):\ndef or_tensors(x_1, x_2):\ndef xor_tensors(x_1, x_2):\ndef get_mal_data(x_batch, y_batch):\ndef get_mal_ben_data(x_batch, y_batch):\ndef java_class_name2smali_name(cls):\ndef remove_duplicate(components):\ndef crypt_identifier(idf, seed=2345):\n def md5_transform():\ndef random_string(code):\n def sha1_transform():\ndef string_on_code(code):\n def md5_transform():\ndef random_name(seed=2345, code='abc'):\ndef apply_encryption(base_string):\ndef get_sha256(file_path):\nclass SimplifyClass:\nclass NonnegWeightConstraint(object):"
},
{
"identifier": "config",
"path": "config.py",
"snippet": "def parser_config():"
}
] | import os
import time
import warnings
import random
import shutil
import tempfile
import subprocess
import traceback
import string
import re
import numpy as np
import networkx as nx
import torch
from core.droidfeature import Apk2features
from core.droidfeature import feature_gen
from tools import dex_manip, xml_manip, utils
from config import config, logging, ErrorHandler | 7,893 | INSERTION_STATIC_TEMPLATE = '''.method public static {newMethodName}()V
.locals {numLocals:d}
.prologue
const/4 v0, 0x0
.local v0, "a":I
const/4 v1, 0x1
if-ne v0, v1, :cond_0
:try_start_0
{argInitialization}
{invokeType} {{{paramRegisters}}}, {apiClassName}->{methodName}({argumentTypes}){returnType}
:try_end_0
.catch Ljava/lang/Exception; {{:try_start_0 .. :try_end_0}} :catch_0
{varEndCont}
goto :goto_0
:catch_0
move-exception v0
:cond_0
:goto_0
return-void
.end method
'''
INSERTION_TEMPLATE = '''.method public static {newMethodName}()V
.locals {numLocals:d}
.prologue
const/4 v0, 0x0
.local v0, "a":I
const/4 v1, 0x1
if-ne v0, v1, :cond_0
const/4 v0, 0x0
.local v0, "{varRandName}":{apiClassName}
:try_start_0
{argInitialization}
{invokeType} {{{paramRegisters}}}, {apiClassName}->{methodName}({argumentTypes}){returnType}
:try_end_0
.catch Ljava/lang/Exception; {{:try_start_0 .. :try_end_0}} :catch_0
.end local v0 # "{varRandName}":{apiClassName}
{varEndCont}
goto :goto_0
:catch_0
move-exception v0
:cond_0
:goto_0
return-void
.end method
'''
ENTRY_METHOD_STATEMENT = 'public onBind(Landroid/content/Intent;)Landroid/os/IBinder;'
EMPTY_SERVICE_BODY = '''.class public L{fullClassName}
.super Landroid/app/Service;
.source "{className}.java"
# direct methods
.method public constructor <init>()V
.locals 0
.line 8
invoke-direct {{p0}}, Landroid/app/Service;-><init>()V
.line 9
return-void
.end method
.method {entryMethodStatement}
.locals 2
.param p1, "intent" # Landroid/content/Intent;
.line 14
new-instance v0, Ljava/lang/UnsupportedOperationException;
const-string v1, "Not yet implemented"
invoke-direct {{v0, v1}}, Ljava/lang/UnsupportedOperationException;-><init>(Ljava/lang/String;)V
throw v0
.end method
'''
PROVIDER_TEMPLATE = '''.class public {ProviderCLS}
.super Landroid/content/ContentProvider;
.source "{CLSName}.java"
# direct methods
.method public constructor <init>()V
.locals 1
.prologue
.line 3
invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
return-void
.end method
'''
class InverseDroidFeature(object):
vocab, vocab_info, vocab_type = None, None, None
def __init__(self, seed=0):
random.seed(seed)
meta_data_saving_dir = config.get('dataset', 'intermediate')
naive_data_saving_dir = config.get('metadata', 'naive_data_pool')
|
random.seed(0)
logger = logging.getLogger('core.droidfeature.inverse_feature_extraction')
logger.addHandler(ErrorHandler)
TMP_DIR = '/tmp'
OP_INSERTION = '+'
OP_REMOVAL = '-'
MANIFEST = "AndroidManifest.xml"
REFLECTION_TEMPLATE = '''.class public Landroid/content/res/MethodReflection;
.super Ljava/lang/Object;
.source "MethodReflection.java"
# direct methods
.method public constructor <init>()V
.locals 1
.prologue
.line 3
invoke-direct {p0}, Ljava/lang/Object;-><init>()V
return-void
.end method
'''
DEFAULT_SMALI_DIR = 'android/content/res/' # the path corresponds to the reflection class set above
INSERTION_STATIC_TEMPLATE = '''.method public static {newMethodName}()V
.locals {numLocals:d}
.prologue
const/4 v0, 0x0
.local v0, "a":I
const/4 v1, 0x1
if-ne v0, v1, :cond_0
:try_start_0
{argInitialization}
{invokeType} {{{paramRegisters}}}, {apiClassName}->{methodName}({argumentTypes}){returnType}
:try_end_0
.catch Ljava/lang/Exception; {{:try_start_0 .. :try_end_0}} :catch_0
{varEndCont}
goto :goto_0
:catch_0
move-exception v0
:cond_0
:goto_0
return-void
.end method
'''
INSERTION_TEMPLATE = '''.method public static {newMethodName}()V
.locals {numLocals:d}
.prologue
const/4 v0, 0x0
.local v0, "a":I
const/4 v1, 0x1
if-ne v0, v1, :cond_0
const/4 v0, 0x0
.local v0, "{varRandName}":{apiClassName}
:try_start_0
{argInitialization}
{invokeType} {{{paramRegisters}}}, {apiClassName}->{methodName}({argumentTypes}){returnType}
:try_end_0
.catch Ljava/lang/Exception; {{:try_start_0 .. :try_end_0}} :catch_0
.end local v0 # "{varRandName}":{apiClassName}
{varEndCont}
goto :goto_0
:catch_0
move-exception v0
:cond_0
:goto_0
return-void
.end method
'''
ENTRY_METHOD_STATEMENT = 'public onBind(Landroid/content/Intent;)Landroid/os/IBinder;'
EMPTY_SERVICE_BODY = '''.class public L{fullClassName}
.super Landroid/app/Service;
.source "{className}.java"
# direct methods
.method public constructor <init>()V
.locals 0
.line 8
invoke-direct {{p0}}, Landroid/app/Service;-><init>()V
.line 9
return-void
.end method
.method {entryMethodStatement}
.locals 2
.param p1, "intent" # Landroid/content/Intent;
.line 14
new-instance v0, Ljava/lang/UnsupportedOperationException;
const-string v1, "Not yet implemented"
invoke-direct {{v0, v1}}, Ljava/lang/UnsupportedOperationException;-><init>(Ljava/lang/String;)V
throw v0
.end method
'''
PROVIDER_TEMPLATE = '''.class public {ProviderCLS}
.super Landroid/content/ContentProvider;
.source "{CLSName}.java"
# direct methods
.method public constructor <init>()V
.locals 1
.prologue
.line 3
invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
return-void
.end method
'''
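# Illustrative sketch (hypothetical helper, not part of the original repository): the
# smali templates above are ordinary Python format strings. Single-brace fields such
# as {ProviderCLS} are filled by str.format(), while doubled braces like {{p0}}
# render as the literal register list {p0}. Example rendering with made-up values:
def _render_provider_template_demo():
    return PROVIDER_TEMPLATE.format(
        ProviderCLS='Landroid/content/res/DemoProvider;',  # hypothetical class descriptor
        CLSName='DemoProvider')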
class InverseDroidFeature(object):
vocab, vocab_info, vocab_type = None, None, None
def __init__(self, seed=0):
random.seed(seed)
meta_data_saving_dir = config.get('dataset', 'intermediate')
naive_data_saving_dir = config.get('metadata', 'naive_data_pool') | self.feature_extractor = Apk2features(naive_data_saving_dir, meta_data_saving_dir) | 0 | 2023-11-27 02:00:23+00:00 | 12k |
IDEA-XL/InstructMol | llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "build_attn_bias",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')"
},
{
"identifier": "MPTBlock",
"path": "llava/model/language_model/mpt/blocks.py",
"snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)"
},
{
"identifier": "SharedEmbedding",
"path": "llava/model/language_model/mpt/custom_embedding.py",
"snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)"
},
{
"identifier": "NORM_CLASS_REGISTRY",
"path": "llava/model/language_model/mpt/norm.py",
"snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}"
},
{
"identifier": "MPTConfig",
"path": "llava/model/language_model/mpt/configuration_mpt.py",
"snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. 
Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) 
and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')"
},
{
"identifier": "AutoTokenizerForMOD",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer"
},
{
"identifier": "adapt_tokenizer_for_denoising",
"path": "llava/model/language_model/mpt/adapt_tokenizer.py",
"snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids"
},
{
"identifier": "add_bidirectional_mask_if_missing",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')"
},
{
"identifier": "convert_hf_causal_lm_to_prefix_lm",
"path": "llava/model/language_model/mpt/hf_prefixlm_converter.py",
"snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. 
' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')"
},
{
"identifier": "init_empty_weights",
"path": "llava/model/language_model/mpt/meta_init_context.py",
"snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f"
},
{
"identifier": "MODEL_INIT_REGISTRY",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}"
},
{
"identifier": "generic_param_init_fn_",
"path": "llava/model/language_model/mpt/param_init_fns.py",
"snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')"
}
] | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 9,446 | assert isinstance(attn_bias, torch.Tensor)
attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
if attention_mask is not None:
s_k = attention_mask.shape[-1]
if attn_bias is None:
attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
else:
_s_k = max(0, attn_bias.size(-1) - s_k)
attn_bias = attn_bias[:, :, :, _s_k:]
if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
return (attn_bias, None)
def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
(s_k, s_q) = attn_bias.shape[-2:]
if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.')
seq_len = prefix_mask.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
prefix = prefix_mask.view(-1, 1, 1, seq_len)
cannot_attend = ~torch.logical_or(causal, prefix.bool())
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
seq_len = sequence_id.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None):
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
if attention_mask is not None:
attention_mask = attention_mask.bool()
if prefix_mask is not None:
prefix_mask = prefix_mask.bool()
if not return_dict:
raise NotImplementedError('return_dict False is not implemented yet for MPT')
if output_attentions:
if self.attn_impl != 'torch':
raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
raise NotImplementedError('MPT does not support training with left padding.')
if self.prefix_lm and prefix_mask is None:
raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
if self.training:
if self.attn_uses_sequence_id and sequence_id is None:
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
elif self.attn_uses_sequence_id is False and sequence_id is not None:
warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
if input_ids is not None:
S = input_ids.size(1)
assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
tok_emb = self.wte(input_ids)
else:
assert inputs_embeds is not None
assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.'
S = inputs_embeds.size(1)
tok_emb = inputs_embeds
if self.alibi:
x = tok_emb
else:
past_position = 0
if past_key_values is not None:
if len(past_key_values) != self.config.n_layers:
raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
past_position = past_key_values[0][0].size(1)
if self.attn_impl == 'torch':
past_position = past_key_values[0][0].size(3)
if S + past_position > self.config.max_seq_len:
raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
if attention_mask is not None:
pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
pos_emb = self.wpe(pos)
x = tok_emb + pos_emb
if self.embedding_fraction == 1:
x = self.emb_drop(x)
else:
x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
assert isinstance(self.emb_drop, nn.Module)
x = self.emb_drop(x_shrunk)
(attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
if use_cache and past_key_values is None:
past_key_values = [() for _ in range(self.config.n_layers)]
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for (b_idx, block) in enumerate(self.blocks):
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
past_key_value = past_key_values[b_idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
(x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal)
else:
(x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
if past_key_values is not None:
past_key_values[b_idx] = past_key_value
if output_attentions:
assert all_self_attns is not None
all_self_attns = all_self_attns + (attn_weights,)
x = self.norm_f(x)
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
def param_init_fn(self, module):
init_fn_name = self.config.init_config['name']
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
    from .flash_attn_triton import flash_attn_func
except:
    pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max)
self._attn_bias_initialized = True
if self.attn_impl == 'flash':
return (self.attn_bias, attention_mask)
if self.attn_bias is not None:
self.attn_bias = self.attn_bias.to(dtype=dtype, device=device)
attn_bias = self.attn_bias
if self.prefix_lm:
assert isinstance(attn_bias, torch.Tensor)
assert isinstance(prefix_mask, torch.Tensor)
attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
if self.attn_uses_sequence_id and sequence_id is not None:
assert isinstance(attn_bias, torch.Tensor)
attn_bias = self._apply_sequence_id(attn_bias, sequence_id)
if attention_mask is not None:
s_k = attention_mask.shape[-1]
if attn_bias is None:
attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
else:
_s_k = max(0, attn_bias.size(-1) - s_k)
attn_bias = attn_bias[:, :, :, _s_k:]
if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
return (attn_bias, None)
def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor):
(s_k, s_q) = attn_bias.shape[-2:]
if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
raise ValueError('attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.')
seq_len = prefix_mask.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
prefix = prefix_mask.view(-1, 1, 1, seq_len)
cannot_attend = ~torch.logical_or(causal, prefix.bool())
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor):
seq_len = sequence_id.shape[-1]
if seq_len > self.config.max_seq_len:
raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
attn_bias = attn_bias[..., :seq_len, :seq_len]
cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
min_val = torch.finfo(attn_bias.dtype).min
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
return attn_bias
def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None):
return_dict = return_dict if return_dict is not None else self.config.return_dict
use_cache = use_cache if use_cache is not None else self.config.use_cache
if attention_mask is not None:
attention_mask = attention_mask.bool()
if prefix_mask is not None:
prefix_mask = prefix_mask.bool()
if not return_dict:
raise NotImplementedError('return_dict False is not implemented yet for MPT')
if output_attentions:
if self.attn_impl != 'torch':
raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training:
raise NotImplementedError('MPT does not support training with left padding.')
if self.prefix_lm and prefix_mask is None:
raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
if self.training:
if self.attn_uses_sequence_id and sequence_id is None:
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
elif self.attn_uses_sequence_id is False and sequence_id is not None:
warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
if input_ids is not None:
S = input_ids.size(1)
assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
tok_emb = self.wte(input_ids)
else:
assert inputs_embeds is not None
assert self.alibi, 'inputs_embeds is not implemented for MPT unless for alibi.'
S = inputs_embeds.size(1)
tok_emb = inputs_embeds
if self.alibi:
x = tok_emb
else:
past_position = 0
if past_key_values is not None:
if len(past_key_values) != self.config.n_layers:
raise ValueError(f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
past_position = past_key_values[0][0].size(1)
if self.attn_impl == 'torch':
past_position = past_key_values[0][0].size(3)
if S + past_position > self.config.max_seq_len:
raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_ids.device).unsqueeze(0)
if attention_mask is not None:
pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
pos_emb = self.wpe(pos)
x = tok_emb + pos_emb
if self.embedding_fraction == 1:
x = self.emb_drop(x)
else:
x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
assert isinstance(self.emb_drop, nn.Module)
x = self.emb_drop(x_shrunk)
(attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
if use_cache and past_key_values is None:
past_key_values = [() for _ in range(self.config.n_layers)]
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for (b_idx, block) in enumerate(self.blocks):
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
past_key_value = past_key_values[b_idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
(x, attn_weights, past_key_value) = torch.utils.checkpoint.checkpoint(block, x, past_key_value, attn_bias, attention_mask, self.is_causal)
else:
(x, attn_weights, past_key_value) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal)
if past_key_values is not None:
past_key_values[b_idx] = past_key_value
if output_attentions:
assert all_self_attns is not None
all_self_attns = all_self_attns + (attn_weights,)
x = self.norm_f(x)
if output_hidden_states:
assert all_hidden_states is not None
all_hidden_states = all_hidden_states + (x,)
return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
def param_init_fn(self, module):
init_fn_name = self.config.init_config['name'] | MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config) | 11 | 2023-11-27 09:55:39+00:00 | 12k |
iann838/pulsefire | tests/test_ratelimiters.py | [
{
"identifier": "RiotAPIClient",
"path": "pulsefire/clients.py",
"snippet": "class RiotAPIClient(BaseClient):\n \"\"\"Riot API Client.\n\n | Resources | Support |\n | -------------------- | -------------------------- |\n | League of Legends | ✅ |\n | Legends of Runeterra | ✅ |\n | Teamfight Tactics | ✅ |\n | Valorant | ✅ |\n\n Example:\n ```python\n async with RiotAPIClient(\n default_headers={\"X-Riot-Token\": <API_KEY>}\n ) as client:\n summoner = await client.get_lol_summoner_v4_by_name(region=\"na1\", name=\"Not a Whale\")\n assert summoner[\"summonerLevel\"] > 200\n ```\n \"\"\"\n\n Region = Literal[\n \"americas\", \"europe\", \"asia\", \"sea\", \"esports\",\n \"br1\", \"eun1\", \"euw1\", \"jp1\", \"kr\", \"la1\", \"la2\",\n \"na1\", \"oc1\", \"tr1\", \"ru\", \"ph2\", \"sg2\", \"th2\", \"tw2\", \"vn2\",\n \"ap\", \"br\", \"eu\", \"kr\", \"latam\", \"na\",\n ] | _str\n\n def __init__(\n self,\n *,\n base_url: str = \"https://{region}.api.riotgames.com\",\n default_params: dict[str, Any] = {},\n default_headers: dict[str, str] = {\"X-Riot-Token\": \"\"},\n default_queries: dict[str, str] = {},\n middlewares: list[Middleware] = [\n json_response_middleware(),\n http_error_middleware(),\n rate_limiter_middleware(RiotAPIRateLimiter()),\n ],\n ) -> None:\n super().__init__(\n base_url=base_url,\n default_params=default_params,\n default_headers=default_headers,\n default_queries=default_queries,\n middlewares=middlewares\n )\n\n # Account Endpoints\n\n async def get_account_v1_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.AccountV1Account:\n return await self.invoke(\"GET\", \"/riot/account/v1/accounts/by-puuid/{puuid}\")\n\n async def get_account_v1_by_riot_id(self, *, region: Region = ..., game_name: str = ..., tag_line: str = ...) -> RiotAPISchema.AccountV1Account:\n return await self.invoke(\"GET\", \"/riot/account/v1/accounts/by-riot-id/{game_name}/{tag_line}\")\n\n async def get_account_v1_me(self, *, region: Region = ..., headers: dict = {\"Authorization\": \"\"}) -> RiotAPISchema.AccountV1Account:\n return await self.invoke(\"GET\", \"/riot/account/v1/accounts/me\")\n\n async def get_account_v1_active_shard_by_puuid(self, *, region: Region = ..., puuid: str = ..., game: str = ...) -> RiotAPISchema.AccountV1ActiveShard:\n return await self.invoke(\"GET\", \"/riot/account/v1/active-shards/by-game/{game}/by-puuid/{puuid}\")\n\n # League of Legends Endpoints\n\n async def get_lol_champion_v3_rotation(self, *, region: Region = ...) -> RiotAPISchema.LolChampionV3Rotation:\n return await self.invoke(\"GET\", \"/lol/platform/v3/champion-rotations\")\n\n async def get_lol_champion_v4_mastery_by_summoner(self, *, region: Region = ..., summoner_id: str = ..., champion_id: int = ...) -> RiotAPISchema.LolChampionV4Mastery:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-summoner/{summoner_id}/by-champion/{champion_id}\")\n\n async def get_lol_champion_v4_masteries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-summoner/{summoner_id}\")\n\n async def get_lol_champion_v4_top_masteries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-summoner/{summoner_id}/top\")\n\n async def get_lol_champion_v4_mastery_score_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) 
-> int:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/scores/by-summoner/{summoner_id}\")\n\n async def get_lol_champion_v4_mastery_by_puuid(self, *, region: Region = ..., puuid: str = ..., champion_id: int = ...) -> RiotAPISchema.LolChampionV4Mastery:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-puuid/{puuid}/by-champion/{champion_id}\")\n\n async def get_lol_champion_v4_masteries_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-puuid/{puuid}\")\n\n async def get_lol_champion_v4_top_masteries_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-puuid/{puuid}/top\")\n\n async def get_lol_champion_v4_mastery_score_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> int:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/scores/by-puuid/{puuid}\")\n\n async def get_lol_clash_v1_players_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> list[RiotAPISchema.LolClashV1Player]:\n return await self.invoke(\"GET\", \"/lol/clash/v1/players/by-summoner/{summoner_id}\")\n\n async def get_lol_clash_v1_players_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[RiotAPISchema.LolClashV1Player]:\n return await self.invoke(\"GET\", \"/lol/clash/v1/players/by-puuid/{puuid}\")\n\n async def get_lol_clash_v1_team(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolClashV1Team:\n return await self.invoke(\"GET\", \"/lol/clash/v1/teams/{id}\")\n\n async def get_lol_clash_v1_tournament_by_team(self, *, region: Region = ..., team_id: str = ...) -> RiotAPISchema.LolClashV1Tournament:\n return await self.invoke(\"GET\", \"/lol/clash/v1/tournaments/by-team/{team_id}\")\n\n async def get_lol_clash_v1_tournament(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolClashV1Tournament:\n return await self.invoke(\"GET\", \"/lol/clash/v1/tournaments/{id}\")\n\n async def get_lol_clash_v1_tournaments(self, *, region: Region = ...) -> list[RiotAPISchema.LolClashV1Tournament]:\n return await self.invoke(\"GET\", \"/lol/clash/v1/tournaments\")\n\n async def get_lol_league_v4_entries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> list[RiotAPISchema.LolLeagueV4LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/lol/league/v4/entries/by-summoner/{summoner_id}\")\n\n async def get_lol_league_v4_challenger_league_by_queue(self, *, region: Region = ..., queue: str = ...) -> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/challengerleagues/by-queue/{queue}\")\n\n async def get_lol_league_v4_grandmaster_league_by_queue(self, *, region: Region = ..., queue: str = ...) -> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/grandmasterleagues/by-queue/{queue}\")\n\n async def get_lol_league_v4_master_league_by_queue(self, *, region: Region = ..., queue: str = ...) 
-> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/masterleagues/by-queue/{queue}\")\n\n async def get_lol_league_v4_entries_by_division(\n self, *, region: Region = ..., queue: str = ..., tier: str = ..., division: str = ..., queries: dict = {\"page\": 1}\n ) -> list[RiotAPISchema.LolLeagueV4LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/lol/league/v4/entries/{queue}/{tier}/{division}\")\n\n async def get_lol_league_v4_league(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/leagues/{id}\")\n\n async def get_lol_match_v5_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolMatchV5Match:\n return await self.invoke(\"GET\", \"/lol/match/v5/matches/{id}\")\n\n async def get_lol_match_v5_match_timeline(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolMatchV5MatchTimeline:\n return await self.invoke(\"GET\", \"/lol/match/v5/matches/{id}/timeline\")\n\n async def get_lol_match_v5_match_ids_by_puuid(self, *, region: Region = ..., puuid: str = ..., queries: dict = {\"start\": 0, \"count\": 100}) -> list[str]:\n return await self.invoke(\"GET\", \"/lol/match/v5/matches/by-puuid/{puuid}/ids\")\n\n async def get_lol_spectator_v4_active_game_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> RiotAPISchema.LolSpectatorV4Game:\n return await self.invoke(\"GET\", \"/lol/spectator/v4/active-games/by-summoner/{summoner_id}\")\n\n async def get_lol_spectator_v4_featured_games(self, *, region: Region = ...) -> RiotAPISchema.LolSpectatorV4GameList:\n return await self.invoke(\"GET\", \"/lol/spectator/v4/featured-games\")\n\n async def get_lol_status_v4_platform_data(self, *, region: Region = ...) -> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/lol/status/v4/platform-data\")\n\n async def get_lol_summoner_v4_by_id(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/{id}\")\n\n async def get_lol_summoner_v4_by_name(self, *, region: Region = ..., name: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/by-name/{name}\")\n\n async def get_lol_summoner_v4_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/by-puuid/{puuid}\")\n\n async def get_lol_summoner_v4_me(self, *, region: Region = ..., headers: dict = {\"Authorization\": \"\"}) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/me\")\n\n async def get_lol_summoner_v4_by_rso_puuid(self, *, region: Region = ..., rso_puuid: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/fulfillment/v1/summoners/by-puuid/{rso_puuid}\")\n\n # Teamfight Tactics Endpoints\n\n async def get_tft_league_v1_entries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) 
-> list[RiotAPISchema.TftLeagueV1LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/tft/league/v1/entries/by-summoner/{summoner_id}\")\n\n async def get_tft_league_v1_challenger_league(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/challenger\")\n\n async def get_tft_league_v1_grandmaster_league(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/grandmaster\")\n\n async def get_tft_league_v1_master_league(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/master\")\n\n async def get_tft_league_v1_entries_by_division(\n self, *, region: Region = ..., tier: str = ..., division: str = ..., queries: dict = {\"page\": 1}\n ) -> list[RiotAPISchema.TftLeagueV1LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/tft/league/v1/entries/{tier}/{division}\")\n\n async def get_tft_league_v1_league(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/leagues/{id}\")\n\n async def get_tft_match_v1_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.TftMatchV1Match:\n return await self.invoke(\"GET\", \"/tft/match/v1/matches/{id}\")\n\n async def get_tft_match_v1_match_ids_by_puuid(self, *, region: Region = ..., puuid: str = ..., queries: dict = {\"start\": 0, \"count\": 100}) -> list[str]:\n return await self.invoke(\"GET\", \"/tft/match/v1/matches/by-puuid/{puuid}/ids\")\n\n async def get_tft_status_v1_platform_data(self, *, region: Region = ...) -> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/tft/status/v1/platform-data\")\n\n async def get_tft_summoner_v1_by_id(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/{id}\")\n\n async def get_tft_summoner_v1_by_name(self, *, region: Region = ..., name: str = ...) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/by-name/{name}\")\n\n async def get_tft_summoner_v1_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/by-puuid/{puuid}\")\n\n async def get_tft_summoner_v1_me(self, *, region: Region = ..., headers: dict = {\"Authorization\": \"\"}) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/me\")\n\n # Legends of Runeterra Endpoints\n\n async def get_lor_ranked_v1_leaderboard(self, *, region: Region = ...) -> RiotAPISchema.LorRankedV1Leaderboard:\n return await self.invoke(\"GET\", \"/lor/ranked/v1/leaderboards\")\n\n async def get_lor_match_v1_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LorMatchV1Match:\n return await self.invoke(\"GET\", \"/lor/match/v1/matches/{id}\")\n\n async def get_lor_match_v1_match_ids_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[str]:\n return await self.invoke(\"GET\", \"/lor/match/v1/matches/by-puuid/{puuid}/ids\")\n\n async def get_lor_status_v1_platform_data(self, *, region: Region = ...) 
-> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/lor/status/v1/platform-data\")\n\n # Valorant Endpoints\n\n async def get_val_content_v1_contents(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.ValContentV1Contents:\n return await self.invoke(\"GET\", \"/val/content/v1/contents\")\n\n async def get_val_ranked_v1_leaderboard_by_act(self, *, region: Region = ..., act_id: str = ...) -> RiotAPISchema.ValRankedV1Leaderboard:\n return await self.invoke(\"GET\", \"/val/ranked/v1/leaderboards/by-act/{act_id}\")\n\n async def get_val_match_v1_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.ValMatchV1Match:\n return await self.invoke(\"GET\", \"/val/match/v1/matches/{id}\")\n\n async def get_val_match_v1_matchlist_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.ValMatchV1Matchlist:\n return await self.invoke(\"GET\", \"/val/match/v1/matchlists/by-puuid/{puuid}\")\n\n async def get_val_match_v1_recent_matches_by_queue(self, *, region: Region = ..., queue: str = ...) -> RiotAPISchema.ValMatchV1RecentMatches:\n return await self.invoke(\"GET\", \"/val/match/v1/recent-matches/by-queue/{queue}\")\n\n async def get_val_status_v1_platform_data(self, *, region: Region = ...) -> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/val/status/v1/platform-data\")"
},
{
"identifier": "async_to_sync",
"path": "pulsefire/functools.py",
"snippet": "def async_to_sync(runner: Callable[[Awaitable[Any]], Any] = asyncio.run):\n \"\"\"Convert a coroutine function to run synchronously. Use as decorator `@async_to_sync()`.\n\n Example:\n ```python\n @async_to_sync()\n async def sample_func(number: int):\n ...\n \n sample_func(0)\n ```\n\n Parameters:\n runner: A callable that runs the awaitable synchronously.\n\n Raises:\n TypeError: When `func` is not a coroutine function.\n \"\"\"\n\n def decorator[**P, R](func: Callable[P, Awaitable[R]]) -> Callable[P, R]:\n if not inspect.iscoroutinefunction(func):\n raise TypeError(f\"{func} is not a coroutine function\")\n\n @functools.wraps(func)\n def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:\n return runner(func(*args, **kwargs))\n\n return wrapper\n\n return decorator"
},
{
"identifier": "json_response_middleware",
"path": "pulsefire/middlewares.py",
"snippet": "def json_response_middleware(loads: Callable[[str | bytes | bytearray], Any] = json.loads):\n \"\"\"JSON response middleware.\n\n Attempts to deserialize JSON responses regardless of content type,\n if an exception is raised during deserialization, bytes are returned instead.\n\n Example:\n ```python\n # Use orjson loads for 3~10x faster deserialization\n import orjson\n json_response_middleware(orjson.loads)\n ```\n\n Parameters:\n loads: JSON decoder to be used on deserialization.\n \"\"\"\n\n def constructor(next: MiddlewareCallable):\n\n async def middleware(invocation: Invocation):\n response: aiohttp.ClientResponse = await next(invocation)\n try:\n return await response.json(encoding=\"utf-8\", content_type=None, loads=loads)\n except Exception:\n return await response.read()\n\n return middleware\n\n return constructor"
},
{
"identifier": "http_error_middleware",
"path": "pulsefire/middlewares.py",
"snippet": "def http_error_middleware(max_retries: int = 3):\n \"\"\"HTTP error middleware.\n\n Should be positioned as late as possible and before rate limiter middlewares\n (if any) in the client middlewares list.\n\n Responses are handled differently based on their HTTP status:\n\n | Status | Measures |\n | ------ | ------------------------------------- |\n | 2XX | Return response. |\n | 3XX | Raise `aiohttp.ClientResponseError`. |\n | 4XX | Raise `aiohttp.ClientResponseError`. |\n | 429 | Exponential retries (2^n). |\n | 5XX | Exponential retries (2^n). |\n\n Example:\n ```python\n http_error_middleware(3)\n ```\n\n Parameters:\n max_retries: Number of retries to perform before giving up.\n\n Raises:\n aiohttp.ClientResponseError: When retries have exhausted.\n \"\"\"\n\n def constructor(next: MiddlewareCallable):\n\n async def middleware(invocation: Invocation):\n last_response: aiohttp.ClientResponse = None\n for attempt in range(max_retries + 1):\n if attempt:\n await asyncio.sleep(2 ** attempt)\n response: aiohttp.ClientResponse = await next(invocation)\n last_response = response\n if 300 > response.status >= 200:\n return response\n if not (response.status == 429 or response.status >= 500):\n response.raise_for_status()\n else:\n last_response.raise_for_status()\n\n return middleware\n\n return constructor"
},
{
"identifier": "rate_limiter_middleware",
"path": "pulsefire/middlewares.py",
"snippet": "def rate_limiter_middleware(rate_limiter: BaseRateLimiter):\n \"\"\"Rate limiter middleware.\n\n Should be positioned as late as possible in the client middlewares list.\n\n Example:\n ```python\n rate_limiter = RiotAPIRateLimiter()\n rate_limiter_middleware(rate_limiter)\n ```\n\n Parameters:\n rate_limiter: Rate limiter instance.\n \"\"\"\n\n track_429s = collections.deque(maxlen=12)\n\n def constructor(next: MiddlewareCallable):\n\n async def middleware(invocation: Invocation):\n while True:\n wait_for = await rate_limiter.acquire(invocation)\n if wait_for <= 0:\n break\n await asyncio.sleep(wait_for)\n\n response: aiohttp.ClientResponse = await next(invocation)\n\n if response.status == 429:\n response_time = time.time()\n track_429s.append(response_time)\n if sum(response_time - prev_time < 10 for prev_time in track_429s) >= 10:\n LOGGER.warning(f\"rate_limiter_middleware: detected elevated amount of http 429 responses\")\n track_429s.clear()\n\n if wait_for == -1:\n await rate_limiter.synchronize(invocation, response.headers)\n\n return response\n\n return middleware\n\n return constructor"
},
{
"identifier": "RiotAPIRateLimiter",
"path": "pulsefire/ratelimiters.py",
"snippet": "class RiotAPIRateLimiter(BaseRateLimiter):\n \"\"\"Riot API rate limiter.\n\n This rate limiter can be served stand-alone for centralized rate limiting,\n also accepting proxy configuration towards said centralized rate limiter.\n\n Example:\n ```python\n RiotAPIRateLimiter() # Local rate limiter\n\n RiotAPIRateLimiter().serve() # Served at 127.0.0.1:12227\n RiotAPIRateLimiter().serve(port=<PORT>) # Served at 127.0.0.1:<PORT>\n RiotAPIRateLimiter().serve(\"0.0.0.0\", 12227) # Served at 0.0.0.0:12227 (public)\n RiotAPIRateLimiter().serve(\"0.0.0.0\", 12227, secret=<SECRET>) # Add authentication\n\n RiotAPIRateLimiter(proxy=\"http://127.0.0.1:12227\") # Proxy to 127.0.0.1:12227\n RiotAPIRateLimiter(proxy=\"http://127.0.0.1:12227\", proxy_secret=<SECRET>) # Proxy authentication\n RiotAPIRateLimiter(proxy=\"<SCHEME>://<HOST>:<PORT>\")\n RiotAPIRateLimiter(proxy=\"<SCHEME>://<HOST>:<PORT>\", proxy_secret=<SECRET>)\n ```\n\n Parameters:\n proxy: URL of the proxy rate limiter.\n proxy_secret: Secret of the proxy rate limiter if required.\n \"\"\"\n\n _index: dict[tuple[str, int, *tuple[str]], tuple[int, int, float, float, float]] = \\\n collections.defaultdict(lambda: (0, 0, 0, 0, 0))\n\n def __init__(self, *, proxy: str | None = None, proxy_secret: str | None = None) -> None:\n self.proxy = proxy\n self.proxy_secret = proxy_secret\n self._track_syncs: dict[str, tuple[float, list]] = {}\n\n async def acquire(self, invocation: Invocation) -> float:\n if self.proxy:\n response = await invocation.session.post(\n self.proxy + \"/acquire\",\n json={\n \"invocation\": {\n \"uid\": invocation.uid,\n \"method\": invocation.method,\n \"urlformat\": invocation.urlformat,\n \"params\": invocation.params,\n }\n },\n headers=self.proxy_secret and {\"Authorization\": \"Bearer \" + self.proxy_secret}\n )\n response.raise_for_status()\n return await response.json()\n\n wait_for = 0\n pinging_targets = []\n requesting_targets = []\n request_time = time.time()\n for target in [\n (\"app\", 0, invocation.params.get(\"region\", \"\"), invocation.method),\n (\"app\", 1, invocation.params.get(\"region\", \"\"), invocation.method),\n (\"method\", 0, invocation.params.get(\"region\", \"\"), invocation.method, invocation.urlformat),\n (\"method\", 1, invocation.params.get(\"region\", \"\"), invocation.method, invocation.urlformat),\n ]:\n count, limit, expire, latency, pinged = self._index[target]\n pinging = pinged and request_time - pinged < 10\n if pinging:\n wait_for = max(wait_for, 0.1)\n elif request_time > expire:\n pinging_targets.append(target)\n elif request_time > expire - latency * 1.1 + 0.01 or count >= limit:\n wait_for = max(wait_for, expire - request_time)\n else:\n requesting_targets.append(target)\n if wait_for <= 0:\n if pinging_targets:\n self._track_syncs[invocation.uid] = (request_time, pinging_targets)\n for pinging_target in pinging_targets:\n self._index[pinging_target] = (0, 0, 0, 0, time.time())\n wait_for = -1\n for requesting_target in requesting_targets:\n count, *values = self._index[requesting_target]\n self._index[requesting_target] = (count + 1, *values)\n return wait_for\n\n async def synchronize(self, invocation: Invocation, headers: dict[str, str]) -> None:\n if self.proxy:\n response = await invocation.session.post(\n self.proxy + \"/synchronize\",\n json={\n \"invocation\": {\n \"uid\": invocation.uid,\n \"method\": invocation.method,\n \"urlformat\": invocation.urlformat,\n \"params\": invocation.params,\n },\n \"headers\": dict(headers)\n },\n headers=self.proxy_secret 
and {\"Authorization\": \"Bearer \" + self.proxy_secret}\n )\n return response.raise_for_status()\n\n response_time = time.time()\n request_time, pinging_targets = self._track_syncs.pop(invocation.uid, [None, None])\n if request_time is None:\n return\n\n if random.random() < 0.1:\n for prev_uid, (prev_request_time, _) in self._track_syncs.items():\n if response_time - prev_request_time > 600:\n self._track_syncs.pop(prev_uid, None)\n\n try:\n header_limits = {\n \"app\": [[int(v) for v in t.split(':')] for t in headers[\"X-App-Rate-Limit\"].split(',')],\n \"method\": [[int(v) for v in t.split(':')] for t in headers[\"X-Method-Rate-Limit\"].split(',')],\n }\n header_counts = {\n \"app\": [[int(v) for v in t.split(':')] for t in headers[\"X-App-Rate-Limit-Count\"].split(',')],\n \"method\": [[int(v) for v in t.split(':')] for t in headers[\"X-Method-Rate-Limit-Count\"].split(',')],\n }\n except KeyError:\n for pinging_target in pinging_targets:\n self._index[pinging_target] = (0, 0, 0, 0, 0)\n return\n for scope, idx, *subscopes in pinging_targets:\n if idx >= len(header_limits[scope]):\n self._index[(scope, idx, *subscopes)] = (0, 10**10, response_time + 3600, 0, 0)\n continue\n self._index[(scope, idx, *subscopes)] = (\n header_counts[scope][idx][0],\n header_limits[scope][idx][0],\n header_limits[scope][idx][1] + response_time,\n response_time - request_time,\n 0\n )\n\n def serve(self, host=\"127.0.0.1\", port=12227, *, secret: str | None = None) -> NoReturn:\n from aiohttp import web\n\n app = web.Application(client_max_size=4096)\n routes = web.RouteTableDef()\n\n def is_authenticated(request: web.Request):\n if not secret:\n return True\n request_secret = request.headers.get(\"Authorization\", \"Bearer \").lstrip(\"Bearer \")\n return request_secret == secret\n\n @routes.post(\"/acquire\")\n async def acquire(request: web.Request) -> web.Response:\n if not is_authenticated(request):\n return web.Response(status=401)\n try:\n data = await request.json()\n wait_for = await self.acquire(Invocation(**data[\"invocation\"]))\n return web.json_response(wait_for)\n except (KeyError, ValueError):\n return web.Response(status=400)\n\n @routes.post(\"/synchronize\")\n async def synchronize(request: web.Request) -> web.Response:\n if not is_authenticated(request):\n return web.Response(status=401)\n try:\n data = await request.json()\n await self.synchronize(Invocation(**data[\"invocation\"]), data[\"headers\"])\n return web.Response()\n except (KeyError, ValueError):\n return web.Response(status=400)\n\n app.add_routes(routes)\n web.run_app(app, host=host, port=port)"
},
{
"identifier": "TaskGroup",
"path": "pulsefire/taskgroups.py",
"snippet": "class TaskGroup(asyncio.TaskGroup):\n \"\"\"Asynchronous context manager for managing groups of tasks.\n See [python asyncio task groups documentation](https://docs.python.org/3/library/asyncio-task.html#task-groups).\n\n Adapted for pulsefire, key differences from `asyncio.TaskGroup`:\n\n - Accepts a semaphore to restrict the amount of concurrent running coroutines.\n - Due to semaphore support, the `create_task` method is now async.\n - Allows internal collection of results and exceptions, similar to `asyncio.Task`.\n - If exception collection is on (default), the task group will not abort on task exceptions.\n\n Example:\n ```python\n async with TaskGroup(asyncio.Semaphore(100)) as tg:\n await tg.create_task(coro_func(...))\n results = tg.results()\n ```\n \"\"\"\n\n semaphore: asyncio.Semaphore | None = None\n \"\"\"Semaphore for restricting concurrent running coroutines.\"\"\"\n collect_results: bool = True\n \"\"\"Flag for collecting task results.\"\"\"\n collect_exceptions: bool = True\n \"\"\"Flag for collecting task exceptions, disables abort.\"\"\"\n\n def __init__(\n self,\n semaphore: asyncio.Semaphore | None = None,\n *,\n collect_results: bool = True,\n collect_exceptions: bool = True,\n ) -> None:\n super().__init__()\n self.semaphore = semaphore\n self.collect_results = collect_results\n self.collect_exceptions = collect_exceptions\n self._exceptions: list[BaseException] = []\n self._results = []\n\n async def __aenter__(self):\n self._exceptions = []\n self._results = []\n return await super().__aenter__()\n\n def results[T](self) -> list[T]:\n \"\"\"Return the collected results returned from created tasks.\"\"\"\n if not self.collect_results:\n raise RuntimeError(f\"TaskGroup {self!r} has `collect_results` off\")\n return self._results\n\n def exceptions(self) -> list[BaseException]:\n \"\"\"Return the collected exceptions raised from created tasks.\"\"\"\n if not self.collect_exceptions:\n raise RuntimeError(f\"TaskGroup {self!r} has `collect_exceptions` off\")\n return self._exceptions\n\n @override\n async def create_task[T](self, coro: Awaitable[T], *, name: str | None = None, context: Context | None = None) -> asyncio.Task[T]:\n \"\"\"Create a new task in this group and return it.\n\n If this group has a semaphore, wrap this semaphore on the coroutine.\n \"\"\"\n _coro = coro\n if self.semaphore:\n await self.semaphore.acquire()\n async def semaphored():\n try:\n return await _coro\n finally:\n self.semaphore.release()\n coro = semaphored()\n return super().create_task(coro, name=name, context=context)\n\n def _on_task_done(self, task) -> None:\n if exc := task.exception():\n if self.collect_exceptions:\n LOGGER.warning(\n \"TaskGroup: unhandled exception\\n\" +\n \"\".join(traceback.format_exception(type(exc), exc, exc.__traceback__))\n )\n self._exceptions.append(exc)\n self._tasks.discard(task)\n if self._on_completed_fut is not None and not self._tasks:\n if not self._on_completed_fut.done():\n self._on_completed_fut.set_result(True)\n return\n elif self.collect_results and not task.cancelled():\n self._results.append(task.result())\n return super()._on_task_done(task)"
}
] | import asyncio
import os
import subprocess
import time
import aiohttp
from pulsefire.clients import RiotAPIClient
from pulsefire.functools import async_to_sync
from pulsefire.middlewares import (
json_response_middleware,
http_error_middleware,
rate_limiter_middleware
)
from pulsefire.ratelimiters import RiotAPIRateLimiter
from pulsefire.taskgroups import TaskGroup | 8,619 |
@async_to_sync()
async def test_riot_api_rate_limiter_local():
async with RiotAPIClient(
default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]},
middlewares=[
json_response_middleware(),
http_error_middleware(),
|
@async_to_sync()
async def test_riot_api_rate_limiter_local():
async with RiotAPIClient(
default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]},
middlewares=[
json_response_middleware(),
http_error_middleware(), | rate_limiter_middleware(RiotAPIRateLimiter()), | 4 | 2023-11-27 13:37:24+00:00 | 12k |
ubc-vision/nf-soft-mining | examples/train_ngp_nerf_occ.py | [
{
"identifier": "MIPNERF360_UNBOUNDED_SCENES",
"path": "examples/utils.py",
"snippet": "MIPNERF360_UNBOUNDED_SCENES = [\n \"garden\",\n \"bicycle\",\n \"bonsai\",\n \"counter\",\n \"kitchen\",\n \"room\",\n \"stump\",\n]"
},
{
"identifier": "NERF_SYNTHETIC_SCENES",
"path": "examples/utils.py",
"snippet": "NERF_SYNTHETIC_SCENES = [\n \"chair\",\n \"drums\",\n \"ficus\",\n \"hotdog\",\n \"lego\",\n \"materials\",\n \"mic\",\n \"ship\",\n]"
},
{
"identifier": "render_image_with_occgrid",
"path": "examples/utils.py",
"snippet": "def render_image_with_occgrid(\n # scene\n radiance_field: torch.nn.Module,\n estimator: OccGridEstimator,\n rays: Rays,\n # rendering options\n near_plane: float = 0.0,\n far_plane: float = 1e10,\n render_step_size: float = 1e-3,\n render_bkgd: Optional[torch.Tensor] = None,\n cone_angle: float = 0.0,\n alpha_thre: float = 0.0,\n # test options\n test_chunk_size: int = 8192,\n # only useful for dnerf\n timestamps: Optional[torch.Tensor] = None,\n):\n \"\"\"Render the pixels of an image.\"\"\"\n rays_shape = rays.origins.shape\n if len(rays_shape) == 3:\n height, width, _ = rays_shape\n num_rays = height * width\n rays = namedtuple_map(\n lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays\n )\n else:\n num_rays, _ = rays_shape\n\n def sigma_fn(t_starts, t_ends, ray_indices):\n t_origins = chunk_rays.origins[ray_indices]\n t_dirs = chunk_rays.viewdirs[ray_indices]\n positions = t_origins + t_dirs * (t_starts + t_ends)[:, None] / 2.0\n if timestamps is not None:\n # dnerf\n t = (\n timestamps[ray_indices]\n if radiance_field.training\n else timestamps.expand_as(positions[:, :1])\n )\n sigmas = radiance_field.query_density(positions, t)\n else:\n sigmas = radiance_field.query_density(positions)\n return sigmas.squeeze(-1)\n\n def rgb_sigma_fn(t_starts, t_ends, ray_indices):\n t_origins = chunk_rays.origins[ray_indices]\n t_dirs = chunk_rays.viewdirs[ray_indices]\n positions = t_origins + t_dirs * (t_starts + t_ends)[:, None] / 2.0\n if timestamps is not None:\n # dnerf\n t = (\n timestamps[ray_indices]\n if radiance_field.training\n else timestamps.expand_as(positions[:, :1])\n )\n rgbs, sigmas = radiance_field(positions, t, t_dirs)\n else:\n rgbs, sigmas = radiance_field(positions, t_dirs)\n return rgbs, sigmas.squeeze(-1)\n\n results = []\n chunk = (\n torch.iinfo(torch.int32).max\n if radiance_field.training\n else test_chunk_size\n )\n for i in range(0, num_rays, chunk):\n chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays)\n ray_indices, t_starts, t_ends = estimator.sampling(\n chunk_rays.origins,\n chunk_rays.viewdirs,\n sigma_fn=sigma_fn,\n near_plane=near_plane,\n far_plane=far_plane,\n render_step_size=render_step_size,\n stratified=radiance_field.training,\n cone_angle=cone_angle,\n alpha_thre=alpha_thre,\n )\n rgb, opacity, depth, extras = rendering(\n t_starts,\n t_ends,\n ray_indices,\n n_rays=chunk_rays.origins.shape[0],\n rgb_sigma_fn=rgb_sigma_fn,\n render_bkgd=render_bkgd,\n )\n chunk_results = [rgb, opacity, depth, len(t_starts)]\n results.append(chunk_results)\n colors, opacities, depths, n_rendering_samples = [\n torch.cat(r, dim=0) if isinstance(r[0], torch.Tensor) else r\n for r in zip(*results)\n ]\n return (\n colors.view((*rays_shape[:-1], -1)),\n opacities.view((*rays_shape[:-1], -1)),\n depths.view((*rays_shape[:-1], -1)),\n sum(n_rendering_samples),\n )"
},
{
"identifier": "render_image_with_occgrid_test",
"path": "examples/utils.py",
"snippet": "@torch.no_grad()\ndef render_image_with_occgrid_test(\n max_samples: int,\n # scene\n radiance_field: torch.nn.Module,\n estimator: OccGridEstimator,\n rays: Rays,\n # rendering options\n near_plane: float = 0.0,\n far_plane: float = 1e10,\n render_step_size: float = 1e-3,\n render_bkgd: Optional[torch.Tensor] = None,\n cone_angle: float = 0.0,\n alpha_thre: float = 0.0,\n early_stop_eps: float = 1e-4,\n # only useful for dnerf\n timestamps: Optional[torch.Tensor] = None,\n):\n \"\"\"Render the pixels of an image.\"\"\"\n rays_shape = rays.origins.shape\n if len(rays_shape) == 3:\n height, width, _ = rays_shape\n num_rays = height * width\n rays = namedtuple_map(\n lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays\n )\n else:\n num_rays, _ = rays_shape\n\n def rgb_sigma_fn(t_starts, t_ends, ray_indices):\n t_origins = rays.origins[ray_indices]\n t_dirs = rays.viewdirs[ray_indices]\n positions = (\n t_origins + t_dirs * (t_starts[:, None] + t_ends[:, None]) / 2.0\n )\n if timestamps is not None:\n # dnerf\n t = (\n timestamps[ray_indices]\n if radiance_field.training\n else timestamps.expand_as(positions[:, :1])\n )\n rgbs, sigmas = radiance_field(positions, t, t_dirs)\n else:\n rgbs, sigmas = radiance_field(positions, t_dirs)\n return rgbs, sigmas.squeeze(-1)\n\n device = rays.origins.device\n opacity = torch.zeros(num_rays, 1, device=device)\n depth = torch.zeros(num_rays, 1, device=device)\n rgb = torch.zeros(num_rays, 3, device=device)\n\n ray_mask = torch.ones(num_rays, device=device).bool()\n\n # 1 for synthetic scenes, 4 for real scenes\n min_samples = 1 if cone_angle == 0 else 4\n\n iter_samples = total_samples = 0\n\n rays_o = rays.origins\n rays_d = rays.viewdirs\n\n near_planes = torch.full_like(rays_o[..., 0], fill_value=near_plane)\n far_planes = torch.full_like(rays_o[..., 0], fill_value=far_plane)\n\n t_mins, t_maxs, hits = ray_aabb_intersect(rays_o, rays_d, estimator.aabbs)\n\n n_grids = estimator.binaries.size(0)\n\n if n_grids > 1:\n t_sorted, t_indices = torch.sort(torch.cat([t_mins, t_maxs], -1), -1)\n else:\n t_sorted = torch.cat([t_mins, t_maxs], -1)\n t_indices = torch.arange(\n 0, n_grids * 2, device=t_mins.device, dtype=torch.int64\n ).expand(num_rays, n_grids * 2)\n\n opc_thre = 1 - early_stop_eps\n\n while iter_samples < max_samples:\n\n n_alive = ray_mask.sum().item()\n if n_alive == 0:\n break\n\n # the number of samples to add on each ray\n n_samples = max(min(num_rays // n_alive, 64), min_samples)\n iter_samples += n_samples\n\n # ray marching\n (intervals, samples, termination_planes) = traverse_grids(\n # rays\n rays_o, # [n_rays, 3]\n rays_d, # [n_rays, 3]\n # grids\n estimator.binaries, # [m, resx, resy, resz]\n estimator.aabbs, # [m, 6]\n # options\n near_planes, # [n_rays]\n far_planes, # [n_rays]\n render_step_size,\n cone_angle,\n n_samples,\n True,\n ray_mask,\n # pre-compute intersections\n t_sorted, # [n_rays, m*2]\n t_indices, # [n_rays, m*2]\n hits, # [n_rays, m]\n )\n t_starts = intervals.vals[intervals.is_left]\n t_ends = intervals.vals[intervals.is_right]\n ray_indices = samples.ray_indices[samples.is_valid]\n packed_info = samples.packed_info\n\n # get rgb and sigma from radiance field\n rgbs, sigmas = rgb_sigma_fn(t_starts, t_ends, ray_indices)\n # volume rendering using native cuda scan\n weights, _, alphas = render_weight_from_density(\n t_starts,\n t_ends,\n sigmas,\n ray_indices=ray_indices,\n n_rays=num_rays,\n prefix_trans=1 - opacity[ray_indices].squeeze(-1),\n )\n if alpha_thre > 0:\n vis_mask = alphas >= 
alpha_thre\n ray_indices, rgbs, weights, t_starts, t_ends = (\n ray_indices[vis_mask],\n rgbs[vis_mask],\n weights[vis_mask],\n t_starts[vis_mask],\n t_ends[vis_mask],\n )\n\n accumulate_along_rays_(\n weights,\n values=rgbs,\n ray_indices=ray_indices,\n outputs=rgb,\n )\n accumulate_along_rays_(\n weights,\n values=None,\n ray_indices=ray_indices,\n outputs=opacity,\n )\n accumulate_along_rays_(\n weights,\n values=(t_starts + t_ends)[..., None] / 2.0,\n ray_indices=ray_indices,\n outputs=depth,\n )\n # update near_planes using termination planes\n near_planes = termination_planes\n # update rays status\n ray_mask = torch.logical_and(\n # early stopping\n opacity.view(-1) <= opc_thre,\n # remove rays that have reached the far plane\n packed_info[:, 1] == n_samples,\n )\n total_samples += ray_indices.shape[0]\n\n rgb = rgb + render_bkgd * (1.0 - opacity)\n depth = depth / opacity.clamp_min(torch.finfo(rgbs.dtype).eps)\n\n return (\n rgb.view((*rays_shape[:-1], -1)),\n opacity.view((*rays_shape[:-1], -1)),\n depth.view((*rays_shape[:-1], -1)),\n total_samples,\n )"
},
{
"identifier": "set_random_seed",
"path": "examples/utils.py",
"snippet": "def set_random_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)"
},
{
"identifier": "OccGridEstimator",
"path": "nerfacc/estimators/occ_grid.py",
"snippet": "class OccGridEstimator(AbstractEstimator):\n \"\"\"Occupancy grid transmittance estimator for spatial skipping.\n\n References: \"Instant Neural Graphics Primitives.\"\n\n Args:\n roi_aabb: The axis-aligned bounding box of the region of interest. Useful for mapping\n the 3D space to the grid.\n resolution: The resolution of the grid. If an integer is given, the grid is assumed to\n be a cube. Otherwise, a list or a tensor of shape (3,) is expected. Default: 128.\n levels: The number of levels of the grid. Default: 1.\n \"\"\"\n\n DIM: int = 3\n\n def __init__(\n self,\n roi_aabb: Union[List[int], Tensor],\n resolution: Union[int, List[int], Tensor] = 128,\n levels: int = 1,\n **kwargs,\n ) -> None:\n super().__init__()\n\n if \"contraction_type\" in kwargs:\n raise ValueError(\n \"`contraction_type` is not supported anymore for nerfacc >= 0.4.0.\"\n )\n\n # check the resolution is legal\n if isinstance(resolution, int):\n resolution = [resolution] * self.DIM\n if isinstance(resolution, (list, tuple)):\n resolution = torch.tensor(resolution, dtype=torch.int32)\n assert isinstance(resolution, Tensor), f\"Invalid type: {resolution}!\"\n assert resolution.shape[0] == self.DIM, f\"Invalid shape: {resolution}!\"\n\n # check the roi_aabb is legal\n if isinstance(roi_aabb, (list, tuple)):\n roi_aabb = torch.tensor(roi_aabb, dtype=torch.float32)\n assert isinstance(roi_aabb, Tensor), f\"Invalid type: {roi_aabb}!\"\n assert roi_aabb.shape[0] == self.DIM * 2, f\"Invalid shape: {roi_aabb}!\"\n\n # multiple levels of aabbs\n aabbs = torch.stack(\n [_enlarge_aabb(roi_aabb, 2**i) for i in range(levels)], dim=0\n )\n\n # total number of voxels\n self.cells_per_lvl = int(resolution.prod().item())\n self.levels = levels\n\n # Buffers\n self.register_buffer(\"resolution\", resolution) # [3]\n self.register_buffer(\"aabbs\", aabbs) # [n_aabbs, 6]\n self.register_buffer(\n \"occs\", torch.zeros(self.levels * self.cells_per_lvl)\n )\n self.register_buffer(\n \"binaries\",\n torch.zeros([levels] + resolution.tolist(), dtype=torch.bool),\n )\n\n # Grid coords & indices\n grid_coords = _meshgrid3d(resolution).reshape(\n self.cells_per_lvl, self.DIM\n )\n self.register_buffer(\"grid_coords\", grid_coords, persistent=False)\n grid_indices = torch.arange(self.cells_per_lvl)\n self.register_buffer(\"grid_indices\", grid_indices, persistent=False)\n\n @torch.no_grad()\n def sampling(\n self,\n # rays\n rays_o: Tensor, # [n_rays, 3]\n rays_d: Tensor, # [n_rays, 3]\n # sigma/alpha function for skipping invisible space\n sigma_fn: Optional[Callable] = None,\n alpha_fn: Optional[Callable] = None,\n near_plane: float = 0.0,\n far_plane: float = 1e10,\n t_min: Optional[Tensor] = None, # [n_rays]\n t_max: Optional[Tensor] = None, # [n_rays]\n # rendering options\n render_step_size: float = 1e-3,\n early_stop_eps: float = 1e-4,\n alpha_thre: float = 0.0,\n stratified: bool = False,\n cone_angle: float = 0.0,\n ) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Sampling with spatial skipping.\n\n Note:\n This function is not differentiable to any inputs.\n\n Args:\n rays_o: Ray origins of shape (n_rays, 3).\n rays_d: Normalized ray directions of shape (n_rays, 3).\n sigma_fn: Optional. If provided, the marching will skip the invisible space\n by evaluating the density along the ray with `sigma_fn`. 
It should be a\n function that takes in samples {t_starts (N,), t_ends (N,),\n ray indices (N,)} and returns the post-activation density values (N,).\n You should only provide either `sigma_fn` or `alpha_fn`.\n alpha_fn: Optional. If provided, the marching will skip the invisible space\n by evaluating the density along the ray with `alpha_fn`. It should be a\n function that takes in samples {t_starts (N,), t_ends (N,),\n ray indices (N,)} and returns the post-activation opacity values (N,).\n You should only provide either `sigma_fn` or `alpha_fn`.\n near_plane: Optional. Near plane distance. Default: 0.0.\n far_plane: Optional. Far plane distance. Default: 1e10.\n t_min: Optional. Per-ray minimum distance. Tensor with shape (n_rays).\n If profided, the marching will start from maximum of t_min and near_plane.\n t_max: Optional. Per-ray maximum distance. Tensor with shape (n_rays).\n If profided, the marching will stop by minimum of t_max and far_plane.\n render_step_size: Step size for marching. Default: 1e-3.\n early_stop_eps: Early stop threshold for skipping invisible space. Default: 1e-4.\n alpha_thre: Alpha threshold for skipping empty space. Default: 0.0.\n stratified: Whether to use stratified sampling. Default: False.\n cone_angle: Cone angle for linearly-increased step size. 0. means\n constant step size. Default: 0.0.\n\n Returns:\n A tuple of {LongTensor, Tensor, Tensor}:\n\n - **ray_indices**: Ray index of each sample. IntTensor with shape (n_samples).\n - **t_starts**: Per-sample start distance. Tensor with shape (n_samples,).\n - **t_ends**: Per-sample end distance. Tensor with shape (n_samples,).\n\n Examples:\n\n .. code-block:: python\n\n >>> ray_indices, t_starts, t_ends = grid.sampling(\n >>> rays_o, rays_d, render_step_size=1e-3)\n >>> t_mid = (t_starts + t_ends) / 2.0\n >>> sample_locs = rays_o[ray_indices] + t_mid * rays_d[ray_indices]\n\n \"\"\"\n\n near_planes = torch.full_like(rays_o[..., 0], fill_value=near_plane)\n far_planes = torch.full_like(rays_o[..., 0], fill_value=far_plane)\n\n if t_min is not None:\n near_planes = torch.clamp(near_planes, min=t_min)\n if t_max is not None:\n far_planes = torch.clamp(far_planes, max=t_max)\n\n if stratified:\n near_planes += torch.rand_like(near_planes) * render_step_size\n intervals, samples, _ = traverse_grids(\n rays_o,\n rays_d,\n self.binaries,\n self.aabbs,\n near_planes=near_planes,\n far_planes=far_planes,\n step_size=render_step_size,\n cone_angle=cone_angle,\n )\n t_starts = intervals.vals[intervals.is_left]\n t_ends = intervals.vals[intervals.is_right]\n ray_indices = samples.ray_indices\n packed_info = samples.packed_info\n\n # skip invisible space\n if (alpha_thre > 0.0 or early_stop_eps > 0.0) and (\n sigma_fn is not None or alpha_fn is not None\n ):\n alpha_thre = min(alpha_thre, self.occs.mean().item())\n\n # Compute visibility of the samples, and filter out invisible samples\n if sigma_fn is not None:\n if t_starts.shape[0] != 0:\n sigmas = sigma_fn(t_starts, t_ends, ray_indices)\n else:\n sigmas = torch.empty((0,), device=t_starts.device)\n assert (\n sigmas.shape == t_starts.shape\n ), \"sigmas must have shape of (N,)! 
Got {}\".format(sigmas.shape)\n masks = render_visibility_from_density(\n t_starts=t_starts,\n t_ends=t_ends,\n sigmas=sigmas,\n packed_info=packed_info,\n early_stop_eps=early_stop_eps,\n alpha_thre=alpha_thre,\n )\n elif alpha_fn is not None:\n if t_starts.shape[0] != 0:\n alphas = alpha_fn(t_starts, t_ends, ray_indices)\n else:\n alphas = torch.empty((0,), device=t_starts.device)\n assert (\n alphas.shape == t_starts.shape\n ), \"alphas must have shape of (N,)! Got {}\".format(alphas.shape)\n masks = render_visibility_from_alpha(\n alphas=alphas,\n packed_info=packed_info,\n early_stop_eps=early_stop_eps,\n alpha_thre=alpha_thre,\n )\n ray_indices, t_starts, t_ends = (\n ray_indices[masks],\n t_starts[masks],\n t_ends[masks],\n )\n return ray_indices, t_starts, t_ends\n\n @torch.no_grad()\n def update_every_n_steps(\n self,\n step: int,\n occ_eval_fn: Callable,\n occ_thre: float = 1e-2,\n ema_decay: float = 0.95,\n warmup_steps: int = 256,\n n: int = 16,\n ) -> None:\n \"\"\"Update the estimator every n steps during training.\n\n Args:\n step: Current training step.\n occ_eval_fn: A function that takes in sample locations :math:`(N, 3)` and\n returns the occupancy values :math:`(N, 1)` at those locations.\n occ_thre: Threshold used to binarize the occupancy grid. Default: 1e-2.\n ema_decay: The decay rate for EMA updates. Default: 0.95.\n warmup_steps: Sample all cells during the warmup stage. After the warmup\n stage we change the sampling strategy to 1/4 uniformly sampled cells\n together with 1/4 occupied cells. Default: 256.\n n: Update the grid every n steps. Default: 16.\n \"\"\"\n if not self.training:\n raise RuntimeError(\n \"You should only call this function only during training. \"\n \"Please call _update() directly if you want to update the \"\n \"field during inference.\"\n )\n if step % n == 0 and self.training:\n self._update(\n step=step,\n occ_eval_fn=occ_eval_fn,\n occ_thre=occ_thre,\n ema_decay=ema_decay,\n warmup_steps=warmup_steps,\n )\n\n # adapted from https://github.com/kwea123/ngp_pl/blob/master/models/networks.py\n @torch.no_grad()\n def mark_invisible_cells(\n self,\n K: Tensor,\n c2w: Tensor,\n width: int,\n height: int,\n near_plane: float = 0.0,\n chunk: int = 32**3,\n ) -> None:\n \"\"\"Mark the cells that aren't covered by the cameras with density -1.\n Should only be executed once before training starts.\n\n Args:\n K: Camera intrinsics of shape (N, 3, 3) or (1, 3, 3).\n c2w: Camera to world poses of shape (N, 3, 4) or (N, 4, 4).\n width: Image width in pixels\n height: Image height in pixels\n near_plane: Near plane distance\n chunk: The chunk size to split the cells (to avoid OOM)\n \"\"\"\n assert K.dim() == 3 and K.shape[1:] == (3, 3)\n assert c2w.dim() == 3 and (\n c2w.shape[1:] == (3, 4) or c2w.shape[1:] == (4, 4)\n )\n assert K.shape[0] == c2w.shape[0] or K.shape[0] == 1\n\n N_cams = c2w.shape[0]\n w2c_R = c2w[:, :3, :3].transpose(2, 1) # (N_cams, 3, 3)\n w2c_T = -w2c_R @ c2w[:, :3, 3:] # (N_cams, 3, 1)\n\n lvl_indices = self._get_all_cells()\n for lvl, indices in enumerate(lvl_indices):\n grid_coords = self.grid_coords[indices]\n\n for i in range(0, len(indices), chunk):\n x = grid_coords[i : i + chunk] / (self.resolution - 1)\n indices_chunk = indices[i : i + chunk]\n # voxel coordinates [0, 1]^3 -> world\n xyzs_w = (\n self.aabbs[lvl, :3]\n + x * (self.aabbs[lvl, 3:] - self.aabbs[lvl, :3])\n ).T\n xyzs_c = w2c_R @ xyzs_w + w2c_T # (N_cams, 3, chunk)\n uvd = K @ xyzs_c # (N_cams, 3, chunk)\n uv = uvd[:, :2] / uvd[:, 2:] # (N_cams, 2, chunk)\n 
in_image = (\n (uvd[:, 2] >= 0)\n & (uv[:, 0] >= 0)\n & (uv[:, 0] < width)\n & (uv[:, 1] >= 0)\n & (uv[:, 1] < height)\n )\n covered_by_cam = (\n uvd[:, 2] >= near_plane\n ) & in_image # (N_cams, chunk)\n # if the cell is visible by at least one camera\n count = covered_by_cam.sum(0) / N_cams\n\n too_near_to_cam = (\n uvd[:, 2] < near_plane\n ) & in_image # (N, chunk)\n # if the cell is too close (in front) to any camera\n too_near_to_any_cam = too_near_to_cam.any(0)\n # a valid cell should be visible by at least one camera and not too close to any camera\n valid_mask = (count > 0) & (~too_near_to_any_cam)\n\n cell_ids_base = lvl * self.cells_per_lvl\n self.occs[cell_ids_base + indices_chunk] = torch.where(\n valid_mask, 0.0, -1.0\n )\n\n @torch.no_grad()\n def _get_all_cells(self) -> List[Tensor]:\n \"\"\"Returns all cells of the grid.\"\"\"\n lvl_indices = []\n for lvl in range(self.levels):\n # filter out the cells with -1 density (non-visible to any camera)\n cell_ids = lvl * self.cells_per_lvl + self.grid_indices\n indices = self.grid_indices[self.occs[cell_ids] >= 0.0]\n lvl_indices.append(indices)\n return lvl_indices\n\n @torch.no_grad()\n def _sample_uniform_and_occupied_cells(self, n: int) -> List[Tensor]:\n \"\"\"Samples both n uniform and occupied cells.\"\"\"\n lvl_indices = []\n for lvl in range(self.levels):\n uniform_indices = torch.randint(\n self.cells_per_lvl, (n,), device=self.device\n )\n # filter out the cells with -1 density (non-visible to any camera)\n cell_ids = lvl * self.cells_per_lvl + uniform_indices\n uniform_indices = uniform_indices[self.occs[cell_ids] >= 0.0]\n occupied_indices = torch.nonzero(self.binaries[lvl].flatten())[:, 0]\n if n < len(occupied_indices):\n selector = torch.randint(\n len(occupied_indices), (n,), device=self.device\n )\n occupied_indices = occupied_indices[selector]\n indices = torch.cat([uniform_indices, occupied_indices], dim=0)\n lvl_indices.append(indices)\n return lvl_indices\n\n @torch.no_grad()\n def _update(\n self,\n step: int,\n occ_eval_fn: Callable,\n occ_thre: float = 0.01,\n ema_decay: float = 0.95,\n warmup_steps: int = 256,\n ) -> None:\n \"\"\"Update the occ field in the EMA way.\"\"\"\n # sample cells\n if step < warmup_steps:\n lvl_indices = self._get_all_cells()\n else:\n N = self.cells_per_lvl // 4\n lvl_indices = self._sample_uniform_and_occupied_cells(N)\n\n for lvl, indices in enumerate(lvl_indices):\n # infer occupancy: density * step_size\n grid_coords = self.grid_coords[indices]\n x = (\n grid_coords + torch.rand_like(grid_coords, dtype=torch.float32)\n ) / self.resolution\n # voxel coordinates [0, 1]^3 -> world\n x = self.aabbs[lvl, :3] + x * (\n self.aabbs[lvl, 3:] - self.aabbs[lvl, :3]\n )\n occ = occ_eval_fn(x).squeeze(-1)\n # ema update\n cell_ids = lvl * self.cells_per_lvl + indices\n self.occs[cell_ids] = torch.maximum(\n self.occs[cell_ids] * ema_decay, occ\n )\n # suppose to use scatter max but emperically it is almost the same.\n # self.occs, _ = scatter_max(\n # occ, indices, dim=0, out=self.occs * ema_decay\n # )\n thre = torch.clamp(self.occs[self.occs >= 0].mean(), max=occ_thre)\n self.binaries = (self.occs > thre).view(self.binaries.shape)"
}
] | import argparse
import math
import pathlib
import time
import imageio
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from lpips import LPIPS
from radiance_fields.ngp import NGPRadianceField
from examples.utils import (
MIPNERF360_UNBOUNDED_SCENES,
NERF_SYNTHETIC_SCENES,
render_image_with_occgrid,
render_image_with_occgrid_test,
set_random_seed,
)
from nerfacc.estimators.occ_grid import OccGridEstimator
from datasets.nerf_360_v2 import SubjectLoader
from datasets.nerf_synthetic import SubjectLoader | 7,547 | """
Copyright (c) 2022 Ruilong Li, UC Berkeley.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_root",
type=str,
# default=str(pathlib.Path.cwd() / "data/360_v2"),
default=str(pathlib.Path.cwd() / "data/nerf_synthetic"),
help="the root dir of the dataset",
)
parser.add_argument(
"--train_split",
type=str,
default="train",
choices=["train", "trainval"],
help="which train split to use",
)
parser.add_argument(
"--scene",
type=str,
default="lego",
choices=NERF_SYNTHETIC_SCENES + MIPNERF360_UNBOUNDED_SCENES,
help="which scene to use",
)
args = parser.parse_args()
device = "cuda:0"
| """
Copyright (c) 2022 Ruilong Li, UC Berkeley.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_root",
type=str,
# default=str(pathlib.Path.cwd() / "data/360_v2"),
default=str(pathlib.Path.cwd() / "data/nerf_synthetic"),
help="the root dir of the dataset",
)
parser.add_argument(
"--train_split",
type=str,
default="train",
choices=["train", "trainval"],
help="which train split to use",
)
parser.add_argument(
"--scene",
type=str,
default="lego",
choices=NERF_SYNTHETIC_SCENES + MIPNERF360_UNBOUNDED_SCENES,
help="which scene to use",
)
args = parser.parse_args()
device = "cuda:0" | set_random_seed(42) | 4 | 2023-11-27 22:12:55+00:00 | 12k |
facebookresearch/SOC-matching | SOC_matching/experiment_settings/settings.py | [
{
"identifier": "optimal_control_LQ",
"path": "SOC_matching/utils.py",
"snippet": "def optimal_control_LQ(sigma, A, P, Q, t):\n R_inverse = torch.matmul(sigma, torch.transpose(sigma, 0, 1))\n Ft = solution_Ricatti(R_inverse, A, P, Q, t)\n ut = -2 * torch.einsum(\"ij,bjk->bik\", torch.transpose(sigma, 0, 1), Ft)\n return ut"
},
{
"identifier": "exponential_t_A",
"path": "SOC_matching/utils.py",
"snippet": "def exponential_t_A(t, A):\n return torch.matrix_exp(t.unsqueeze(1).unsqueeze(2) * A.unsqueeze(0))"
},
{
"identifier": "restricted_SOC",
"path": "SOC_matching/utils.py",
"snippet": "def restricted_SOC(problem, x0, x1, device, cfg):\n\n optim_cfg = OmegaConf.create(\n {\n \"N\": 512, # 512,\n \"num_step\": 200,\n \"lr_mean\": cfg.optim.splines_lr, # 0.02,\n \"lr_gamma\": cfg.optim.splines_lr, # 0.002,\n \"momentum\": 0.0,\n \"nitr\": cfg.method.num_iterations_splines,\n }\n )\n\n B, S = 1, 21 # 1, 11 # number of splines and number of knots\n x0 = x0.repeat((B, 1))\n x1 = x1.repeat((B, 1))\n\n gpath = EndPointGaussianPath(\n mean=init_spline(x0, x1, S),\n sigma=problem.sigma,\n gamma=GammaSpline(\n torch.linspace(0, 1, S),\n torch.ones(B, S, 1),\n sigma=1.0,\n fix_init=True,\n init_knots=1,\n ),\n ).to(device)\n\n result = fit_gpath(problem, gpath, optim_cfg, verbose=True)\n return result"
},
{
"identifier": "LinearControl",
"path": "SOC_matching/models.py",
"snippet": "class LinearControl:\n def __init__(self, u, T):\n self.u = u\n self.T = T\n\n def evaluate(self, t):\n nsteps = self.u.shape[0]\n idx = torch.floor((nsteps - 1) * t / self.T).to(torch.int64)\n return self.u[idx]\n\n def evaluate_tensor(self, t):\n nsteps = self.u.shape[0]\n idx = torch.floor((nsteps - 1) * t / self.T).to(torch.int64)\n return self.u[idx, :, :]\n\n def __call__(self, t, x, t_is_tensor=False):\n if not t_is_tensor:\n if len(self.evaluate(t).shape) == 2:\n evaluate_t = self.evaluate(t)\n else:\n evaluate_t = self.evaluate(t)[0, :, :]\n if len(x.shape) == 2:\n return torch.einsum(\"ij,bj->bi\", evaluate_t, x)\n elif len(x.shape) == 3:\n return torch.einsum(\"ij,abj->abi\", evaluate_t, x)\n else:\n if len(x.shape) == 2:\n return torch.einsum(\"bij,bj->bi\", self.evaluate_tensor(t), x)\n elif len(x.shape) == 3:\n return torch.einsum(\"aij,abj->abi\", self.evaluate_tensor(t), x)"
},
{
"identifier": "ConstantControlLinear",
"path": "SOC_matching/models.py",
"snippet": "class ConstantControlLinear:\n def __init__(self, ut, T):\n self.ut = ut\n self.T = T\n\n def evaluate_ut(self, t):\n nsteps = self.ut.shape[0]\n idx = torch.floor(nsteps * t / self.T).to(torch.int64)\n return self.ut[idx]\n\n def evaluate_ut_tensor(self, t):\n nsteps = self.ut.shape[0]\n idx = torch.floor((nsteps - 1) * t / self.T).to(torch.int64)\n return self.ut[idx, :]\n\n def __call__(self, t, x, t_is_tensor=False):\n if not t_is_tensor:\n control = self.evaluate_ut(t).unsqueeze(0).repeat(x.shape[0], 1)\n else:\n control = self.evaluate_ut_tensor(t).unsqueeze(1).repeat(1, x.shape[1], 1)\n return control"
},
{
"identifier": "LowDimControl",
"path": "SOC_matching/models.py",
"snippet": "class LowDimControl:\n def __init__(self, ut, T, xb, dim, delta_t, delta_x):\n self.ut = ut\n self.T = T\n self.xb = xb\n self.dim = dim\n self.delta_t = delta_t\n self.delta_x = delta_x\n\n def evaluate_ut(self, t, x):\n x_reshape = x.reshape(-1, self.dim)\n t = torch.tensor([t]).to(x.device)\n t = t.reshape(-1, 1).expand(x_reshape.shape[0], 1)\n tx = torch.cat([t, x_reshape], dim=-1)\n\n idx = torch.zeros_like(tx).to(tx.device).to(torch.int64)\n\n idx[:, 0] = torch.ceil(tx[:, 0] / self.delta_t).to(torch.int64)\n\n idx[:, 1:] = torch.floor((tx[:, 1:] + self.xb) / self.delta_x).to(torch.int64)\n idx[:, 1:] = torch.minimum(\n idx[:, 1:],\n torch.tensor(self.ut.shape[1] - 1).to(torch.int64).to(idx.device),\n )\n idx[:, 1:] = torch.maximum(\n idx[:, 1:], torch.tensor(0).to(torch.int64).to(idx.device)\n )\n control = torch.zeros_like(x_reshape)\n for j in range(self.dim):\n idx_j = idx[:, [0, j + 1]]\n ut_j = self.ut[:, :, j]\n control_j = ut_j[idx_j[:, 0], idx_j[:, 1]]\n control[:, j] = control_j\n return control\n\n def evaluate_ut_tensor(self, t, x):\n t = t.reshape(-1, 1, 1).expand(x.shape[0], x.shape[1], 1)\n tx = torch.cat([t, x], dim=-1)\n tx_shape = tx.shape\n tx = tx.reshape(-1, tx.shape[2])\n\n idx = torch.zeros_like(tx).to(tx.device).to(torch.int64)\n\n idx[:, 0] = torch.ceil(tx[:, 0] / self.delta_t).to(torch.int64)\n\n idx[:, 1:] = torch.floor((tx[:, 1:] + self.xb) / self.delta_x).to(torch.int64)\n idx[:, 1:] = torch.minimum(\n idx[:, 1:],\n torch.tensor(self.ut.shape[1] - 1).to(torch.int64).to(idx.device),\n )\n idx[:, 1:] = torch.maximum(\n idx[:, 1:], torch.tensor(0).to(torch.int64).to(idx.device)\n )\n control = torch.zeros_like(tx).to(tx.device)\n for j in range(self.dim):\n idx_j = idx[:, [0, j + 1]]\n ut_j = self.ut[:, :, j]\n control_j = ut_j[idx_j[:, 0], idx_j[:, 1]]\n control[:, j + 1] = control_j\n control = torch.reshape(control, tx_shape)[:, :, 1:]\n return control\n\n def __call__(self, t, x, t_is_tensor=False):\n if not t_is_tensor:\n return self.evaluate_ut(t, x)\n else:\n return self.evaluate_ut_tensor(t, x)"
},
{
"identifier": "RestrictedControl",
"path": "SOC_matching/models.py",
"snippet": "class RestrictedControl:\n def __init__(self, gpath, sigma, b, device, T, B):\n self.device = device\n self.gpath = gpath\n self.sigma = sigma\n self.sigma_inverse = torch.inverse(sigma)\n self.b = b\n self.T = T\n self.B = B\n\n def __call__(self, t, x, verbose=False):\n if verbose:\n print(f\"x.shape in control: {x.shape}\")\n len_2 = len(x.shape) == 2\n if len(x.shape) == 2:\n x = x[None, :, None, :].repeat((self.B, 1, 1, 1))\n t = torch.tensor([t]).to(self.device)\n t = t + 1e-4 if t < self.T / 2 else t - 1e-4\n control = self.gpath.ut(\n t, x, direction=\"fwd\", create_graph_jvp=False, verbose=False\n )\n control_reshape = control[0, :, 0, :]\n b_eval = self.b(t, x[0, :, 0, :])\n output = torch.einsum(\n \"ij,...j->...i\", self.sigma_inverse, control_reshape - b_eval\n )\n return output\n if len(x.shape) == 3:\n x_transpose = (\n torch.transpose(x, 0, 1).unsqueeze(0).repeat((self.B, 1, 1, 1))\n )\n t_copy = t.clone().to(t.device)\n t_copy[t < self.T / 2] = t[t < self.T / 2] + 1e-4\n t_copy[t > self.T / 2] = t[t > self.T / 2] - 1e-4\n control = self.gpath.ut(\n t_copy,\n x_transpose,\n direction=\"fwd\",\n create_graph_jvp=False,\n verbose=False,\n ).detach()\n control_reshape = torch.transpose(control.squeeze(0), 0, 1)\n b_eval = self.b(t_copy, x_transpose.squeeze(0))\n output = torch.einsum(\n \"ij,...j->...i\", self.sigma_inverse, control.squeeze(0) - b_eval\n ).detach()\n return torch.transpose(output, 0, 1)"
},
{
"identifier": "OU_Quadratic",
"path": "SOC_matching/experiment_settings/OU_quadratic.py",
"snippet": "class OU_Quadratic(method.NeuralSDE):\n def __init__(\n self,\n device=\"cuda\",\n dim=2,\n hdims=[256, 128, 64],\n hdims_M=[128, 128],\n u=None,\n lmbd=1.0,\n A=torch.eye(2),\n P=torch.eye(2),\n Q=torch.eye(2),\n sigma=torch.eye(2),\n gamma=3.0,\n scaling_factor_nabla_V=1.0,\n scaling_factor_M=1.0,\n T=1.0,\n u_warm_start=None,\n use_warm_start=False,\n ):\n super().__init__(\n device=device,\n dim=dim,\n hdims=hdims,\n hdims_M=hdims_M,\n u=u,\n lmbd=lmbd,\n sigma=sigma,\n gamma=gamma,\n scaling_factor_nabla_V=scaling_factor_nabla_V,\n scaling_factor_M=scaling_factor_M,\n T=T,\n u_warm_start=u_warm_start,\n use_warm_start=use_warm_start,\n )\n self.A = A\n self.P = P\n self.Q = Q\n\n # Base Drift\n def b(self, t, x):\n return torch.einsum(\"ij,...j->...i\", self.A, x)\n\n # Gradient of base drift\n def nabla_b(self, t, x):\n if len(x.shape) == 2:\n return torch.transpose(self.A.unsqueeze(0).repeat(x.shape[0], 1, 1), 1, 2)\n elif len(x.shape) == 3:\n return torch.transpose(\n self.A.unsqueeze(0).unsqueeze(0).repeat(x.shape[0], x.shape[1], 1, 1),\n 2,\n 3,\n )\n\n # Running cost\n def f(self, t, x):\n return torch.sum(\n x * torch.einsum(\"ij,...j->...i\", self.P, x), -1\n )\n\n # Gradient of running cost\n def nabla_f(self, t, x):\n return 2 * torch.einsum(\"ij,...j->...i\", self.P, x)\n\n # Final cost\n def g(self, x):\n return torch.sum(\n x * torch.einsum(\"ij,...j->...i\", self.Q, x), -1\n )\n\n # Gradient of final cost\n def nabla_g(self, x):\n return 2 * torch.einsum(\"ij,...j->...i\", self.Q, x)"
},
{
"identifier": "OU_Linear",
"path": "SOC_matching/experiment_settings/OU_linear.py",
"snippet": "class OU_Linear(method.NeuralSDE):\n def __init__(\n self,\n device=\"cuda\",\n dim=2,\n u=None,\n hdims=[256, 128, 64],\n hdims_M=[128, 128],\n lmbd=1.0,\n A=torch.eye(2),\n sigma=torch.eye(2),\n omega=torch.ones(2),\n gamma=3.0,\n scaling_factor_nabla_V=1.0,\n scaling_factor_M=1.0,\n ):\n super().__init__(\n device=device,\n dim=dim,\n hdims=hdims,\n hdims_M=hdims_M,\n u=u,\n lmbd=lmbd,\n sigma=sigma,\n gamma=gamma,\n scaling_factor_nabla_V=scaling_factor_nabla_V,\n scaling_factor_M=scaling_factor_M,\n )\n self.A = A\n self.omega = omega\n\n # Base Drift\n def b(self, t, x):\n return torch.einsum(\"ij,...j->...i\", self.A, x)\n\n # Gradient of base drift\n def nabla_b(self, t, x):\n if len(x.shape) == 2:\n return torch.transpose(self.A.unsqueeze(0).repeat(x.shape[0], 1, 1), 1, 2)\n elif len(x.shape) == 3:\n return torch.transpose(\n self.A.unsqueeze(0).unsqueeze(0).repeat(x.shape[0], x.shape[1], 1, 1),\n 2,\n 3,\n )\n elif len(x.shape) == 3:\n return torch.transpose(\n self.A.unsqueeze(0)\n .unsqueeze(0)\n .unsqueeze(0)\n .repeat(x.shape[0], x.shape[1], x.shape[2], 1, 1),\n 3,\n 4,\n )\n\n # Running cost\n def f(self, t, x):\n if len(x.shape) == 2:\n return torch.zeros(x.shape[0]).to(x.device)\n elif len(x.shape) == 3:\n return torch.zeros(x.shape[0], x.shape[1]).to(x.device)\n elif len(x.shape) == 4:\n return torch.zeros(x.shape[0], x.shape[1], x.shape[2]).to(\n x.device\n )\n\n # Gradient of running cost\n def nabla_f(self, t, x):\n return torch.zeros_like(x).to(x.device)\n\n # Final cost\n def g(self, x):\n return torch.einsum(\"j,...j->...\", self.omega, x)\n\n # Gradient of final cost\n def nabla_g(self, x):\n if len(x.shape) == 2:\n return self.omega.unsqueeze(0).repeat(x.shape[0], 1)\n elif len(x.shape) == 3:\n return self.omega.unsqueeze(0).unsqueeze(0).repeat(\n x.shape[0], x.shape[1], 1\n )\n elif len(x.shape) == 3:\n return self.omega.unsqueeze(0).unsqueeze(0).unsqueeze(\n 0\n ).repeat(x.shape[0], x.shape[1], x.shape[2], 1)"
},
{
"identifier": "DoubleWell",
"path": "SOC_matching/experiment_settings/double_well.py",
"snippet": "class DoubleWell(method.NeuralSDE):\n def __init__(\n self,\n device=\"cuda\",\n dim=2,\n hdims=[256, 128, 64],\n hdims_M=[128, 128],\n u=None,\n lmbd=1.0,\n kappa=torch.ones(2),\n nu=torch.ones(2),\n sigma=torch.eye(2),\n gamma=3.0,\n scaling_factor_nabla_V=1.0,\n scaling_factor_M=1.0,\n ):\n super().__init__(\n device=device,\n dim=dim,\n hdims=hdims,\n hdims_M=hdims_M,\n u=u,\n lmbd=lmbd,\n sigma=sigma,\n gamma=gamma,\n scaling_factor_nabla_V=scaling_factor_nabla_V,\n scaling_factor_M=scaling_factor_M,\n )\n self.kappa = kappa\n self.nu = nu\n\n # Base Drift\n def b(self, t, x):\n if len(x.shape) == 2:\n return -2 * self.kappa.unsqueeze(0) * (x**2 - 1) * 2 * x\n elif len(x.shape) == 3:\n return -2 * self.kappa.unsqueeze(0).unsqueeze(0) * (x**2 - 1) * 2 * x\n\n # Gradient of base drift\n def nabla_b(self, t, x):\n if len(x.shape) == 2:\n return -torch.diag_embed(\n 8 * self.kappa.unsqueeze(0) * x**2\n + 4 * self.kappa.unsqueeze(0) * (x**2 - 1)\n )\n elif len(x.shape) == 3:\n return -torch.diag_embed(\n 8 * self.kappa.unsqueeze(0).unsqueeze(0) * x**2\n + 4 * self.kappa.unsqueeze(0).unsqueeze(0) * (x**2 - 1)\n )\n\n # Running cost\n def f(self, t, x):\n if len(x.shape) == 2:\n return torch.zeros(x.shape[0]).to(x.device)\n elif len(x.shape) == 3:\n return torch.zeros(x.shape[0], x.shape[1]).to(x.device)\n\n # Gradient of running cost\n def nabla_f(self, t, x):\n return torch.zeros_like(x).to(x.device)\n\n # Final cost\n def g(self, x):\n if len(x.shape) == 2:\n return torch.sum(\n self.nu.unsqueeze(0) * (x**2 - 1) ** 2, dim=1\n )\n elif len(x.shape) == 3:\n return torch.sum(\n self.nu.unsqueeze(0).unsqueeze(0) * (x**2 - 1) ** 2,\n dim=2,\n )\n\n # Gradient of final cost\n def nabla_g(self, x):\n if len(x.shape) == 2:\n return 2 * self.nu.unsqueeze(0) * (x**2 - 1) * 2 * x\n elif len(x.shape) == 3:\n return (\n 2\n * self.nu.unsqueeze(0).unsqueeze(0)\n * (x**2 - 1)\n * 2\n * x\n )\n\n # Potential\n def potential(self, x):\n # return torch.einsum('j,bj->b', self.gamma, x)\n if len(x.shape) == 2:\n return torch.sum(\n self.kappa.unsqueeze(0) * (x**2 - 1) ** 2, dim=1\n )\n elif len(x.shape) == 1:\n return torch.sum(self.kappa * (x**2 - 1) ** 2)\n\n # Scalar potential\n def scalar_potential(self, x, idx, cpu=False):\n if cpu:\n return self.kappa.cpu()[idx] * (x**2 - 1) ** 2\n else:\n return self.kappa[idx] * (x**2 - 1) ** 2\n\n # Scalar Base Drift\n def scalar_b(self, t, x, idx):\n return -2 * self.kappa[idx] * (x**2 - 1) * 2 * x\n\n # Running cost\n def scalar_f(self, t, x, idx):\n return torch.zeros_like(x).to(x.device)\n\n # Final cost\n def scalar_g(self, x, idx, cpu=False):\n if cpu:\n return self.nu.cpu()[idx] * (x**2 - 1) ** 2\n else:\n return self.nu[idx] * (x**2 - 1) ** 2\n\n # Optimal control\n def compute_reference_solution(\n self, T=1.0, delta_t=0.005, delta_x=0.005, xb=2.5, lmbd=1.0, idx=0\n ):\n nx = int(2.0 * xb / delta_x)\n\n beta = 2\n\n xvec = np.linspace(-xb, xb, nx, endpoint=True)\n\n # A = D^{-1} L D\n # assumes Neumann boundary conditions\n\n A = np.zeros([nx, nx])\n for i in range(0, nx):\n\n x = -xb + (i + 0.5) * delta_x\n if i > 0:\n x0 = -xb + (i - 0.5) * delta_x\n x1 = -xb + i * delta_x\n A[i, i - 1] = (\n -np.exp(\n beta\n * 0.5\n * (\n self.scalar_potential(x0, idx, cpu=True)\n + self.scalar_potential(x, idx, cpu=True)\n - 2 * self.scalar_potential(x1, idx, cpu=True)\n )\n )\n / delta_x**2\n )\n A[i, i] = (\n np.exp(\n beta\n * (\n self.scalar_potential(x, idx, cpu=True)\n - self.scalar_potential(x1, idx, cpu=True)\n )\n )\n / delta_x**2\n )\n if i < nx - 
1:\n x0 = -xb + (i + 1.5) * delta_x\n x1 = -xb + (i + 1) * delta_x\n A[i, i + 1] = (\n -np.exp(\n beta\n * 0.5\n * (\n self.scalar_potential(x0, idx, cpu=True)\n + self.scalar_potential(x, idx, cpu=True)\n - 2 * self.scalar_potential(x1, idx, cpu=True)\n )\n )\n / delta_x**2\n )\n A[i, i] = (\n A[i, i]\n + np.exp(\n beta\n * (\n self.scalar_potential(x, idx, cpu=True)\n - self.scalar_potential(x1, idx, cpu=True)\n )\n )\n / delta_x**2\n )\n\n A = -A / beta\n N = int(T / delta_t)\n\n sc_potential = self.scalar_potential(xvec, idx, cpu=True)\n D = np.diag(np.exp(beta * sc_potential / 2))\n D_inv = np.diag(np.exp(-beta * sc_potential / 2))\n\n psi = np.zeros([N + 1, nx])\n psi[N, :] = np.exp(-self.scalar_g(xvec, idx, cpu=True))\n\n for n in range(N - 1, -1, -1):\n band = -delta_t * np.vstack(\n [\n np.append([0], np.diagonal(A, offset=1)),\n np.diagonal(A, offset=0) - N / T,\n np.append(np.diagonal(A, offset=1), [0]),\n ]\n )\n\n psi[n, :] = D.dot(solve_banded([1, 1], band, D_inv.dot(psi[n + 1, :])))\n\n ut_discrete = np.zeros([N + 1, nx - 1])\n for n in range(N + 1):\n for i in range(nx - 1):\n ut_discrete[n, i] = (\n -2\n / beta\n * self.sigma[idx, idx]\n * (-np.log(psi[n, i + 1]) + np.log(psi[n, i]))\n / delta_x\n )\n\n print(f\"ut_discrete computed\")\n return ut_discrete"
},
{
"identifier": "MolecularDynamics",
"path": "SOC_matching/experiment_settings/molecular_dynamics.py",
"snippet": "class MolecularDynamics(method.NeuralSDE):\n def __init__(\n self,\n device=\"cuda\",\n dim=2,\n hdims=[256, 128, 64],\n hdims_M=[128, 128],\n u=None,\n lmbd=1.0,\n kappa=torch.ones(2),\n sigma=torch.eye(2),\n gamma=3.0,\n scaling_factor_nabla_V=1.0,\n scaling_factor_M=1.0,\n T=1.0,\n u_warm_start=None,\n use_warm_start=False,\n use_stopping_time=False,\n ):\n super().__init__(\n device=device,\n dim=dim,\n hdims=hdims,\n hdims_M=hdims_M,\n u=u,\n lmbd=lmbd,\n sigma=sigma,\n gamma=gamma,\n scaling_factor_nabla_V=scaling_factor_nabla_V,\n scaling_factor_M=scaling_factor_M,\n T=T,\n u_warm_start=u_warm_start,\n use_warm_start=use_warm_start,\n use_stopping_time=use_stopping_time,\n )\n self.kappa = kappa\n\n # Base Drift\n def b(self, t, x):\n if len(x.shape) == 2:\n return -2 * self.kappa.unsqueeze(0) * (x**2 - 1) * 2 * x\n elif len(x.shape) == 3:\n return -2 * self.kappa.unsqueeze(0).unsqueeze(0) * (x**2 - 1) * 2 * x\n\n # Gradient of base drift\n def nabla_b(self, t, x):\n if len(x.shape) == 2:\n return -torch.diag_embed(\n 8 * self.kappa.unsqueeze(0) * x**2\n + 4 * self.kappa.unsqueeze(0) * (x**2 - 1)\n )\n elif len(x.shape) == 3:\n return -torch.diag_embed(\n 8 * self.kappa.unsqueeze(0).unsqueeze(0) * x**2\n + 4 * self.kappa.unsqueeze(0).unsqueeze(0) * (x**2 - 1)\n )\n\n # Final cost\n def g(self, x):\n \"\"\"\n x: (B, D)\n output: (B,)\n \"\"\"\n return torch.zeros_like(x[..., 0])\n\n def nabla_g(self, x):\n return torch.zeros_like(x)\n\n # Running cost\n def f(self, t, x):\n \"\"\"\n x: (T, B, D) or (B, D)\n output: (T, B) or (B)\n \"\"\"\n return torch.ones_like(x[..., 0])\n\n def nabla_f(self, t, x):\n return torch.zeros_like(x)\n\n # Stopping condition, process stops when Phi(x) < 0\n def Phi(self, x):\n if len(x.shape) == 2:\n return -x[:, 0]\n elif len(x.shape) == 3:\n return -x[:, :, 0]"
}
] | import torch
from SOC_matching.utils import (
optimal_control_LQ,
exponential_t_A,
restricted_SOC,
)
from SOC_matching.models import (
LinearControl,
ConstantControlLinear,
LowDimControl,
RestrictedControl,
)
from SOC_matching.experiment_settings.OU_quadratic import OU_Quadratic
from SOC_matching.experiment_settings.OU_linear import OU_Linear
from SOC_matching.experiment_settings.double_well import DoubleWell
from SOC_matching.experiment_settings.molecular_dynamics import MolecularDynamics | 7,255 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory.
def ground_truth_control(cfg, ts, x0, **kwargs):
if (
cfg.method.setting == "OU_quadratic_easy"
or cfg.method.setting == "OU_quadratic_hard"
):
R_inverse = torch.matmul(
kwargs["sigma"], torch.transpose(kwargs["sigma"], 0, 1)
)
R = torch.inverse(R_inverse)
ut = optimal_control_LQ(
kwargs["sigma"], kwargs["A"], kwargs["P"], kwargs["Q"], ts
)
ut = LinearControl(ut, cfg.method.T)
optimal_sde = OU_Quadratic(
u=ut,
lmbd=cfg.method.lmbd,
A=kwargs["A"],
P=kwargs["P"],
Q=kwargs["Q"],
sigma=kwargs["sigma"],
T=cfg.method.T,
)
return optimal_sde
elif cfg.method.setting == "OU_linear":
exp_matrix = exponential_t_A(
cfg.method.T - ts, torch.transpose(kwargs["A"], 0, 1)
)
ut = -torch.einsum(
"aij,j->ai",
torch.einsum(
"ij,ajk->aik", torch.transpose(kwargs["sigma"], 0, 1), exp_matrix
),
kwargs["omega"],
)
ut = ConstantControlLinear(ut, cfg.method.T)
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory.
def ground_truth_control(cfg, ts, x0, **kwargs):
if (
cfg.method.setting == "OU_quadratic_easy"
or cfg.method.setting == "OU_quadratic_hard"
):
R_inverse = torch.matmul(
kwargs["sigma"], torch.transpose(kwargs["sigma"], 0, 1)
)
R = torch.inverse(R_inverse)
ut = optimal_control_LQ(
kwargs["sigma"], kwargs["A"], kwargs["P"], kwargs["Q"], ts
)
ut = LinearControl(ut, cfg.method.T)
optimal_sde = OU_Quadratic(
u=ut,
lmbd=cfg.method.lmbd,
A=kwargs["A"],
P=kwargs["P"],
Q=kwargs["Q"],
sigma=kwargs["sigma"],
T=cfg.method.T,
)
return optimal_sde
elif cfg.method.setting == "OU_linear":
exp_matrix = exponential_t_A(
cfg.method.T - ts, torch.transpose(kwargs["A"], 0, 1)
)
ut = -torch.einsum(
"aij,j->ai",
torch.einsum(
"ij,ajk->aik", torch.transpose(kwargs["sigma"], 0, 1), exp_matrix
),
kwargs["omega"],
)
ut = ConstantControlLinear(ut, cfg.method.T)
| optimal_sde = OU_Linear( | 8 | 2023-12-04 20:26:18+00:00 | 12k |
yiwenlu66/learning-qp | experiments/tank/visualize_feasible_sets.py | [
{
"identifier": "sys_param",
"path": "src/envs/env_creators.py",
"snippet": "def tank_initial_generator(size, device, rng):\ndef tank_ref_generator(size, device, rng):\ndef tank_randomizer(size, device, rng):\n B = torch.tensor(sys_param[\"tank\"][\"B\"], device=device, dtype=torch.float).unsqueeze(0)"
},
{
"identifier": "get_mpc_baseline_parameters",
"path": "src/envs/mpc_baseline_parameters.py",
"snippet": "def get_mpc_baseline_parameters(env_name, N, noise_std=0.):\n mpc_parameters = {\n \"n_mpc\": sys_param[env_name][\"n\"],\n \"m_mpc\": sys_param[env_name][\"m\"],\n \"N\": N,\n **sys_param[env_name],\n }\n if env_name == \"tank\":\n # Compute state and ref from obs: the first n entries of obs is state, and the latter n entries are ref\n mpc_parameters[\"obs_to_state_and_ref\"] = lambda obs: (obs[:, :mpc_parameters[\"n_mpc\"]], obs[:, mpc_parameters[\"n_mpc\"]:])\n A_nom = sys_param[env_name][\"A\"]\n A_max = np.copy(A_nom)\n A_max[tuple(zip(*[(0, 0), (0, 2), (1, 1), (1, 3), (2, 2), (3, 3)]))] += 0.002\n B_nom = sys_param[env_name][\"B\"]\n B_max = np.copy(B_nom)\n B_max *= 1.02\n mpc_parameters[\"A_scenarios\"] = [A_nom, A_max]\n mpc_parameters[\"B_scenarios\"] = [B_nom, B_max]\n n_mpc = mpc_parameters[\"n_mpc\"]\n mpc_parameters[\"w_scenarios\"] = [\n np.zeros((n_mpc, 1)),\n 3 * noise_std * np.ones((n_mpc, 1)),\n -3 * noise_std * np.ones((n_mpc, 1)),\n ]\n # mpc_parameters[\"max_disturbance_per_dim\"] = 0.5 * (3 * noise_std + 20 * 0.002 * 2 + 8 * 0.02 * 2)\n if env_name == \"cartpole\":\n # Compute A, B matrices for linearized system\n m_pole = mpc_parameters[\"m_pole_nom\"]\n m_cart = mpc_parameters[\"m_cart_nom\"]\n l = mpc_parameters[\"l_nom\"]\n g = 9.8\n\n # Continuous time A, B matrices\n A_ct = np.array([\n [0, 1, 0, 0],\n [0, 0, -g * m_pole / m_cart, 0],\n [0, 0, 0, 1],\n [0, 0, (m_cart + m_pole) * g / (l * m_cart) , 0],\n ])\n B_ct = np.array([\n [0],\n [1 / m_cart],\n [0],\n [-1 / (l * m_cart)],\n ])\n\n # Discretization\n dt = sys_param[env_name][\"dt\"]\n A = np.eye(4) + dt * A_ct\n B = dt * B_ct\n\n mpc_parameters[\"A\"] = A\n mpc_parameters[\"B\"] = B\n\n # Compute state and ref from obs: obs is in format (x, x_ref, x_dot, sin_theta, cos_theta, theta_dot)\n def obs_to_state_and_ref(obs):\n x, x_dot, theta, theta_dot, x_ref = obs[:, 0], obs[:, 1], obs[:, 2], obs[:, 3], obs[:, 4]\n state = torch.stack([x, x_dot, theta, theta_dot], dim=1)\n zeros = torch.zeros_like(x_ref)\n ref = torch.stack([x_ref, zeros, zeros, zeros], dim=1)\n return state, ref\n mpc_parameters[\"obs_to_state_and_ref\"] = obs_to_state_and_ref\n\n return mpc_parameters"
},
{
"identifier": "QPUnrolledNetwork",
"path": "src/modules/qp_unrolled_network.py",
"snippet": "class QPUnrolledNetwork(nn.Module):\n \"\"\"\n Learn a QP problem from the input using a MLP, then solve the QP using fixed number of unrolled PDHG iterations.\n\n Form of QP:\n minimize (1/2)x'Px + q'x\n subject to Hx + b >= 0,\n where x in R^n, b in R^m.\n \"\"\"\n def __init__(\n self, device, input_size, n_qp, m_qp, qp_iter, mlp_builder,\n shared_PH=False,\n affine_qb=False,\n strict_affine_layer=False,\n obs_has_half_ref=False,\n symmetric=False,\n no_b=False,\n use_warm_starter=False,\n train_warm_starter=False,\n ws_loss_coef=1.,\n ws_update_rate=0.01,\n ws_loss_shaper=lambda x: x ** (1 / 2),\n mpc_baseline=None,\n use_osqp_for_mpc=False,\n imitate_mpc=False,\n use_residual_loss=False,\n force_feasible=False,\n feasible_lambda=10,\n is_test=False,\n ):\n \"\"\"mlp_builder is a function mapping (input_size, output_size) to a nn.Sequential object.\n\n If shared_PH == True, P and H are parameters indepedent of input, and q and b are functions of input;\n Otherwise, (P, H, q, b) are all functions of input.\n\n If affine_qb == True, then q and b are restricted to be affine functions of input.\n\n If strict_affine_layer == True (only effective when affine_qb=True), then:\n 1. q is linear w.r.t. (x0, xref) (no bias)\n 2. b is affine w.r.t. x0 (no dependence on xref)\n\n If obs_has_half_ref == True, the policy knows that the observation is in the form (x0, xref), with each taking up half of the dimension of the observation.\n\n If symmetric == True (only effective when affine_qb=True), then:\n 1. The bias terms are disabled in the modeling of q and b, i.e., q = Wq * x, b = Wb * x.\n 2. The constraint is assumed to be -1 <= Hx + b <= 1, instead of Hx + b >= 0.\n\n If no_b == True in addition to symmetric == True, then b is skipped altogether, i.e., the constraint is assumed to be -1 <= Hx <= 1.\n\n If mpc_baseline != None and imitate_mpc == False, then the forward function directly returns the solution of the MPC problem, instead of solving the learned QP problem. Can be used for benchmarking MPC.\n\n If mpc_baseline != None and imitate_mpc == True, then the forward function returns the solution of the learned QP problem, but a loss term is computed using the MPC problem. Can be used for supervised imitation learning.\n\n If force_feasible == True, solve the following problem instead of the original QP problem:\n minimize_{x,y} (1/2)x'Px + q'x + lambda * y^2\n s.t. 
Hx + b + y * 1 >= 0, y >= 0,\n where x in R^n, y in R.\n In this case, the solution returned will be of dimension (n + 1).\n \"\"\"\n\n super().__init__()\n\n self.shared_PH = shared_PH\n self.affine_qb = affine_qb\n self.strict_affine_layer = strict_affine_layer\n self.obs_has_half_ref = obs_has_half_ref\n\n self.device = device\n self.input_size = input_size\n\n # QP dimensions: there are the number of variables and constraints WITHOUT considering the slack variable\n self.n_qp = n_qp\n self.m_qp = m_qp\n\n self.qp_iter = qp_iter\n\n self.symmetric = symmetric\n self.no_b = no_b\n\n self.n_P_param = n_qp * (n_qp + 1) // 2\n self.n_q_param = n_qp\n self.n_H_param = m_qp * n_qp\n self.n_b_param = m_qp if not self.no_b else 0\n\n self.n_mlp_output = 0\n if not self.shared_PH:\n self.n_mlp_output += (self.n_P_param + self.n_H_param)\n self.P_params = None\n self.H_params = None\n else:\n self.P_params = nn.Parameter(torch.randn((self.n_P_param,), device=device))\n self.H_params = nn.Parameter(torch.randn((self.n_H_param,), device=device))\n\n if not self.affine_qb:\n self.n_mlp_output += (self.n_q_param + self.n_b_param)\n self.qb_affine_layer = None\n else:\n if not self.strict_affine_layer:\n self.qb_affine_layer = nn.Linear(input_size, self.n_q_param + self.n_b_param, bias=not self.symmetric)\n else:\n self.qb_affine_layer = StrictAffineLayer(input_size, self.n_qp, self.m_qp, self.obs_has_half_ref)\n\n if self.n_mlp_output > 0:\n self.mlp = mlp_builder(input_size, self.n_mlp_output)\n else:\n self.mlp = None\n\n # TODO: add preconditioner\n self.warm_starter = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None\n self.warm_starter_delayed = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None\n self.train_warm_starter = train_warm_starter\n self.ws_loss_coef = ws_loss_coef\n self.ws_update_rate = ws_update_rate\n self.ws_loss_shaper = ws_loss_shaper\n\n # P, H are fixed when the model is in test mode, and they are constant across all states (i.e., shared_PH == True)\n self.fixed_PH = is_test and shared_PH\n\n # Includes losses generated by the model itself (indepedent of interaction with env), e.g., warm starting & preconditioning\n self.autonomous_losses = {}\n\n self.mpc_baseline = mpc_baseline\n self.use_osqp_for_mpc = use_osqp_for_mpc\n\n self.imitate_mpc = imitate_mpc\n\n # Whether to consider residual loss during training - this can encourage feasibility of the learned QP problem\n self.use_residual_loss = use_residual_loss\n\n # Whether to force the problem to be feasible\n self.force_feasible = force_feasible\n self.feasible_lambda = feasible_lambda\n\n self.solver = None\n\n self.info = {}\n\n # Reserved for storing the controllers for each simulation instance when robust MPC is enabled\n self.robust_controllers = []\n\n # Store info returned by env\n self.env_info = {}\n\n # When running batch testing, mask envs already done, to speed up computation (implemented for robust mpc); initialized at inference time since batch size is not known during initialization\n self.is_active = None\n\n\n def initialize_solver(self):\n # If the problem is forced to be feasible, the dimension of the solution is increased by 1 (introduce slack variable)\n n_qp_actual = self.n_qp + 1 if self.force_feasible else self.n_qp\n m_qp_actual = self.m_qp + 1 if self.force_feasible else self.m_qp\n\n # is_warm_starter_trainable is always False, since the warm starter is trained via another inference independent of 
the solver\n # When self.fixed_PH == True, the solver is initialized with fixed P, H matrices; otherwise, P, H are not passed to the solver during initialization time, but computed during the forward pass instead\n if not self.fixed_PH:\n self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible)\n else:\n # Should be called after loading state dict\n Pinv, H = self.get_PH()\n self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, Pinv=Pinv.squeeze(0), H=H.squeeze(0), warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible)\n\n def compute_warm_starter_loss(self, q, b, Pinv, H, solver_Xs):\n qd, bd, Pinvd, Hd = map(lambda t: t.detach() if t is not None else None, [q, b, Pinv, H])\n X0 = self.warm_starter(qd, bd, Pinvd, Hd)\n gt = solver_Xs[:, -1, :].detach()\n return self.ws_loss_coef * self.ws_loss_shaper(((gt - X0) ** 2).sum(dim=-1).mean())\n\n def parallel_controller_creation(self, controller_creator, xref_np, bs):\n \"\"\"\n Create robust MPC controlller in parallel\n \"\"\"\n # Helper function for parallel execution\n def task_creator(index):\n return controller_creator(self.mpc_baseline, xref_np[index, :])\n\n with ThreadPoolExecutor() as executor:\n # Executing the tasks in parallel\n results = executor.map(task_creator, range(bs))\n\n # Collecting the results\n self.robust_controllers.extend(results)\n\n def run_mpc_baseline(self, x, use_osqp_oracle=False):\n robust_method = self.mpc_baseline.get(\"robust_method\", None)\n x0, xref = self.mpc_baseline[\"obs_to_state_and_ref\"](x)\n bs = x.shape[0]\n\n # Conversions between torch and np\n t = lambda a: torch.tensor(a, device=x.device, dtype=torch.float)\n f = lambda t: t.detach().cpu().numpy()\n f_sparse = lambda t: scipy.sparse.csc_matrix(t.cpu().numpy())\n\n if robust_method is None:\n # Run vanilla MPC without robustness\n eps = 1e-3\n n, m, P, q, H, b = mpc2qp(\n self.mpc_baseline[\"n_mpc\"],\n self.mpc_baseline[\"m_mpc\"],\n self.mpc_baseline[\"N\"],\n t(self.mpc_baseline[\"A\"]),\n t(self.mpc_baseline[\"B\"]),\n t(self.mpc_baseline[\"Q\"]),\n t(self.mpc_baseline[\"R\"]),\n self.mpc_baseline[\"x_min\"] + eps,\n self.mpc_baseline[\"x_max\"] - eps,\n self.mpc_baseline[\"u_min\"],\n self.mpc_baseline[\"u_max\"],\n x0,\n xref,\n normalize=self.mpc_baseline.get(\"normalize\", False),\n Qf=self.mpc_baseline.get(\"terminal_coef\", 0.) 
* t(np.eye(self.mpc_baseline[\"n_mpc\"])) if self.mpc_baseline.get(\"Qf\", None) is None else t(self.mpc_baseline[\"Qf\"]),\n )\n if not use_osqp_oracle:\n solver = QPSolver(x.device, n, m, P=P, H=H)\n Xs, primal_sols = solver(q, b, iters=100)\n sol = primal_sols[:, -1, :]\n else:\n osqp_oracle_with_iter_count = functools.partial(osqp_oracle, return_iter_count=True)\n if q.shape[0] > 1:\n sol_np, iter_counts = np_batch_op(osqp_oracle_with_iter_count, f(q), f(b), f_sparse(P), f_sparse(H))\n sol = t(sol_np)\n else:\n sol_np, iter_count = osqp_oracle_with_iter_count(f(q[0, :]), f(b[0, :]), f_sparse(P), f_sparse(H))\n sol = t(sol_np).unsqueeze(0)\n iter_counts = np.array([iter_count])\n # Save OSQP iteration counts into the info dict\n if \"osqp_iter_counts\" not in self.info:\n self.info[\"osqp_iter_counts\"] = iter_counts\n else:\n self.info[\"osqp_iter_counts\"] = np.concatenate([self.info[\"osqp_iter_counts\"], iter_counts])\n return sol, (P.unsqueeze(0), q, H.unsqueeze(0), b)\n\n elif robust_method in [\"scenario\", \"tube\"]:\n # Set up scenario or tube MPC\n if not self.robust_controllers:\n # Create a controller for each simulation instance, according to the current reference (note: this assumes that the mapping from instance index to reference is constant)\n controller_creator = {\n \"scenario\": scenario_robust_mpc,\n \"tube\": tube_robust_mpc,\n }[robust_method]\n xref_np = f(xref)\n self.parallel_controller_creation(controller_creator, xref_np, bs)\n self.is_active = np.ones((bs,), dtype=bool)\n\n # Get solutions according to current state\n x0_np = f(x0)\n already_on_stats = f(self.env_info.get(\"already_on_stats\", torch.zeros((bs,), dtype=bool))).astype(bool)\n self.is_active = np.logical_not(already_on_stats) & self.is_active # Skip computation for instances already done\n get_solution = lambda i: self.robust_controllers[i](x0_np[i, :], is_active=self.is_active[i])\n sol_np, running_time = np_batch_op(get_solution, np.arange(bs))\n sol = t(sol_np)\n\n # Save running time to info dict\n non_zero_mask = running_time != 0. 
# Filter out instances that are already done\n running_time_eff = running_time[non_zero_mask]\n if \"running_time\" not in self.info:\n self.info[\"running_time\"] = running_time_eff\n else:\n self.info[\"running_time\"] = np.concatenate([self.info[\"running_time\"], running_time_eff])\n\n return sol, None\n\n\n def get_PH(self, mlp_out=None):\n \"\"\"\n Compute P, H matrices from the parameters.\n Notice: returns (Pinv, H) instead of (P, H)\n \"\"\"\n # Decode MLP output\n end = 0\n if not self.shared_PH:\n start = end\n end = start + self.n_P_param\n P_params = mlp_out[:, start:end]\n start = end\n end = start + self.n_H_param\n H_params = mlp_out[:, start:end]\n else:\n P_params = self.P_params.unsqueeze(0)\n H_params = self.H_params.unsqueeze(0)\n\n # Reshape P, H vectors into matrices\n Pinv = make_psd(P_params, min_eig=1e-2)\n H = H_params.view(-1, self.m_qp, self.n_qp)\n\n # If the problem is forced to be feasible, compute the parameters (\\tilde{P}, \\tilde{H}) of the augmented problem\n # \\tilde{P} = [P, 0; 0, lambda]\n if self.force_feasible:\n zeros_n = torch.zeros((1, self.n_qp, 1), device=self.device)\n I = torch.eye(1, device=self.device).unsqueeze(0)\n tilde_P_inv = torch.cat([\n torch.cat([Pinv, zeros_n], dim=2),\n torch.cat([zeros_n.transpose(1, 2), 1 / self.feasible_lambda * I], dim=2)\n ], dim=1)\n # \\tilde{H} = [H, I; 0, I]\n ones_m = torch.ones((1, self.m_qp, 1), device=self.device)\n tilde_H = torch.cat([\n torch.cat([H, ones_m], dim=2),\n torch.cat([zeros_n.transpose(1, 2), I], dim=2)\n ], dim=1)\n Pinv, H = tilde_P_inv, tilde_H\n return Pinv, H\n\n def get_qb(self, x, mlp_out=None):\n \"\"\"\n Compute q, b vectors from the parameters.\n \"\"\"\n bs = x.shape[0]\n end = self.n_P_param + self.n_H_param if not self.shared_PH else 0\n if not self.affine_qb:\n start = end\n end = start + self.n_q_param\n q = mlp_out[:, start:end]\n start = end\n end = start + self.n_b_param\n b = mlp_out[:, start:end]\n else:\n qb = self.qb_affine_layer(x)\n q = qb[:, :self.n_q_param]\n b = qb[:, self.n_q_param:]\n if self.no_b:\n b = torch.zeros((bs, self.m_qp), device=self.device)\n\n # If the problem is forced to be feasible, compute the parameters (\\tilde{q}, \\tilde{b}) of the augmented problem\n if self.force_feasible:\n zeros_1 = torch.zeros((bs, 1), device=self.device)\n # \\tilde{q} = [q; 0]\n tilde_q = torch.cat([q, zeros_1], dim=1)\n # \\tilde{b} = [b; 0]\n tilde_b = torch.cat([b, zeros_1], dim=1)\n q, b = tilde_q, tilde_b\n\n return q, b\n\n def forward(self, x, return_problem_params=False, info=None):\n if info is not None:\n self.env_info = info\n if self.mpc_baseline is not None:\n mpc_sol, mpc_problem_params = self.run_mpc_baseline(x, use_osqp_oracle=self.use_osqp_for_mpc)\n\n if (self.mpc_baseline is not None) and (not self.imitate_mpc):\n # MPC solution is directly used as the final solution\n sol, problem_params = mpc_sol, mpc_problem_params\n else:\n # Check whether solver has been initialized\n if self.solver is None:\n self.initialize_solver()\n\n bs = x.shape[0]\n\n # Run MLP forward pass, if necessary\n if self.mlp is not None:\n mlp_out = self.mlp(x)\n else:\n mlp_out = None\n\n # Compute P, H, if they are not fixed\n if not self.fixed_PH:\n Pinv, H = self.get_PH(mlp_out)\n else:\n Pinv, H = None, None\n\n # Compute q, b\n q, b = self.get_qb(x, mlp_out)\n\n # Update parameters of warm starter with a delay to stabilize training\n if self.train_warm_starter:\n 
self.warm_starter_delayed.load_state_dict(interpolate_state_dicts(self.warm_starter_delayed.state_dict(), self.warm_starter.state_dict(), self.ws_update_rate))\n\n # Run solver forward\n if self.use_residual_loss:\n Xs, primal_sols, residuals = self.solver(q, b, Pinv=Pinv, H=H, iters=self.qp_iter, return_residuals=True)\n primal_residual, dual_residual = residuals\n residual_loss = ((primal_residual ** 2).sum(dim=-1) + (dual_residual ** 2).sum(dim=-1)).mean()\n self.autonomous_losses[\"residual\"] = 1e-3 * residual_loss\n else:\n Xs, primal_sols = self.solver(q, b, Pinv=Pinv, H=H, iters=self.qp_iter)\n sol = primal_sols[:, -1, :]\n\n # Compute warm starter loss\n if self.train_warm_starter:\n self.autonomous_losses[\"warm_starter\"] = self.compute_warm_starter_loss(q, b, Pinv, H, Xs)\n\n # Compute imitation loss\n if self.imitate_mpc:\n # Use min(n of learned qp, n of mpc) as the common dimension of solution\n sol_dim = min(self.n_qp, mpc_sol.shape[-1])\n self.autonomous_losses[\"imitation_only\"] = ((sol[:, :sol_dim] - mpc_sol[:, :sol_dim]) ** 2).sum(dim=-1).mean()\n\n if return_problem_params:\n problem_params = (torch.linalg.inv(Pinv), q, H, b)\n\n if not return_problem_params:\n # Only return the solution\n return sol\n else:\n # Return the solution as well as (P, q, H, b)\n return sol, problem_params"
},
{
"identifier": "bmv",
"path": "src/utils/torch_utils.py",
"snippet": "def bmv(A, b):\n \"\"\"Compute matrix multiply vector in batch mode.\"\"\"\n bs = b.shape[0]\n if A.shape[0] == 1:\n # The same A for different b's; use matrix multiplication instead of broadcasting\n return (A.squeeze(0) @ b.t()).t()\n else:\n return (A @ b.unsqueeze(-1)).squeeze(-1)"
},
{
"identifier": "plot_multiple_2d_polytopes_with_contour",
"path": "src/utils/visualization.py",
"snippet": "def plot_multiple_2d_polytopes_with_contour(polytope_contour_params):\n \"\"\"\n Plot multiple 2D polytopes each defined by Ax <= b and overlay the contour of a quadratic function.\n \n Parameters:\n - polytope_contour_params (list of dict): List of dictionaries containing A, b, optimal_solution, P, q, and label.\n \n Returns:\n - fig (matplotlib.figure.Figure): Figure object.\n - ax (matplotlib.axes._subplots.AxesSubplot): Axis object.\n \"\"\"\n \n fig, ax = plt.subplots()\n \n # Determine global x and y limits\n all_vertices = []\n for params in polytope_contour_params:\n interior_point = find_interior_point(params['A'], params['b'])\n if interior_point is not None:\n vertices = HalfspaceIntersection(np.hstack([params['A'], -params['b'][:, np.newaxis]]), interior_point).intersections\n all_vertices.append(vertices)\n all_vertices = np.vstack(all_vertices)\n \n margin = 0.5 # Additional margin around the polytopes\n x_range = np.max(all_vertices[:, 0]) - np.min(all_vertices[:, 0])\n y_range = np.max(all_vertices[:, 1]) - np.min(all_vertices[:, 1])\n max_range = max(x_range, y_range) + 2 * margin\n x_margin = (max_range - x_range) / 2\n y_margin = (max_range - y_range) / 2\n x_min, x_max = np.min(all_vertices[:, 0]) - x_margin, np.max(all_vertices[:, 0]) + x_margin\n y_min, y_max = np.min(all_vertices[:, 1]) - y_margin, np.max(all_vertices[:, 1]) + y_margin\n x_grid, y_grid = np.meshgrid(np.linspace(x_min, x_max, 100), np.linspace(y_min, y_max, 100))\n \n custom_legend_handles = []\n \n for params in polytope_contour_params:\n A, b, P, q, color, label = params['A'], params['b'], params['P'], params['q'], params['color'], params['label']\n optimal_solution = params.get(\"optimal_solution\", None)\n \n # Find an interior point\n interior_point = find_interior_point(A, b)\n if interior_point is None:\n continue # Skip this polytope if LP is infeasible\n \n # Plot polytope\n halfspace_intersection = HalfspaceIntersection(np.hstack([A, -b[:, np.newaxis]]), interior_point)\n vertices = halfspace_intersection.intersections\n hull = ConvexHull(vertices)\n ordered_vertices = vertices[hull.vertices]\n closed_loop = np.vstack([ordered_vertices, ordered_vertices[0]])\n \n ax.fill(closed_loop[:, 0], closed_loop[:, 1], alpha=0.3, color=color, label=f\"{label} (Polytope)\")\n ax.plot(closed_loop[:, 0], closed_loop[:, 1], color=color)\n \n # Mark the optimal solution\n if optimal_solution is not None:\n ax.plot(optimal_solution[0], optimal_solution[1], 'o', color=color)\n \n # Evaluate quadratic function\n Z = np.zeros_like(x_grid)\n for i in range(x_grid.shape[0]):\n for j in range(x_grid.shape[1]):\n x_vec = np.array([x_grid[i, j], y_grid[i, j]])\n Z[i, j] = 0.5 * x_vec.T @ P @ x_vec + q.T @ x_vec\n \n # Plot contour\n contour = ax.contour(x_grid, y_grid, Z, levels=5, colors=color) # Reduced number of levels for sparser contour\n\n # Create a custom legend handle\n custom_legend_handles.append(Line2D([0], [0], color=color, lw=4, label=label))\n\n # Adjust plot settings\n ax.set_aspect('equal', adjustable='box')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n \n # Add custom legend\n if custom_legend_handles:\n # Move legend outside the plot\n ax.legend(handles=custom_legend_handles, loc='upper left', bbox_to_anchor=(1, 1))\n # Adjust layout to prevent clipping\n plt.tight_layout(rect=[0, 0, 0.85, 1])\n \n return fig, ax"
},
{
"identifier": "high_dim_to_2D_sampling",
"path": "src/utils/geometry.py",
"snippet": "def high_dim_to_2D_sampling(A, b, grid_size=50, x_range=(-1, 1)):\n \"\"\"\n Converts a high-dimensional polytope {x | Ax <= b} to its 2D projection {x | A_proj x <= b_proj}\n using a sampling-based approximation method.\n \n Parameters:\n - A (numpy.ndarray): The coefficient matrix for the high-dimensional inequalities.\n - b (numpy.ndarray): The constant terms for the high-dimensional inequalities.\n - grid_size (int): The number of grid points along each dimension in the sampling grid.\n - x_range (tuple): The range (min, max) for both x1 and x2 in the 2D plane.\n \n Returns:\n - A_2D (numpy.ndarray): The coefficient matrix for the 2D inequalities.\n - b_2D (numpy.ndarray): The constant terms for the 2D inequalities.\n \"\"\"\n \n def sample_based_projection_LP(A, b, x1_range, x2_range, grid_size):\n x1_min, x1_max = x1_range\n x2_min, x2_max = x2_range\n x1_vals = np.linspace(x1_min, x1_max, grid_size)\n x2_vals = np.linspace(x2_min, x2_max, grid_size)\n grid_points = np.array([[x1, x2] for x1 in x1_vals for x2 in x2_vals])\n feasible_points = []\n for point in grid_points:\n x_dim = np.zeros(A.shape[1])\n x_dim[:2] = point\n c = np.zeros(A.shape[1] - 2)\n A_ub = A[:, 2:]\n b_ub = b - np.dot(A[:, :2], point)\n res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=(None, None), method='highs')\n if res.success:\n feasible_points.append(point)\n feasible_points = np.array(feasible_points)\n if feasible_points.shape[0] < 3:\n return \"Insufficient feasible points for a 2D polytope.\"\n hull = ConvexHull(feasible_points)\n vertices = hull.points[hull.vertices]\n return vertices\n \n # Step 1: Sample points and find the approximated vertices in 2D\n vertices_approx = sample_based_projection_LP(A, b, x_range, x_range, grid_size)\n \n # Step 2: Find supporting hyperplanes in 2D\n A_2D, b_2D = find_supporting_hyperplanes(vertices_approx)\n \n return A_2D, b_2D"
},
{
"identifier": "partial_minimization_2D",
"path": "src/utils/geometry.py",
"snippet": "def partial_minimization_2D(P, q):\n \"\"\"\n Performs partial minimization over dimensions starting from 3 to obtain a 2D quadratic function.\n \n Parameters:\n - P (numpy.ndarray): The coefficient matrix for the high-dimensional quadratic function.\n - q (numpy.ndarray): The coefficient vector for the high-dimensional quadratic function.\n \n Returns:\n - P_2D (numpy.ndarray): The 2x2 coefficient matrix for the resulting 2D quadratic function.\n - q_2D (numpy.ndarray): The 2D coefficient vector for the resulting 2D quadratic function.\n - c (float): The constant bias term for the resulting 2D quadratic function.\n \"\"\"\n # Decompose P into P11, P12, P21, P22\n P11 = P[:2, :2]\n P12 = P[:2, 2:]\n P21 = P[2:, :2]\n P22 = P[2:, 2:]\n \n # Decompose q into q1 and q2\n q1 = q[:2]\n q2 = q[2:]\n\n # Compute the 2D quadratic function parameters\n P_2D = P11 - P12 @ np.linalg.inv(P22) @ P21\n q_2D = q1 - P12 @ np.linalg.inv(P22) @ q2\n c = -0.5 * q2 @ np.linalg.inv(P22) @ q2\n\n return P_2D, q_2D, c"
}
] | import numpy as np
import sys
import os
import torch
from src.envs.env_creators import sys_param, env_creators
from src.envs.mpc_baseline_parameters import get_mpc_baseline_parameters
from src.modules.qp_unrolled_network import QPUnrolledNetwork
from matplotlib import pyplot as plt
from icecream import ic
from src.utils.torch_utils import bmv
from src.utils.visualization import plot_multiple_2d_polytopes_with_contour
from src.utils.geometry import high_dim_to_2D_sampling, partial_minimization_2D | 8,744 | # %% Specify test case
# Case where MPC is better
x0 = np.array([10., 10., 10., 10.])
x_ref = np.array([19, 19, 2.4, 2.4])
# # Case where MPC fails
# x0 = np.array([ 5.4963946, 10.947876, 1.034516, 18.08066 ])
# x_ref = np.array([7.522859, 8.169776, 1.1107684, 1. ])
# Controlling process noise and parametric uncertainty
noise_level = 0
parametric_uncertainty = False
parameter_randomization_seed = 2
# %% Set up test bench
file_path = os.path.dirname(__file__)
sys.path.append(os.path.join(file_path, "../.."))
# Utilities
def make_obs(x, x_ref, running_mean, running_std, normalize):
raw_obs = torch.tensor(np.concatenate([x, x_ref]), device=device, dtype=torch.float)
if not normalize:
return raw_obs.unsqueeze(0)
else:
return ((raw_obs - running_mean) / running_std).unsqueeze(0)
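# make_obs concatenates the current state and the reference into a single (1, input_size) observation,
# optionally applying the running mean/std normalization recovered from the checkpoint (see get_state_dict below).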
def get_state_dict(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
model = checkpoint["model"]
prefix = "a2c_network.policy_net."
policy_net_state_dict = {k.lstrip(prefix): v for (k, v) in model.items() if k.startswith(prefix)}
if "running_mean_std.running_mean" in model:
running_mean = model["running_mean_std.running_mean"].to(dtype=torch.float)
running_std = model["running_mean_std.running_var"].sqrt().to(dtype=torch.float)
else:
running_mean = torch.tensor([0.])
running_std = torch.tensor([1.])
return policy_net_state_dict, running_mean, running_std
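# Note: str.lstrip(prefix) strips any leading characters contained in `prefix` (treated as a character set),
# not the literal prefix string; it is only harmless when the remaining key starts with a character outside
# that set. `k[len(prefix):]` or `k.removeprefix(prefix)` (Python 3.9+) would be the exact operation.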
def rescale_action(action, low=-1., high=8.):
action = action.clamp(-1., 1.)
return low + (high - low) * (action + 1) / 2
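# Affine map from the policy's clamped [-1, 1] action range onto [low, high];
# e.g. with the defaults, action = 0 -> -1 + (8 - (-1)) * (0 + 1) / 2 = 3.5 (the midpoint).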
t = lambda arr: torch.tensor(arr, device=device, dtype=torch.float).unsqueeze(0)
a = lambda t: t.detach().cpu().numpy()
# Constants and options
n_sys = 4
m_sys = 2
input_size = 8 # 4 for x, 4 for x_ref
n = 2
m = 64
qp_iter = 10
device = "cuda:0"
# MPC module
| # %% Specify test case
# Case where MPC is better
x0 = np.array([10., 10., 10., 10.])
x_ref = np.array([19, 19, 2.4, 2.4])
# # Case where MPC fails
# x0 = np.array([ 5.4963946, 10.947876, 1.034516, 18.08066 ])
# x_ref = np.array([7.522859, 8.169776, 1.1107684, 1. ])
# Controlling process noise and parametric uncertainty
noise_level = 0
parametric_uncertainty = False
parameter_randomization_seed = 2
# %% Set up test bench
file_path = os.path.dirname(__file__)
sys.path.append(os.path.join(file_path, "../.."))
# Utilities
def make_obs(x, x_ref, running_mean, running_std, normalize):
raw_obs = torch.tensor(np.concatenate([x, x_ref]), device=device, dtype=torch.float)
if not normalize:
return raw_obs.unsqueeze(0)
else:
return ((raw_obs - running_mean) / running_std).unsqueeze(0)
def get_state_dict(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
model = checkpoint["model"]
prefix = "a2c_network.policy_net."
policy_net_state_dict = {k.lstrip(prefix): v for (k, v) in model.items() if k.startswith(prefix)}
if "running_mean_std.running_mean" in model:
running_mean = model["running_mean_std.running_mean"].to(dtype=torch.float)
running_std = model["running_mean_std.running_var"].sqrt().to(dtype=torch.float)
else:
running_mean = torch.tensor([0.])
running_std = torch.tensor([1.])
return policy_net_state_dict, running_mean, running_std
def rescale_action(action, low=-1., high=8.):
action = action.clamp(-1., 1.)
return low + (high - low) * (action + 1) / 2
t = lambda arr: torch.tensor(arr, device=device, dtype=torch.float).unsqueeze(0)
a = lambda t: t.detach().cpu().numpy()
# Constants and options
n_sys = 4
m_sys = 2
input_size = 8 # 4 for x, 4 for x_ref
n = 2
m = 64
qp_iter = 10
device = "cuda:0"
# MPC module | mpc_baseline = get_mpc_baseline_parameters("tank", 1) | 1 | 2023-11-28 05:56:22+00:00 | 12k |
armed-gpt/gpt-blazing | gpt_blazing_experiment/model/debug_baichuan2.py | [
{
"identifier": "Role",
"path": "gpt_blazing/model/interface.py",
"snippet": "class Role(Enum):\n SYSTEM = 'system'\n USER = 'user'\n ASSISTANT = 'assistant'\n\n @classmethod\n def from_string(cls, text: str):\n return _TEXT_TO_ROLE[text]"
},
{
"identifier": "Baichuan2Model",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "class Baichuan2Model(torch.nn.Module):\n\n def __init__(self, config: Baichuan2ModelConfig) -> None:\n super().__init__()\n self.config = config\n self.apply_nan_to_num_to_alibi_mask = config.apply_nan_to_num_to_alibi_mask\n\n self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)\n # [num_heads, model_max_length, model_max_length]\n # TODO: dtype issue here.\n self.register_buffer(\n \"alibi_mask\",\n _gen_alibi_mask(config.num_attention_heads, config.model_max_length),\n persistent=False,\n )\n\n self.layers = torch.nn.ModuleList([\n BaichuanLayer(config) for _ in range(config.num_hidden_layers)\n ])\n self.norm = RMSNorm(config.hidden_size, epsilon=config.rms_norm_eps)\n\n self.lm_head = NormHead(config.hidden_size, config.vocab_size)\n\n def half(self):\n self = super().half()\n if self.apply_nan_to_num_to_alibi_mask:\n self.alibi_mask.nan_to_num_()\n return self\n\n def bfloat16(self):\n self = super().bfloat16()\n if self.apply_nan_to_num_to_alibi_mask:\n self.alibi_mask.nan_to_num_()\n return self\n\n def forward(\n self,\n input_pos: torch.Tensor,\n end: int,\n input_ids: torch.Tensor,\n ):\n inputs_embeds = self.embed_tokens(input_ids)\n\n hidden_states = inputs_embeds\n for layer in self.layers:\n hidden_states = layer(\n input_pos=input_pos,\n end=end,\n hidden_states=hidden_states,\n attention_mask=self.alibi_mask,\n )\n hidden_states = self.norm(hidden_states)\n\n logits = self.lm_head(hidden_states)\n\n return logits, hidden_states"
},
{
"identifier": "Baichuan2ModelConfig",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "class Baichuan2ModelConfig:\n hidden_size: int = 5120\n initializer_range: float = 0.02\n intermediate_size: int = 13696\n model_max_length: int = 4096\n model_max_batch_size: int = 1\n num_attention_heads: int = 40\n num_hidden_layers: int = 40\n pad_token_id: int = 0\n rms_norm_eps: float = 1e-06\n vocab_size: int = 125696\n use_original_attn_impl: bool = True\n apply_nan_to_num_to_alibi_mask: bool = False\n debug: bool = False"
},
{
"identifier": "quantize_int8",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "def quantize_int8(model: Baichuan2Model, struct_only: bool = False):\n replace_linear_weight_only_int8_per_channel(model, struct_only)\n return model"
},
{
"identifier": "quantize_fp8",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "def quantize_fp8(model: Baichuan2Model, struct_only: bool = False):\n replace_linear_weight_only_fp8_per_channel(model, struct_only)\n return model"
},
{
"identifier": "EmptyInitOnDevice",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "class EmptyInitOnDevice(torch.overrides.TorchFunctionMode): # type: ignore\n\n def __init__(self, device=None): # type: ignore\n self.device = device\n\n def __torch_function__(self, func, types, args=(), kwargs=None): # type: ignore\n kwargs = kwargs or {}\n if getattr(func, '__module__', None) == 'torch.nn.init':\n if 'tensor' in kwargs:\n return kwargs['tensor']\n else:\n return args[0]\n device_constructors = torch.utils._device._device_constructors() # type: ignore\n if (\n self.device is not None and func in device_constructors and kwargs.get('device') is None\n ):\n kwargs['device'] = self.device\n return func(*args, **kwargs)"
},
{
"identifier": "load_model",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "def load_model(\n model_pt: str,\n config: Optional[Baichuan2ModelConfig] = None,\n int8: bool = True,\n fp8: bool = False,\n):\n if config is None:\n config = Baichuan2ModelConfig()\n\n with EmptyInitOnDevice():\n model = Baichuan2Model(config)\n model.eval()\n model.bfloat16()\n\n if int8:\n model = quantize_int8(model, struct_only=True)\n elif fp8:\n model = quantize_fp8(model, struct_only=True)\n\n model.load_state_dict(torch.load(model_pt, map_location='cpu'))\n\n return model"
},
{
"identifier": "model_prefill_2048",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "def model_prefill_2048(\n model: Baichuan2Model,\n input_pos: torch.Tensor,\n input_ids: torch.Tensor,\n):\n return model(input_pos=input_pos, end=2048, input_ids=input_ids)"
},
{
"identifier": "model_prefill_4096",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "def model_prefill_4096(\n model: Baichuan2Model,\n input_pos: torch.Tensor,\n input_ids: torch.Tensor,\n):\n return model(input_pos=input_pos, end=4096, input_ids=input_ids)"
},
{
"identifier": "compile_model_prefill",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "def compile_model_prefill(func): # type: ignore\n return torch.compile(func, fullgraph=True, dynamic=True)"
},
{
"identifier": "model_decode_one_token_2048",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "def model_decode_one_token_2048(\n model: Baichuan2Model,\n input_pos: torch.Tensor,\n input_ids: torch.Tensor,\n):\n return model(input_pos=input_pos, end=2048, input_ids=input_ids)"
},
{
"identifier": "model_decode_one_token_4096",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "def model_decode_one_token_4096(\n model: Baichuan2Model,\n input_pos: torch.Tensor,\n input_ids: torch.Tensor,\n):\n return model(input_pos=input_pos, end=4096, input_ids=input_ids)"
},
{
"identifier": "compile_model_decode_one_token",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "def compile_model_decode_one_token(func): # type: ignore\n return torch.compile(func, mode=\"reduce-overhead\", fullgraph=True)"
},
{
"identifier": "model_dispatch",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "def model_dispatch(\n model: Baichuan2Model,\n func_2048: Any,\n func_4096: Any,\n input_pos: torch.Tensor,\n input_ids: torch.Tensor,\n):\n if func_2048 is None:\n func = func_4096\n else:\n if input_pos[-1] < 2048:\n func = func_2048\n else:\n func = func_4096\n\n # https://github.com/pytorch-labs/gpt-fast/issues/31\n with torch.inference_mode():\n with torch.backends.cuda.sdp_kernel(\n enable_flash=False,\n enable_mem_efficient=False,\n enable_math=True,\n ):\n logits, hidden_states = func(model, input_pos, input_ids)\n logits = logits.detach()\n hidden_states = hidden_states.detach()\n return logits, hidden_states"
},
{
"identifier": "model_get_cache",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "def model_get_cache(\n model: Baichuan2Model,\n length: int,\n device: Optional[str] = None,\n):\n attn_cache: List[Tuple[torch.Tensor, torch.Tensor]] = []\n for layer in model.layers:\n k_cache = layer.self_attn.k_cache[:, :, :length].clone()\n v_cache = layer.self_attn.v_cache[:, :, :length].clone()\n if device:\n k_cache = k_cache.to(device, non_blocking=True)\n v_cache = v_cache.to(device, non_blocking=True)\n attn_cache.append((k_cache, v_cache))\n return attn_cache"
},
{
"identifier": "model_set_cache",
"path": "gpt_blazing/model/baichuan2/model.py",
"snippet": "def model_set_cache(\n model: Baichuan2Model,\n length: int,\n attn_cache: Sequence[Tuple[torch.Tensor, torch.Tensor]],\n):\n assert len(model.layers) == len(attn_cache)\n for layer, (k_cache, v_cache) in zip(model.layers, attn_cache):\n layer.self_attn.k_cache[:, :, :length] = k_cache.to(\n layer.self_attn.k_cache.device,\n non_blocking=True,\n )\n layer.self_attn.v_cache[:, :, :length] = v_cache.to(\n layer.self_attn.v_cache.device,\n non_blocking=True,\n )"
},
{
"identifier": "convert_hf_model_to_model",
"path": "gpt_blazing/model/baichuan2/utility.py",
"snippet": "def convert_hf_model_to_model(hf_model: Any):\n import torch\n\n with EmptyInitOnDevice():\n model = Baichuan2Model(Baichuan2ModelConfig(debug=True))\n model.bfloat16()\n\n assert hf_model.dtype == torch.bfloat16 # type: ignore\n baichuan_model = hf_model.model\n\n model.embed_tokens.load_state_dict(baichuan_model.embed_tokens.state_dict())\n for layer_idx, layer in enumerate(model.layers):\n layer.load_state_dict(baichuan_model.layers[layer_idx].state_dict())\n model.norm.load_state_dict(baichuan_model.norm.state_dict())\n model.lm_head.load_state_dict(hf_model.lm_head.state_dict())\n return model"
},
{
"identifier": "Baichuan2Tokenizer",
"path": "gpt_blazing/model/baichuan2/tokenizer.py",
"snippet": "class Baichuan2Tokenizer:\n\n def __init__(self, model_file: str) -> None:\n self.sp_model = SentencePieceProcessor()\n self.sp_model.Load(model_file)\n\n self.eos_token_id = 2\n\n self.user_token_id = 195\n self.assistant_token_id = 196\n\n def tokenize(self, text: str) -> Sequence[int]:\n return self.sp_model.tokenize(text) # type: ignore\n\n def chat_tokenize(self, rounds: Sequence[Tuple[Role, str]]):\n input_ids = []\n\n system = None\n if rounds[0][0] == Role.SYSTEM:\n system = rounds[0][1]\n input_ids.extend(self.tokenize(system))\n rounds = rounds[1:]\n\n num_system_tokens = len(input_ids)\n\n for role, text in rounds:\n if role == Role.USER:\n input_ids.append(self.user_token_id)\n elif role == Role.ASSISTANT:\n input_ids.append(self.assistant_token_id)\n else:\n raise NotImplementedError()\n input_ids.extend(self.tokenize(text))\n\n assert rounds[-1][0] == Role.USER\n input_ids.append(self.assistant_token_id)\n\n return input_ids, system, num_system_tokens\n\n def decode(self, tokens: Sequence[int]) -> str:\n return self.sp_model.decode(tokens) # type: ignore"
},
{
"identifier": "Baichuan2ModelInferenceConfig",
"path": "gpt_blazing/model/baichuan2/inference.py",
"snippet": "class Baichuan2ModelInferenceConfig:\n model_folder: str\n model_config: Baichuan2ModelConfig = attrs.field(factory=Baichuan2ModelConfig)\n quantization_mode: QuantizationMode = QuantizationMode.INT8\n device: str = 'cuda:0'\n cache_capacity: int = 20\n use_dynamic_dispatch: bool = True\n skip_torch_compile: bool = False"
},
{
"identifier": "Baichuan2ModelInference",
"path": "gpt_blazing/model/baichuan2/inference.py",
"snippet": "class Baichuan2ModelInference(ModelInference[Baichuan2ModelInferenceConfig]):\n\n def __init__(\n self,\n config: Baichuan2ModelInferenceConfig,\n func_process_model: Optional[Callable[[Any], None]] = None,\n ):\n super().__init__(config, func_process_model)\n\n self.device = config.device\n self.model_max_length = 4096\n\n # For cache.\n self.cached_system: Optional[str] = None\n self.lru_cache = LruCache(config.cache_capacity)\n self.model_is_loaded = False\n self.model_is_compiled = False\n\n def load_model(self, device: Optional[str] = None) -> None:\n if device:\n self.device = device\n\n logger.info(\n f'Initializing Baichuan2Inference(config={self.config}), '\n f'device={self.device}'\n )\n\n model_fd = io.folder(self.config.model_folder, exists=True)\n\n # TODO: support more modes.\n assert self.config.quantization_mode == QuantizationMode.INT8\n\n model_pt = str(model_fd / f'{self.config.quantization_mode.value}.pt')\n logger.info(f'Loading model_pt={model_pt}')\n self.model = load_model(model_pt=model_pt, config=self.config.model_config, int8=True)\n logger.info('Model loaded.')\n\n tokenizer_model = str(model_fd / 'tokenizer.model')\n logger.info(f'Loading tokenizer_model={tokenizer_model}')\n self.tokenizer = Baichuan2Tokenizer(tokenizer_model)\n logger.info('Tokenizer loaded.')\n\n logger.info(f'Moving model to device={self.device}')\n self.model = self.model.to(self.device)\n\n if self.func_process_model is not None:\n logger.info('func_process_model is set, calling func_process_model(self.model)...')\n self.func_process_model(self.model)\n\n self.model_is_loaded = True\n\n def compile_model(self) -> None:\n assert self.model_is_loaded\n\n if self.config.skip_torch_compile:\n logger.info('skip_torch_compile is set, abort. (only for debugging)')\n\n self.prefill_4096 = model_prefill_4096\n self.decode_one_token_4096 = model_decode_one_token_4096\n\n self.prefill_2048 = None\n self.decode_one_token_2048 = None\n\n self.model_is_compiled = True\n return\n\n logger.info('Compiling model...')\n\n self.prefill_4096 = compile_model_prefill(model_prefill_4096)\n self.decode_one_token_4096 = compile_model_decode_one_token(model_decode_one_token_4096)\n\n self.prefill_2048 = None\n self.decode_one_token_2048 = None\n\n if self.config.use_dynamic_dispatch:\n self.prefill_2048 = compile_model_prefill(model_prefill_2048)\n self.decode_one_token_2048 = compile_model_decode_one_token(model_decode_one_token_2048)\n\n self.trigger_model_compilation()\n\n self.model_is_compiled = True\n\n def model_is_ready(self) -> bool:\n return self.model_is_compiled\n\n def trigger_model_compilation(self):\n import torch._dynamo.config\n import torch._inductor.config\n\n torch._inductor.config.coordinate_descent_tuning = True\n torch._inductor.config.triton.unique_kernel_names = True\n torch._inductor.config.fx_graph_cache = True\n\n logger.info('Trigger prefill compilation.')\n input_ids = torch.tensor([self.tokenizer.tokenize('随便写点什么')], dtype=torch.int)\n input_ids = input_ids.to(self.device)\n\n for offset in [0, 2048]:\n logger.info(f'offset={offset}')\n for idx in range(5):\n input_pos = torch.arange(\n offset,\n offset + int(input_ids.shape[1]),\n device=input_ids.device,\n dtype=torch.int,\n )\n _, num_seconds = timed(\n lambda: model_dispatch(\n model=self.model,\n func_2048=self.prefill_2048,\n func_4096=self.prefill_4096,\n input_pos=input_pos,\n input_ids=input_ids,\n )\n )\n logger.info(f'[{idx}]: prefill compilation: {num_seconds}s.')\n\n logger.info('Trigger decode_one_token 
compilation.')\n for offset in [0, 2048]:\n logger.info(f'offset={offset}')\n for idx in range(5):\n input_pos = torch.tensor([offset + idx], device=self.device, dtype=torch.int)\n input_ids = torch.tensor(\n [[random.randint(0, self.config.model_config.vocab_size)]],\n dtype=torch.int,\n device=self.device,\n )\n\n _, num_seconds = timed(\n lambda: model_dispatch(\n model=self.model,\n func_2048=self.decode_one_token_2048,\n func_4096=self.decode_one_token_4096,\n input_pos=input_pos,\n input_ids=input_ids,\n )\n )\n logger.info(f'[{idx}]: decode_one_token compilation: {num_seconds}s.')\n\n def get_eos_token(self):\n return self.tokenizer.eos_token_id\n\n def get_model_max_length(self):\n return self.model_max_length\n\n def get_hidden_size(self):\n return self.config.model_config.hidden_size\n\n def model_prefill(self, rounds: Sequence[Tuple[Role, str]], cache_system: bool = False):\n input_ids = None\n system = None\n num_system_tokens = 0\n begin = 0\n initialized = False\n\n if cache_system:\n system = None\n if rounds[0][0] == Role.SYSTEM:\n system = rounds[0][1]\n\n if system:\n cache = self.lru_cache.get(system)\n if cache is not None:\n num_system_tokens, attn_cache = cache\n # Cache hit.\n if system != self.cached_system:\n # Need to move the cache to model.\n model_set_cache(self.model, num_system_tokens, attn_cache)\n self.cached_system = system\n\n # Skip tokenizing system.\n input_ids, _, _num_system_tokens = self.tokenizer.chat_tokenize(rounds[1:])\n assert _num_system_tokens == 0\n begin = num_system_tokens\n\n initialized = True\n\n if not initialized:\n input_ids, system, num_system_tokens = self.tokenizer.chat_tokenize(rounds)\n # Invalidate the model cache.\n self.cached_system = None\n\n assert input_ids\n\n end = begin + len(input_ids)\n if end >= self.model_max_length:\n return None\n\n input_pos = torch.arange(begin, end, device=self.device, dtype=torch.int)\n input_ids = torch.tensor([input_ids], dtype=torch.int, device=self.device)\n logits, hidden_states = model_dispatch(\n model=self.model,\n func_2048=self.prefill_2048,\n func_4096=self.prefill_4096,\n input_pos=input_pos,\n input_ids=input_ids,\n )\n\n if cache_system and system and self.cached_system is None:\n # Add to cache.\n self.cached_system = system\n self.lru_cache.set(\n system,\n (num_system_tokens, model_get_cache(self.model, num_system_tokens)),\n )\n\n return logits, hidden_states, end\n\n def model_decode_one_token(self, input_pos: torch.Tensor, input_ids: torch.Tensor):\n logits, hidden_states = model_dispatch(\n model=self.model,\n func_2048=self.decode_one_token_2048,\n func_4096=self.decode_one_token_4096,\n input_pos=input_pos,\n input_ids=input_ids,\n )\n return logits, hidden_states\n\n def tokenizer_decode(self, tokens: Sequence[int]):\n return self.tokenizer.decode(tokens)"
}
] | from typing import Tuple, Sequence, Optional
from datetime import datetime
from gpt_blazing.model.interface import Role
from gpt_blazing.model.baichuan2.model import (
Baichuan2Model,
Baichuan2ModelConfig,
quantize_int8,
quantize_fp8,
EmptyInitOnDevice,
load_model,
model_prefill_2048,
model_prefill_4096,
compile_model_prefill,
model_decode_one_token_2048,
model_decode_one_token_4096,
compile_model_decode_one_token,
model_dispatch,
model_get_cache,
model_set_cache,
)
from gpt_blazing.model.baichuan2.utility import convert_hf_model_to_model
from gpt_blazing.model.baichuan2.tokenizer import Baichuan2Tokenizer
from gpt_blazing.model.baichuan2.inference import (
Baichuan2ModelInferenceConfig,
Baichuan2ModelInference,
)
from transformers import AutoModelForCausalLM
from transformers import AutoTokenizer
from transformers.generation.utils import GenerationConfig
from transformers import AutoTokenizer
from transformers.generation.utils import GenerationConfig
from transformers import AutoTokenizer
from transformers.generation.utils import GenerationConfig
import torch
import torch.nn.functional as F
import sentencepiece as spm
import iolite as io
import os
import torch._dynamo.config
import torch._inductor.config
import random
import torch._dynamo.config
import torch._inductor.config
import os
import random
import os | 7,711 |
def get_top_p_sorted_indices(logits: torch.Tensor, top_p: float = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
mask = cumulative_probs <= top_p
mask[..., 1:] = mask[..., :-1].clone()
mask[..., 0] = True
sorted_indices[~mask] = -1
return sorted_indices
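# Top-p (nucleus) candidate selection: tokens are sorted by logit and kept while the cumulative softmax
# probability stays within top_p; the one-position shift plus mask[..., 0] = True also keeps the first token
# that crosses the threshold, so at least one candidate always survives. Pruned slots are set to -1, which
# makes the returned index tensors directly comparable between two logit dumps.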
def compare_logits(file0: str, file1: str):
'''
# 1.
fib gpt_blazing_experiment/model/debug_baichuan2.py:compare_logits \
--file0="$GPT_BLAZING_DATA/model/baichuan2/logits.pt" \
--file1="$GPT_BLAZING_DATA/model/baichuan2/hf_logits.pt"
# 0.9942
fib gpt_blazing_experiment/model/debug_baichuan2.py:compare_logits \
--file0="$GPT_BLAZING_DATA/model/baichuan2/logits.pt" \
--file1="$GPT_BLAZING_DATA/model/baichuan2/fp8_logits.pt"
# 0.9942
fib gpt_blazing_experiment/model/debug_baichuan2.py:compare_logits \
--file0="$GPT_BLAZING_DATA/model/baichuan2/logits.pt" \
--file1="$GPT_BLAZING_DATA/model/baichuan2/int8_logits.pt"
# 0.9945
fib gpt_blazing_experiment/model/debug_baichuan2.py:compare_logits \
--file0="$GPT_BLAZING_DATA/model/baichuan2/logits.pt" \
--file1="$GPT_BLAZING_DATA/model/baichuan2/int8_hf_logits.pt"
# 0.9939
fib gpt_blazing_experiment/model/debug_baichuan2.py:compare_logits \
--file0="$GPT_BLAZING_DATA/model/baichuan2/logits.pt" \
--file1="$GPT_BLAZING_DATA/model/baichuan2/compiled_logits.pt"
# 0.9939
fib gpt_blazing_experiment/model/debug_baichuan2.py:compare_logits \
--file0="$GPT_BLAZING_DATA/model/baichuan2/logits.pt" \
--file1="$GPT_BLAZING_DATA/model/baichuan2/compiled_int8_logits.pt"
# 0.9938
fib gpt_blazing_experiment/model/debug_baichuan2.py:compare_logits \
--file0="$GPT_BLAZING_DATA/model/baichuan2/compiled_int8_logits.pt" \
--file1="$GPT_BLAZING_DATA/model/baichuan2/int8_hf_logits.pt"
'''
logits0 = torch.load(file0, map_location='cuda:0')
logits1 = torch.load(file1, map_location='cuda:0')
tpsi0 = get_top_p_sorted_indices(logits0)
tpsi1 = get_top_p_sorted_indices(logits1)
rank = tpsi0 == tpsi1
r = rank.sum() / rank.numel()
print(r)
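# r is the fraction of (position, rank) slots whose top-p indices agree between the two dumps
# (pruned slots compare as -1 == -1); 1.0 means identical top-p candidate orderings. The numbers in the
# docstring above appear to be the agreements observed for the different quantization / compilation variants.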
def demo_func(x: torch.Tensor, y: torch.Tensor, begin: int, end: int):
return x[:, begin:end] + y[:, begin:end]
def debug_compile():
func = torch.compile(demo_func, mode="reduce-overhead", fullgraph=True)
x = torch.rand((1, 20, 128))
y = torch.rand((1, 20, 128))
print(func(x, y, 0, 10))
# triggers Recompiling!
print(func(x, y, 0, 15))
print(func(x, y, 1, 2))
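# With fullgraph compilation the Python ints `begin`/`end` are specialized into the compiled graph's guards,
# so calling the function with different slice bounds triggers a recompile (the "Recompiling!" noted above).
# This is presumably why the model code dispatches between separately compiled fixed-length 2048 / 4096
# variants via model_dispatch instead of passing an arbitrary `end`.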
def timed(fn): # type: ignore
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record() # type: ignore
result = fn()
end.record() # type: ignore
torch.cuda.synchronize()
return result, start.elapsed_time(end) / 1000
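# CUDA-event based timing: record() / synchronize() measure GPU execution time rather than Python wall time,
# and Event.elapsed_time() returns milliseconds, hence the division by 1000 to report seconds.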
def debug_greedy_decoding_performance():
print('Loading...')
model = load_model(
model_pt=str(
io.file("$GPT_BLAZING_DATA/model/baichuan2-13b-chat/int8.pt", expandvars=True)
),
int8=True,
config=Baichuan2ModelConfig(debug=False),
)
model.to('cuda:0')
print('Compiling...')
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
torch._inductor.config.fx_graph_cache = True
input_ids = generate_debug_input_ids()
input_ids = input_ids.to('cuda:0')
prefill_2048 = compile_model_prefill(model_prefill_2048)
prefill_4096 = compile_model_prefill(model_prefill_4096)
for offset in [0, 2048]:
for _ in range(3):
input_pos = torch.arange(
offset,
offset + int(input_ids.shape[1]),
device=input_ids.device,
dtype=torch.int,
)
print(
'prefill compiling time:',
timed(
|
BAICHUAN2_13B_MODEL_FOLDER = str(
io.folder(
'$GPT_BLAZING_DATA/base/Baichuan2-13B-Chat',
expandvars=True,
)
)
def load_hf_model(
model_folder: str = BAICHUAN2_13B_MODEL_FOLDER,
device_map: Optional[str] = None,
):
return AutoModelForCausalLM.from_pretrained(
model_folder,
torch_dtype=torch.bfloat16,
trust_remote_code=True,
device_map=device_map,
)
def eval_hf_model():
with EmptyInitOnDevice():
model = load_hf_model()
model.generation_config = GenerationConfig.from_pretrained(BAICHUAN2_13B_MODEL_FOLDER)
tokenizer = AutoTokenizer.from_pretrained(
BAICHUAN2_13B_MODEL_FOLDER,
use_fast=False,
trust_remote_code=True,
)
model.generation_config.do_sample = False
# pip install bitsandbytes scipy
model = model.quantize(8).to('cuda:0')
print('Warmup')
with torch.inference_mode():
print(model.chat(tokenizer, [{"role": "user", "content": '你好'}]))
print('Running...')
decode_dt_begin = datetime.now()
with torch.inference_mode():
response = model.chat(
tokenizer,
[{
"role": "user",
"content": "帮我写一篇与A股主题相关的作文,800字左右"
}],
)
decode_dt_end = datetime.now()
decode_dt_delta = decode_dt_end - decode_dt_begin
print('decode_dt_delta:', decode_dt_delta)
output_ids = tokenizer.encode(response, add_special_tokens=False)
print('tok/s:', (len(output_ids) + 1) / decode_dt_delta.total_seconds())
print(response)
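# Rough throughput baseline for the stock HF model with 8-bit quantization: a single chat() call is timed
# end to end and tokens/second is estimated by re-encoding the returned response, presumably for comparison
# against the compiled implementation exercised in debug_greedy_decoding_performance below.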
def run_hf_demo():
with EmptyInitOnDevice():
model = load_hf_model()
model.generation_config = GenerationConfig.from_pretrained(BAICHUAN2_13B_MODEL_FOLDER)
tokenizer = AutoTokenizer.from_pretrained(
BAICHUAN2_13B_MODEL_FOLDER,
use_fast=False,
trust_remote_code=True,
)
model.generation_config.do_sample = False
# pip install bitsandbytes scipy
model = model.quantize(8).to('cuda:0')
messages = []
while True:
content = input('[USER]: ').strip()
if content == 'reset':
messages = []
continue
messages.append({'role': 'user', 'content': content})
response = model.chat(tokenizer, messages)
print(f'[ASSISTANT]: {response}')
messages.append({'role': 'assistant', 'content': response})
def build_chat_input_for_test(
generation_config, # type: ignore
tokenizer, # type: ignore
messages: Sequence[dict],
max_new_tokens: int = 0
):
def _parse_messages(messages, split_role="user"): # type: ignore
system, rounds = "", []
round = []
for i, message in enumerate(messages):
if message["role"] == "system":
assert i == 0
system = message["content"]
continue
if message["role"] == split_role and round:
rounds.append(round)
round = []
round.append(message)
if round:
rounds.append(round)
return system, rounds
max_new_tokens = max_new_tokens or generation_config.max_new_tokens
max_input_tokens = 4096 - max_new_tokens
system, rounds = _parse_messages(messages, split_role="user")
system_tokens = tokenizer.encode(system)
max_history_tokens = max_input_tokens - len(system_tokens)
history_tokens = []
for round in rounds[::-1]:
round_tokens = []
for message in round:
if message["role"] == "user":
round_tokens.append(generation_config.user_token_id)
else:
round_tokens.append(generation_config.assistant_token_id)
round_tokens.extend(tokenizer.encode(message["content"]))
            if len(history_tokens) == 0 or len(history_tokens) + len(round_tokens) <= max_history_tokens:
history_tokens = round_tokens + history_tokens # concat left
if len(history_tokens) < max_history_tokens:
continue
break
input_tokens = system_tokens + history_tokens
if messages[-1]["role"] != "assistant":
input_tokens.append(generation_config.assistant_token_id)
input_tokens = input_tokens[-max_input_tokens:] # truncate left
return input_tokens
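# Reference prompt construction mirroring the HF Baichuan2 chat format: system tokens come first, rounds are
# filled from the most recent one backwards until max_history_tokens is reached, with the user/assistant
# special token ids taken from the generation config; a trailing assistant token id prompts the reply, and
# truncation is applied from the left so the newest context is preserved.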
def compare_tokenizers():
hf_tokenizer = AutoTokenizer.from_pretrained(
BAICHUAN2_13B_MODEL_FOLDER,
use_fast=False,
trust_remote_code=True,
)
hf_generation_config = GenerationConfig.from_pretrained(BAICHUAN2_13B_MODEL_FOLDER)
tokenizer = Baichuan2Tokenizer(f'{BAICHUAN2_13B_MODEL_FOLDER}/tokenizer.model')
def _compare(messages): # type: ignore
hf_input_ids = build_chat_input_for_test(
generation_config=hf_generation_config,
tokenizer=hf_tokenizer,
messages=messages,
)
input_ids = tokenizer.chat_tokenize([
(Role.from_string(message['role']), message['content']) for message in messages
])[0]
assert hf_input_ids == input_ids
_compare([
{
'role': 'user',
'content': '测试一下'
},
])
_compare([
{
'role': 'system',
'content': '这个是 system'
},
{
'role': 'user',
'content': '测试一下'
},
])
_compare([
{
'role': 'user',
'content': 'foo'
},
{
'role': 'assistant',
'content': 'bar'
},
{
'role': 'user',
'content': 'baz'
},
])
_compare([
{
'role': 'system',
'content': '这个是 system'
},
{
'role': 'user',
'content': 'foo'
},
{
'role': 'assistant',
'content': 'bar'
},
{
'role': 'user',
'content': 'baz'
},
])
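# Sanity check that Baichuan2Tokenizer.chat_tokenize reproduces exactly the token ids built by the HF
# reference builder above for user-only, system + user, and multi-round conversations.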
def load_and_convert_to_model(model_folder: str = BAICHUAN2_13B_MODEL_FOLDER):
with EmptyInitOnDevice():
hf_model = load_hf_model(model_folder)
return convert_hf_model_to_model(hf_model)
def generate_debug_input_ids(model_folder: str = BAICHUAN2_13B_MODEL_FOLDER):
sp_model = spm.SentencePieceProcessor()
sp_model.Load(f'{model_folder}/tokenizer.model')
input_ids = sp_model.tokenize('测试一下。') # type: ignore
input_ids = torch.tensor([[195, *input_ids]], dtype=torch.int)
return input_ids
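# 195 is Baichuan2's reserved user token id (see Baichuan2Tokenizer in the context above); the prompt
# '测试一下。' is a short Chinese string meaning roughly "just a quick test".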
def move_model_to_devices(
model: Baichuan2Model,
device_and_layer_begin_pairs: Sequence[Tuple[str, int]],
):
assert device_and_layer_begin_pairs
device0 = device_and_layer_begin_pairs[0][0]
model.embed_tokens.to(device0)
model.alibi_mask = model.alibi_mask.to(device0)
for pair_idx, (device, layer_begin) in enumerate(device_and_layer_begin_pairs):
if pair_idx + 1 < len(device_and_layer_begin_pairs):
layer_end = device_and_layer_begin_pairs[pair_idx + 1][1]
else:
layer_end = len(model.layers)
for layer_idx in range(layer_begin, layer_end):
model.layers[layer_idx].to(device)
device1 = device_and_layer_begin_pairs[-1][0]
model.norm.to(device1)
model.lm_head.to(device1)
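# Simple layer-range sharding: the embedding table and alibi mask go to the first listed device, each
# (device, layer_begin) pair owns layers [layer_begin, next pair's layer_begin), and the final norm plus
# lm_head follow the last device. Used by save_model_logits below to split the unquantized bf16 model
# across two GPUs.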
def save_model_logits(
output_file: str,
model_folder: str = BAICHUAN2_13B_MODEL_FOLDER,
compile: bool = False,
int8: bool = False,
fp8: bool = False,
):
'''
fib gpt_blazing_experiment/model/debug_baichuan2.py:save_model_logits \
--output_file="$GPT_BLAZING_DATA/model/baichuan2/logits.pt"
fib gpt_blazing_experiment/model/debug_baichuan2.py:save_model_logits \
--output_file="$GPT_BLAZING_DATA/model/baichuan2/int8_logits.pt" \
--int8
fib gpt_blazing_experiment/model/debug_baichuan2.py:save_model_logits \
--output_file="$GPT_BLAZING_DATA/model/baichuan2/fp8_logits.pt" \
--fp8
fib gpt_blazing_experiment/model/debug_baichuan2.py:save_model_logits \
--output_file="$GPT_BLAZING_DATA/model/baichuan2/compiled_logits.pt" \
--compile
fib gpt_blazing_experiment/model/debug_baichuan2.py:save_model_logits \
--output_file="$GPT_BLAZING_DATA/model/baichuan2/compiled_int8_logits.pt" \
--compile \
--int8
# 'fp8e4nv data type is not supported on CUDA arch < 89'
fib gpt_blazing_experiment/model/debug_baichuan2.py:save_model_logits \
--output_file="$GPT_BLAZING_DATA/model/baichuan2/compiled_fp8_logits.pt" \
--compile \
--fp8
'''
print('Loading...')
model = load_and_convert_to_model(model_folder)
if int8:
print('Quantizing (int8) ...')
model = quantize_int8(model)
elif fp8:
print('Quantizing (fp8) ...')
model = quantize_fp8(model)
if not (int8 or fp8):
move_model_to_devices(model, [('cuda:0', 0), ('cuda:1', 20)]) # type: ignore
else:
model = model.to('cuda:0') # type: ignore
if compile:
print('Compiling...')
model = torch.compile(model, mode="reduce-overhead", fullgraph=True)
input_ids = generate_debug_input_ids(model_folder)
input_ids = input_ids.to('cuda:0')
input_pos = torch.arange(0, input_ids.shape[1], device=input_ids.device)
with torch.inference_mode():
with torch.backends.cuda.sdp_kernel(
enable_flash=False,
enable_mem_efficient=False,
enable_math=True,
):
logits, _ = model(input_pos=input_pos, end=2048, input_ids=input_ids)
print('Saving to', output_file)
torch.save(logits, output_file)
def save_hf_model_logits(
output_file: str,
model_folder: str = BAICHUAN2_13B_MODEL_FOLDER,
int8: bool = False,
):
'''
fib gpt_blazing_experiment/model/debug_baichuan2.py:save_hf_model_logits \
--output_file="$GPT_BLAZING_DATA/model/baichuan2/hf_logits.pt"
fib gpt_blazing_experiment/model/debug_baichuan2.py:save_hf_model_logits \
--output_file="$GPT_BLAZING_DATA/model/baichuan2/int8_hf_logits.pt" \
--int8
'''
print('Loading...')
with EmptyInitOnDevice():
if not int8:
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'
hf_model = load_hf_model(model_folder, device_map='auto')
else:
hf_model = load_hf_model(model_folder)
hf_model = hf_model.quantize(8).to('cuda:0') # type: ignore
input_ids = generate_debug_input_ids(model_folder)
input_ids = input_ids.to('cuda:0')
with torch.inference_mode():
output = hf_model.forward(input_ids=input_ids)
print('Saving to', output_file)
torch.save(output.logits, output_file)
def get_top_p_sorted_indices(logits: torch.Tensor, top_p: float = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
mask = cumulative_probs <= top_p
mask[..., 1:] = mask[..., :-1].clone()
mask[..., 0] = True
sorted_indices[~mask] = -1
return sorted_indices
def compare_logits(file0: str, file1: str):
'''
# 1.
fib gpt_blazing_experiment/model/debug_baichuan2.py:compare_logits \
--file0="$GPT_BLAZING_DATA/model/baichuan2/logits.pt" \
--file1="$GPT_BLAZING_DATA/model/baichuan2/hf_logits.pt"
# 0.9942
fib gpt_blazing_experiment/model/debug_baichuan2.py:compare_logits \
--file0="$GPT_BLAZING_DATA/model/baichuan2/logits.pt" \
--file1="$GPT_BLAZING_DATA/model/baichuan2/fp8_logits.pt"
# 0.9942
fib gpt_blazing_experiment/model/debug_baichuan2.py:compare_logits \
--file0="$GPT_BLAZING_DATA/model/baichuan2/logits.pt" \
--file1="$GPT_BLAZING_DATA/model/baichuan2/int8_logits.pt"
# 0.9945
fib gpt_blazing_experiment/model/debug_baichuan2.py:compare_logits \
--file0="$GPT_BLAZING_DATA/model/baichuan2/logits.pt" \
--file1="$GPT_BLAZING_DATA/model/baichuan2/int8_hf_logits.pt"
# 0.9939
fib gpt_blazing_experiment/model/debug_baichuan2.py:compare_logits \
--file0="$GPT_BLAZING_DATA/model/baichuan2/logits.pt" \
--file1="$GPT_BLAZING_DATA/model/baichuan2/compiled_logits.pt"
# 0.9939
fib gpt_blazing_experiment/model/debug_baichuan2.py:compare_logits \
--file0="$GPT_BLAZING_DATA/model/baichuan2/logits.pt" \
--file1="$GPT_BLAZING_DATA/model/baichuan2/compiled_int8_logits.pt"
# 0.9938
fib gpt_blazing_experiment/model/debug_baichuan2.py:compare_logits \
--file0="$GPT_BLAZING_DATA/model/baichuan2/compiled_int8_logits.pt" \
--file1="$GPT_BLAZING_DATA/model/baichuan2/int8_hf_logits.pt"
'''
logits0 = torch.load(file0, map_location='cuda:0')
logits1 = torch.load(file1, map_location='cuda:0')
tpsi0 = get_top_p_sorted_indices(logits0)
tpsi1 = get_top_p_sorted_indices(logits1)
rank = tpsi0 == tpsi1
r = rank.sum() / rank.numel()
print(r)
def demo_func(x: torch.Tensor, y: torch.Tensor, begin: int, end: int):
return x[:, begin:end] + y[:, begin:end]
def debug_compile():
func = torch.compile(demo_func, mode="reduce-overhead", fullgraph=True)
x = torch.rand((1, 20, 128))
y = torch.rand((1, 20, 128))
print(func(x, y, 0, 10))
# triggers Recompiling!
print(func(x, y, 0, 15))
print(func(x, y, 1, 2))
def timed(fn): # type: ignore
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record() # type: ignore
result = fn()
end.record() # type: ignore
torch.cuda.synchronize()
return result, start.elapsed_time(end) / 1000
def debug_greedy_decoding_performance():
print('Loading...')
model = load_model(
model_pt=str(
io.file("$GPT_BLAZING_DATA/model/baichuan2-13b-chat/int8.pt", expandvars=True)
),
int8=True,
config=Baichuan2ModelConfig(debug=False),
)
model.to('cuda:0')
print('Compiling...')
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
torch._inductor.config.fx_graph_cache = True
input_ids = generate_debug_input_ids()
input_ids = input_ids.to('cuda:0')
prefill_2048 = compile_model_prefill(model_prefill_2048)
prefill_4096 = compile_model_prefill(model_prefill_4096)
for offset in [0, 2048]:
for _ in range(3):
input_pos = torch.arange(
offset,
offset + int(input_ids.shape[1]),
device=input_ids.device,
dtype=torch.int,
)
print(
'prefill compiling time:',
timed( | lambda: model_dispatch( | 13 | 2023-12-03 09:26:20+00:00 | 12k |
tmllab/Machine_Vision_Therapy | model/blip2/modeling_blip_2.py | [
{
"identifier": "Blip2Config",
"path": "model/blip2/configuration_blip_2.py",
"snippet": "class Blip2Config(PretrainedConfig):\n r\"\"\"\n [`Blip2Config`] is the configuration class to store the configuration of a [`Blip2ForConditionalGeneration`]. It is\n used to instantiate a BLIP-2 model according to the specified arguments, defining the vision model, Q-Former model\n and language model configs. Instantiating a configuration with the defaults will yield a similar configuration to\n that of the BLIP-2 [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n vision_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`Blip2VisionConfig`].\n qformer_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`Blip2QFormerConfig`].\n text_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize any [`PretrainedConfig`].\n num_query_tokens (`int`, *optional*, defaults to 32):\n The number of query tokens passed through the Transformer.\n\n kwargs (*optional*):\n Dictionary of keyword arguments.\n\n Example:\n\n ```python\n >>> from transformers import (\n ... Blip2VisionConfig,\n ... Blip2QFormerConfig,\n ... OPTConfig,\n ... Blip2Config,\n ... Blip2ForConditionalGeneration,\n ... )\n\n >>> # Initializing a Blip2Config with Salesforce/blip2-opt-2.7b style configuration\n >>> configuration = Blip2Config()\n\n >>> # Initializing a Blip2ForConditionalGeneration (with random weights) from the Salesforce/blip2-opt-2.7b style configuration\n >>> model = Blip2ForConditionalGeneration(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n\n >>> # We can also initialize a Blip2Config from a Blip2VisionConfig, Blip2QFormerConfig and any PretrainedConfig\n\n >>> # Initializing BLIP-2 vision, BLIP-2 Q-Former and language model configurations\n >>> vision_config = Blip2VisionConfig()\n >>> qformer_config = Blip2QFormerConfig()\n >>> text_config = OPTConfig()\n\n >>> config = Blip2Config.from_text_vision_configs(vision_config, qformer_config, text_config)\n ```\"\"\"\n\n model_type = \"blip-2\"\n is_composition = True\n\n def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):\n super().__init__(**kwargs)\n\n if vision_config is None:\n vision_config = {}\n logger.info(\"vision_config is None. initializing the Blip2VisionConfig with default values.\")\n\n if qformer_config is None:\n qformer_config = {}\n logger.info(\"qformer_config is None. Initializing the Blip2QFormerConfig with default values.\")\n\n if text_config is None:\n text_config = {}\n logger.info(\"text_config is None. 
Initializing the text config with default values (`OPTConfig`).\")\n\n self.vision_config = Blip2VisionConfig(**vision_config)\n self.qformer_config = Blip2QFormerConfig(**qformer_config)\n text_model_type = text_config[\"model_type\"] if \"model_type\" in text_config else \"opt\"\n self.text_config = CONFIG_MAPPING[text_model_type](**text_config)\n\n self.tie_word_embeddings = self.text_config.tie_word_embeddings\n self.is_encoder_decoder = self.text_config.is_encoder_decoder\n\n self.num_query_tokens = num_query_tokens\n self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size\n self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES\n self.initializer_factor = 1.0\n self.initializer_range = 0.02\n\n @classmethod\n def from_vision_qformer_text_configs(\n cls,\n vision_config: Blip2VisionConfig,\n qformer_config: Blip2QFormerConfig,\n text_config: PretrainedConfig,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a [`Blip2Config`] (or a derived class) from a BLIP-2 vision model, Q-Former and language model\n configurations.\n\n Returns:\n [`Blip2Config`]: An instance of a configuration object\n \"\"\"\n\n return cls(\n vision_config=vision_config.to_dict(),\n qformer_config=qformer_config.to_dict(),\n text_config=text_config.to_dict(),\n **kwargs,\n )\n\n def to_dict(self):\n \"\"\"\n Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].\n\n Returns:\n `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n output[\"vision_config\"] = self.vision_config.to_dict()\n output[\"qformer_config\"] = self.qformer_config.to_dict()\n output[\"text_config\"] = self.text_config.to_dict()\n output[\"model_type\"] = self.__class__.model_type\n return output"
},
{
"identifier": "Blip2QFormerConfig",
"path": "model/blip2/configuration_blip_2.py",
"snippet": "class Blip2QFormerConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`Blip2QFormerModel`]. It is used to instantiate a\n BLIP-2 Querying Transformer (Q-Former) model according to the specified arguments, defining the model architecture.\n Instantiating a configuration with the defaults will yield a similar configuration to that of the BLIP-2\n [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture. Configuration objects\n inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from\n [`PretrainedConfig`] for more information.\n\n Note that [`Blip2QFormerModel`] is very similar to [`BertLMHeadModel`] with interleaved cross-attention.\n\n Args:\n vocab_size (`int`, *optional*, defaults to 30522):\n Vocabulary size of the Q-Former model. Defines the number of different tokens that can be represented by\n the `inputs_ids` passed when calling the model.\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in the Transformer encoder.\n hidden_act (`str` or `Callable`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n hidden_dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout ratio for the attention probabilities.\n max_position_embeddings (`int`, *optional*, defaults to 512):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the layer normalization layers.\n position_embedding_type (`str`, *optional*, defaults to `\"absolute\"`):\n Type of position embedding. Choose one of `\"absolute\"`, `\"relative_key\"`, `\"relative_key_query\"`. For\n positional embeddings use `\"absolute\"`. 
For more information on `\"relative_key\"`, please refer to\n [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).\n For more information on `\"relative_key_query\"`, please refer to *Method 4* in [Improve Transformer Models\n with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).\n classifier_dropout (`float`, *optional*):\n The dropout ratio for the classification head.\n cross_attention_frequency (`int`, *optional*, defaults to 2):\n The frequency of adding cross-attention to the Transformer layers.\n encoder_hidden_size (`int`, *optional*, defaults to 1408):\n The hidden size of the hidden states for cross-attention.\n\n Examples:\n\n ```python\n >>> from transformers import Blip2QFormerConfig, Blip2QFormerModel\n\n >>> # Initializing a BLIP-2 Salesforce/blip2-opt-2.7b style configuration\n >>> configuration = Blip2QFormerConfig()\n\n >>> # Initializing a model (with random weights) from the Salesforce/blip2-opt-2.7b style configuration\n >>> model = Blip2QFormerModel(configuration)\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n model_type = \"blip_2_qformer\"\n\n def __init__(\n self,\n vocab_size=30522,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n initializer_range=0.02,\n layer_norm_eps=1e-12,\n pad_token_id=0,\n position_embedding_type=\"absolute\",\n classifier_dropout=None,\n cross_attention_frequency=2,\n encoder_hidden_size=1408,\n **kwargs,\n ):\n super().__init__(pad_token_id=pad_token_id, **kwargs)\n\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.position_embedding_type = position_embedding_type\n self.classifier_dropout = classifier_dropout\n self.cross_attention_frequency = cross_attention_frequency\n self.encoder_hidden_size = encoder_hidden_size\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the qformer config dict if we are loading from Blip2Config\n if config_dict.get(\"model_type\") == \"blip-2\":\n config_dict = config_dict[\"qformer_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)"
},
{
"identifier": "Blip2VisionConfig",
"path": "model/blip2/configuration_blip_2.py",
"snippet": "class Blip2VisionConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`Blip2VisionModel`]. It is used to instantiate a\n BLIP-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a\n configuration defaults will yield a similar configuration to that of the BLIP-2\n [Salesforce/blip2-opt-2.7b](https://huggingface.co/Salesforce/blip2-opt-2.7b) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n hidden_size (`int`, *optional*, defaults to 1408):\n Dimensionality of the encoder layers and the pooler layer.\n intermediate_size (`int`, *optional*, defaults to 6144):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n num_hidden_layers (`int`, *optional*, defaults to 39):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 16):\n Number of attention heads for each attention layer in the Transformer encoder.\n image_size (`int`, *optional*, defaults to 224):\n The size (resolution) of each image.\n patch_size (`int`, *optional*, defaults to 14):\n The size (resolution) of each patch.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` ``\"gelu\"` are supported. layer_norm_eps (`float`, *optional*, defaults\n to 1e-5): The epsilon used by the layer normalization layers.\n dropout (`float`, *optional*, defaults to 0.0):\n The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n initializer_factor (`float``, *optional*, defaults to 1):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n qkv_bias (`bool`, *optional*, defaults to `True`):\n Whether to add a bias to the queries and values in the self-attention layers.\n\n Example:\n\n ```python\n >>> from transformers import Blip2VisionConfig, Blip2VisionModel\n\n >>> # Initializing a Blip2VisionConfig with Salesforce/blip2-opt-2.7b style configuration\n >>> configuration = Blip2VisionConfig()\n\n >>> # Initializing a Blip2VisionModel (with random weights) from the Salesforce/blip2-opt-2.7b style configuration\n >>> model = Blip2VisionModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n\n model_type = \"blip_2_vision_model\"\n\n def __init__(\n self,\n hidden_size=1408,\n intermediate_size=6144,\n projection_dim=512,\n num_hidden_layers=39,\n num_attention_heads=16,\n num_channels=3,\n image_size=224,\n patch_size=14,\n hidden_act=\"gelu\",\n layer_norm_eps=0.00001,\n dropout=0.0,\n attention_dropout=0.0,\n initializer_range=1e-10,\n initializer_factor=1.0,\n qkv_bias=True,\n **kwargs,\n ):\n super().__init__(**kwargs)\n\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.projection_dim = projection_dim\n self.dropout = dropout\n self.num_hidden_layers = 
num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.num_channels = num_channels\n self.patch_size = patch_size\n self.image_size = image_size\n self.initializer_range = initializer_range\n self.initializer_factor = initializer_factor\n self.attention_dropout = attention_dropout\n self.layer_norm_eps = layer_norm_eps\n self.hidden_act = hidden_act\n self.qkv_bias = qkv_bias\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the vision config dict if we are loading from Blip2Config\n if config_dict.get(\"model_type\") == \"blip-2\":\n config_dict = config_dict[\"vision_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)"
},
{
"identifier": "Blip2Processor",
"path": "model/blip2/processing_blip_2.py",
"snippet": "class Blip2Processor(ProcessorMixin):\n r\"\"\"\n Constructs a BLIP-2 processor which wraps a BLIP image processor and an OPT/T5 tokenizer into a single processor.\n\n [`BlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`AutoTokenizer`]. See the docstring\n of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.\n\n Args:\n image_processor (`BlipImageProcessor`):\n An instance of [`BlipImageProcessor`]. The image processor is a required input.\n tokenizer (`AutoTokenizer`):\n An instance of ['PreTrainedTokenizer`]. The tokenizer is a required input.\n \"\"\"\n attributes = [\"image_processor\", \"tokenizer\"]\n image_processor_class = \"BlipImageProcessor\"\n tokenizer_class = \"AutoTokenizer\"\n\n # Copied from transformers.models.blip.processing_blip.BlipProcessor.__init__\n def __init__(self, image_processor, tokenizer):\n tokenizer.return_token_type_ids = False\n super().__init__(image_processor, tokenizer)\n self.current_processor = self.image_processor\n\n # Copied from transformers.models.blip.processing_blip.BlipProcessor.__call__\n def __call__(\n self,\n images=None,\n text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,\n add_special_tokens: bool = True,\n padding: Union[bool, str, PaddingStrategy] = False,\n truncation: Union[bool, str, TruncationStrategy] = None,\n max_length: Optional[int] = None,\n stride: int = 0,\n pad_to_multiple_of: Optional[int] = None,\n return_attention_mask: Optional[bool] = None,\n return_overflowing_tokens: bool = False,\n return_special_tokens_mask: bool = False,\n return_offsets_mapping: bool = False,\n return_token_type_ids: bool = False,\n return_length: bool = False,\n verbose: bool = True,\n return_tensors: Optional[Union[str, TensorType]] = None,\n **kwargs,\n ) -> BatchEncoding:\n \"\"\"\n This method uses [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and\n [`BertTokenizerFast.__call__`] to prepare text for the model.\n\n Please refer to the docstring of the above two methods for more information.\n \"\"\"\n if images is None and text is None:\n raise ValueError(\"You have to specify either images or text.\")\n\n # Get only text\n if images is None:\n self.current_processor = self.tokenizer\n text_encoding = self.tokenizer(\n text=text,\n add_special_tokens=add_special_tokens,\n padding=padding,\n truncation=truncation,\n max_length=max_length,\n stride=stride,\n pad_to_multiple_of=pad_to_multiple_of,\n return_attention_mask=return_attention_mask,\n return_overflowing_tokens=return_overflowing_tokens,\n return_special_tokens_mask=return_special_tokens_mask,\n return_offsets_mapping=return_offsets_mapping,\n return_token_type_ids=return_token_type_ids,\n return_length=return_length,\n verbose=verbose,\n return_tensors=return_tensors,\n **kwargs,\n )\n return text_encoding\n\n # add pixel_values\n encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)\n\n if text is not None:\n text_encoding = self.tokenizer(\n text=text,\n add_special_tokens=add_special_tokens,\n padding=padding,\n truncation=truncation,\n max_length=max_length,\n stride=stride,\n pad_to_multiple_of=pad_to_multiple_of,\n return_attention_mask=return_attention_mask,\n return_overflowing_tokens=return_overflowing_tokens,\n return_special_tokens_mask=return_special_tokens_mask,\n return_offsets_mapping=return_offsets_mapping,\n return_token_type_ids=return_token_type_ids,\n return_length=return_length,\n 
verbose=verbose,\n return_tensors=return_tensors,\n **kwargs,\n )\n else:\n text_encoding = None\n\n if text_encoding is not None:\n encoding_image_processor.update(text_encoding)\n\n return encoding_image_processor\n\n # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer\n def batch_decode(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please\n refer to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.batch_decode(*args, **kwargs)\n\n # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer\n def decode(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer\n to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.decode(*args, **kwargs)\n\n @property\n # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names\n def model_input_names(self):\n tokenizer_input_names = self.tokenizer.model_input_names\n image_processor_input_names = self.image_processor.model_input_names\n return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))"
}
] | import math
import torch
import torch.utils.checkpoint
from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPooling,
BaseModelOutputWithPoolingAndCrossAttentions,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.auto.modeling_auto import AutoModelForCausalLM, AutoModelForSeq2SeqLM
from model.blip2.configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig
from model.blip2.processing_blip_2 import Blip2Processor | 7,975 | return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def feed_forward_chunk_query(self, attention_output):
intermediate_output = self.intermediate_query(attention_output)
layer_output = self.output_query(intermediate_output, attention_output)
return layer_output
class Blip2QFormerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList(
[Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
query_length=0,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions else None
next_decoder_cache = () if use_cache else None
for i in range(self.config.num_hidden_layers):
layer_module = self.layer[i]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions, query_length)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
query_length,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if layer_module.has_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class Blip2QFormerModel(Blip2PreTrainedModel):
"""
Querying Transformer (Q-Former), used in BLIP-2.
"""
| # coding=utf-8
# Copyright 2023 The Salesforce Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch BLIP-2 model."""
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "Salesforce/blip2-opt-2.7b"
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"Salesforce/blip2-opt-2.7b",
# See all BLIP-2 models at https://huggingface.co/models?filter=blip
]
@dataclass
class Blip2ForConditionalGenerationModelOutput(ModelOutput):
"""
Class defining the outputs of [`Blip2ForConditionalGeneration`].
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss from the language model.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head of the language model.
vision_outputs (`BaseModelOutputWithPooling`):
Outputs of the vision encoder.
qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
Outputs of the Q-Former (Querying Transformer).
language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
Outputs of the language model.
"""
loss: Optional[Tuple[torch.FloatTensor]] = None
logits: Optional[Tuple[torch.FloatTensor]] = None
vision_outputs: Optional[torch.FloatTensor] = None
qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None
language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None
def to_tuple(self) -> Tuple[Any]:
return tuple(
self[k]
if k not in ["vision_outputs", "qformer_outputs", "language_model_outputs"]
else getattr(self, k).to_tuple()
for k in self.keys()
)
# Copied from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->Blip2
class Blip2VisionEmbeddings(nn.Module):
def __init__(self, config: Blip2VisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(
torch.randn(1, 1, self.embed_dim),
)
self.patch_embedding = nn.Conv2d(
in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
batch_size = pixel_values.shape[0]
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid]
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype)
return embeddings
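# Shape sketch for the embeddings above (an illustrative helper, not part of the
# original model code; the function name is made up). With the default vision
# config (image_size=224, patch_size=14, hidden_size=1408) the Conv2d yields a
# 224 // 14 = 16 patch grid, i.e. 16 * 16 = 256 patch tokens, plus one class
# token, so the output has 257 positions.
def _demo_vision_embedding_shapes():
    config = Blip2VisionConfig()
    embeddings = Blip2VisionEmbeddings(config)
    pixel_values = torch.randn(2, 3, config.image_size, config.image_size)
    out = embeddings(pixel_values)
    num_positions = (config.image_size // config.patch_size) ** 2 + 1
    assert out.shape == (2, num_positions, config.hidden_size)  # (2, 257, 1408)
    return out.shape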
class Blip2Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = nn.Dropout(config.attention_dropout)
# small tweak here compared to CLIP, no bias here
self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)
if config.qkv_bias:
q_bias = nn.Parameter(torch.zeros(self.embed_dim))
v_bias = nn.Parameter(torch.zeros(self.embed_dim))
else:
q_bias = None
v_bias = None
if q_bias is not None:
qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
self.qkv.bias = nn.Parameter(qkv_bias)
self.projection = nn.Linear(self.embed_dim, self.embed_dim)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, embed_dim = hidden_states.size()
mixed_qkv = self.qkv(hidden_states)
mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(
2, 0, 3, 1, 4
)
query_states, key_states, value_states = (
mixed_qkv[0],
mixed_qkv[1],
mixed_qkv[2],
)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
attention_scores = attention_scores * self.scale
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
context_layer = context_layer.reshape(new_context_layer_shape)
output = self.projection(context_layer)
outputs = (output, attention_probs) if output_attentions else (output, None)
return outputs
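# Note on the fused projection above (illustrative sketch; the helper name is
# made up): the BLIP-2 vision tower uses no key bias, so with `qkv_bias=True`
# the fused bias vector is laid out as [q | k | v] along a single
# (3 * embed_dim)-long vector, with the key slice initialized to zeros.
def _demo_fused_qkv_bias_layout():
    config = Blip2VisionConfig(hidden_size=8, num_attention_heads=2)  # tiny config for illustration
    attention = Blip2Attention(config)
    q_bias, k_bias, v_bias = attention.qkv.bias.chunk(3)
    # each slice covers one projection of width embed_dim (= 8 here)
    assert q_bias.shape == k_bias.shape == v_bias.shape == (config.hidden_size,)
    return attention.qkv.bias.shape  # torch.Size([24])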
# Copied from transformers.models.blip.modeling_blip.BlipMLP
class Blip2MLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
# Copied from transformers.models.blip.modeling_blip.BlipEncoderLayer with Blip->Blip2
class Blip2EncoderLayer(nn.Module):
def __init__(self, config: Blip2Config):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = Blip2Attention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = Blip2MLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
head_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = hidden_states + residual
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
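# The encoder layer above uses a pre-norm residual layout: LayerNorm is applied
# before self-attention and before the MLP, and the residual is added back after
# each block, so the layer preserves the (batch, seq_len, embed_dim) shape.
# A minimal shape check with a tiny, randomly initialized config (illustrative
# only; the helper name is made up):
def _demo_encoder_layer_preserves_shape():
    config = Blip2VisionConfig(hidden_size=8, intermediate_size=16, num_attention_heads=2)
    layer = Blip2EncoderLayer(config)
    hidden_states = torch.randn(2, 5, config.hidden_size)
    (output,) = layer(hidden_states, attention_mask=None)
    assert output.shape == hidden_states.shape
    return output.shape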
class Blip2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = Blip2Config
base_model_prefix = "blip"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [
r"position_ids",
r"language_model.encoder.embed_tokens.weight",
r"language_model.decoder.embed_tokens.weight",
]
_no_split_modules = ["Blip2Attention", "T5Block", "OPTDecoderLayer"]
_keep_in_fp32_modules = ["wo"]
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_range
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=factor)
if hasattr(module, "bias") and module.bias is not None:
module.bias.data.zero_()
if isinstance(module, Blip2VisionEmbeddings):
if hasattr(self.config, "vision_config"):
factor = self.config.vision_config.initializer_range
nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, Blip2Encoder):
module.gradient_checkpointing = value
BLIP_2_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`Blip2Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
BLIP_2_VISION_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for
details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
BLIP_2_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for
details.
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be
provided to serve as text prompt, which the language model can continue.
Indices can be obtained using [`Blip2Processor`]. See [`Blip2Processor.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary of the language model. Only relevant in case an
encoder-decoder language model (like T5) is used.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids)
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
Only relevant in case an encoder-decoder language model (like T5) is used.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
# Copied from transformers.models.blip.modeling_blip.BlipEncoder with Blip->Blip2
class Blip2Encoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`Blip2EncoderLayer`].
Args:
config (`Blip2Config`):
The corresponding vision configuration for the `Blip2Encoder`.
"""
def __init__(self, config: Blip2Config):
super().__init__()
self.config = config
self.layers = nn.ModuleList([Blip2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
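# The `create_custom_forward` closure above exists because
# torch.utils.checkpoint.checkpoint only passes positional tensors through to
# the wrapped callable; capturing the non-tensor `output_attentions` flag in a
# closure lets it reach the layer without going through the checkpoint
# machinery. A standalone sketch of the same pattern (illustrative helper,
# not used by the model code):
def _demo_checkpointed_layer_call(encoder_layer, hidden_states, attention_mask, output_attentions=False):
    def custom_forward(*inputs):
        return encoder_layer(*inputs, output_attentions)
    return torch.utils.checkpoint.checkpoint(custom_forward, hidden_states, attention_mask)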
# Copied from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->Blip2, BLIP->BLIP_2
class Blip2VisionModel(Blip2PreTrainedModel):
main_input_name = "pixel_values"
config_class = Blip2VisionConfig
def __init__(self, config: Blip2VisionConfig):
super().__init__(config)
self.config = config
embed_dim = config.hidden_size
self.embeddings = Blip2VisionEmbeddings(config)
self.encoder = Blip2Encoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.post_init()
@add_start_docstrings_to_model_forward(BLIP_2_VISION_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Blip2VisionConfig)
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPooling]:
r"""
Returns:
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.embeddings(pixel_values)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
last_hidden_state = self.post_layernorm(last_hidden_state)
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
def get_input_embeddings(self):
return self.embeddings
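# End-to-end shape sketch for the vision tower above, using a small randomly
# initialized config so no pretrained weights are needed (illustrative only;
# real checkpoints use the 1408-dim, 39-layer defaults):
def _demo_vision_model_forward():
    config = Blip2VisionConfig(
        hidden_size=8, intermediate_size=16, num_hidden_layers=2,
        num_attention_heads=2, image_size=28, patch_size=14,
    )
    model = Blip2VisionModel(config).eval()
    with torch.no_grad():
        outputs = model(pixel_values=torch.randn(1, 3, 28, 28))
    # (28 // 14) ** 2 = 4 patch tokens plus the class token -> 5 positions
    assert outputs.last_hidden_state.shape == (1, 5, config.hidden_size)
    assert outputs.pooler_output.shape == (1, config.hidden_size)
    return outputs.pooler_output.shape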
class Blip2QFormerMultiHeadAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention heads (%d)"
% (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
if is_cross_attention:
self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
else:
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.save_attention = False
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
mixed_query_layer = self.query(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in the BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
if is_cross_attention and self.save_attention:
self.save_attention_map(attention_probs)
attention_probs.register_hook(self.save_attn_gradients)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs_dropped = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs_dropped = attention_probs_dropped * head_mask
context_layer = torch.matmul(attention_probs_dropped, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
outputs = outputs + (past_key_value,)
return outputs
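# Shape sketch for the cross-attention variant above (illustrative helper; the
# name and sizes below are made up for the example): with the default Q-Former
# config, queries live in a 768-dim space while the vision features are
# 1408-dim, so the key/value projections map encoder_hidden_size -> hidden_size
# and 32 query tokens can attend over 257 image tokens.
def _demo_qformer_cross_attention_shapes():
    config = Blip2QFormerConfig()  # hidden_size=768, encoder_hidden_size=1408
    cross_attention = Blip2QFormerMultiHeadAttention(config, is_cross_attention=True)
    query_states = torch.randn(2, 32, config.hidden_size)
    image_features = torch.randn(2, 257, config.encoder_hidden_size)
    context_layer, _present_key_value = cross_attention(query_states, encoder_hidden_states=image_features)
    assert context_layer.shape == (2, 32, config.hidden_size)
    return context_layer.shape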
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Blip2QFormer
class Blip2QFormerSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class Blip2QFormerAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
super().__init__()
self.attention = Blip2QFormerMultiHeadAttention(config, is_cross_attention)
self.output = Blip2QFormerSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor]:
self_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
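# The `prune_heads` helper above removes whole attention heads by slicing the
# query/key/value projections and the output projection. A small sketch
# (illustrative only; the helper name is made up): pruning 2 of the default 12
# heads shrinks the per-projection width from 12 * 64 = 768 to 10 * 64 = 640.
def _demo_prune_two_heads():
    config = Blip2QFormerConfig()
    attention = Blip2QFormerAttention(config)
    attention.prune_heads([0, 1])
    assert attention.attention.num_attention_heads == config.num_attention_heads - 2
    assert attention.attention.all_head_size == 640
    return attention.attention.all_head_size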
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Blip2QFormer
class Blip2QFormerIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Blip2QFormer
class Blip2QFormerOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class Blip2QFormerLayer(nn.Module):
def __init__(self, config, layer_idx):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = Blip2QFormerAttention(config)
self.layer_idx = layer_idx
if layer_idx % config.cross_attention_frequency == 0:
self.crossattention = Blip2QFormerAttention(config, is_cross_attention=True)
self.has_cross_attention = True
else:
self.has_cross_attention = False
self.intermediate_query = Blip2QFormerIntermediate(config)
self.output_query = Blip2QFormerOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
query_length=0,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
if query_length > 0:
query_attention_output = attention_output[:, :query_length, :]
if self.has_cross_attention:
if encoder_hidden_states is None:
raise ValueError("encoder_hidden_states must be given for cross-attention layers")
cross_attention_outputs = self.crossattention(
query_attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions=output_attentions,
)
query_attention_output = cross_attention_outputs[0]
# add cross attentions if we output attention weights
outputs = outputs + cross_attention_outputs[1:-1]
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk_query,
self.chunk_size_feed_forward,
self.seq_len_dim,
query_attention_output,
)
if attention_output.shape[1] > query_length:
layer_output_text = apply_chunking_to_forward(
self.feed_forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output[:, query_length:, :],
)
layer_output = torch.cat([layer_output, layer_output_text], dim=1)
else:
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output,
)
outputs = (layer_output,) + outputs
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def feed_forward_chunk_query(self, attention_output):
intermediate_output = self.intermediate_query(attention_output)
layer_output = self.output_query(intermediate_output, attention_output)
return layer_output
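# With the default cross_attention_frequency of 2, only even-indexed layers get
# a cross-attention block: for the default 12-layer Q-Former, layers
# 0, 2, 4, 6, 8 and 10 attend to the image features and the odd layers do not.
# A quick way to see that layout (illustrative helper, not part of the model):
def _demo_cross_attention_layout():
    config = Blip2QFormerConfig()  # num_hidden_layers=12, cross_attention_frequency=2
    layers = [Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
    # -> [0, 2, 4, 6, 8, 10]
    return [layer_idx for layer_idx, layer in enumerate(layers) if layer.has_cross_attention]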
class Blip2QFormerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList(
[Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
query_length=0,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions else None
next_decoder_cache = () if use_cache else None
for i in range(self.config.num_hidden_layers):
layer_module = self.layer[i]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions, query_length)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
query_length,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if layer_module.has_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class Blip2QFormerModel(Blip2PreTrainedModel):
"""
Querying Transformer (Q-Former), used in BLIP-2.
"""
| def __init__(self, config: Blip2QFormerConfig): | 1 | 2023-12-04 00:14:18+00:00 | 12k |
hubo0417/EasyGC | web.py | [
{
"identifier": "Pipeline_Item",
"path": "utils/pipeline.py",
"snippet": "class Pipeline_Item:\n # 类的实例\n Obj: object = None\n # 方法名\n Method: str = None\n # 方法参数\n Params: Dict[str, Any] = None\n # 是否使用上一个方法的结果作为参数\n Is_Use_Pre_Result: bool = False,\n # 是否在节点执行完之后暂停管道\n Halt: bool = False\n # 当前方法返回结果为None时执行的后备方法\n Standby_Method = None\n\n def __init__(self,\n Obj: object,\n Method: str,\n Is_Use_Pre_Result: bool = False,\n Params: Dict[str, Any] = None,\n Halt: bool = False,\n Standby_Method=None):\n self.Obj = Obj\n self.Method = Method\n self.Is_Use_Pre_Result = Is_Use_Pre_Result\n self.Params = Params\n self.Halt = Halt\n self.Standby_Method = Standby_Method"
},
{
"identifier": "Pipeline_Process_Task",
"path": "utils/pipeline.py",
"snippet": "class Pipeline_Process_Task:\n pipe_queue: queue.Queue = None\n is_contain_halt_task: bool = False\n\n def __init__(self) -> None:\n if self.pipe_queue is None:\n self.pipe_queue = queue.Queue()\n\n def add_item(self, item: Pipeline_Item = None):\n if item is None:\n raise ValueError(\"添加进管道的执行节点为None\")\n if isinstance(item, Pipeline_Item):\n self.pipe_queue.put(item=item)\n else:\n raise ValueError(\"加进管道的执行节点类型发生错误\")\n\n def execute_pipeline(self, pre_result: Any = None):\n if self.pipe_queue.empty():\n return None\n size = self.pipe_queue.qsize()\n\n def _excute(item: Pipeline_Item):\n if hasattr(item.Obj, item.Method) and callable(\n getattr(item.Obj, item.Method)):\n method_to_call = getattr(item.Obj, item.Method)\n # 不使用上一个方法的返回值作为参数\n if item.Is_Use_Pre_Result is not True:\n if item.Params is not None:\n result = method_to_call(**item.Params)\n else:\n result = method_to_call()\n # 要使用上一个方法的返回值作为参数\n else:\n if pre_result is not None:\n if isinstance(pre_result, dict):\n result = method_to_call(**pre_result)\n else:\n result = method_to_call(pre_result)\n else:\n result = method_to_call()\n if result is None and item.Standby_Method is not None:\n result = _excute(item=item.Standby_Method)\n return result\n\n for i in range(0, size):\n item = self.pipe_queue.get()\n result = _excute(item)\n pre_result = result\n if item.Halt is True:\n self.is_contain_halt_task = True\n break\n return pre_result\n\n def continue_pipeline(self, pre_result: Any = None):\n return self.execute_pipeline(pre_result)"
},
{
"identifier": "Note_Generate_Utils",
"path": "utils/generate/note_generate_utils.py",
"snippet": "class Note_Generate_Utils:\n llm: BaseLanguageModel\n max_image_count: int = 10,\n num_per_prompt_image: int = 2\n\n def __init__(self,\n llm,\n base_file_path: str = None,\n max_image_count: int = 10,\n num_per_prompt_image: int = 2) -> None:\n self.llm = llm\n self.max_image_count = max_image_count\n self.num_per_prompt_image = num_per_prompt_image\n self.base_file_path = base_file_path\n self.split_count = 3\n\n @abstractmethod\n def read_note_list(self, **kwargs):\n pass\n\n @abstractmethod\n def summary_response(self, details: list = None) -> str:\n pass\n\n def generate_image_sentence(self, content: str):\n prompt_template = \"\"\"'{content}',你的任务是根据上述文章内容,尽量生成多条描述自然景色的句子,句与句之间用~进行分割\"\"\"\n PROMPT = PromptTemplate(template=prompt_template,\n input_variables=[\"content\"])\n # 重新设置模型参数\n chain = LLMChain(llm=self.llm,\n prompt=PROMPT,\n llm_kwargs={\n 'temperature': 0.95,\n 'top_p': 0.7\n })\n\n result = chain.predict(content=content)\n return result\n\n def get_image_flags(self, content: str, style: dict, loras: list):\n images = re.findall('【(.*?)】', content)\n text_to_image_list = []\n if images and len(images) > 0:\n for i in range(0, len(images)):\n text_to_image_list.append(images[i])\n else:\n content = self.generate_image_sentence(content)\n text_to_image_list = content.split(\"~\")\n\n # 翻译成英文\n texts_result: List[str] = []\n # texts_result.extend(text_to_image_list)\n for item in text_to_image_list[:self.max_image_count]:\n time.sleep(3)\n texts_result.append(Translation_Baidu.excute_translation(item))\n return {\"texts\": texts_result, \"style\": style, \"loras\": loras}\n\n def generate_images_by_image(self, image_url: str, style: dict, text: str):\n # 关闭LLM模型,释放显卡资源\n if self.llm.model is not None:\n self.llm.unload_model()\n time.sleep(5)\n text_english = \" \"\n if text:\n text_english = Translation_Baidu.excute_translation(text)\n sd_model = SD_Refiner_Model().instance(is_combine_base=False)\n sd_model.load_model()\n prompt = style[\"prompt\"].format(prompt=text_english).lower()\n negative_prompt = style[\"negative_prompt\"]\n images = sd_model.get_image_to_image_single_prompt(\n query=prompt,\n image_url=image_url,\n image_count=4,\n negative_prompt=negative_prompt)\n # 关闭SDXL模型,释放显卡资源\n sd_model.unload_model()\n return images\n\n def generate_images(self, texts: list, style: dict, loras: list):\n # 关闭LLM模型,释放显卡资源\n if self.llm.model is not None:\n self.llm.unload_model()\n time.sleep(5)\n # 加载了Lora,只使用basemodel,效果最好\n if len(loras) > 0:\n sd_model = SD_Base_Model.instance()\n sd_model.load_model()\n sd_model.fuse_lora(loras=loras)\n # 不加载lora 使用refiner+base,效果最好\n else:\n sd_model = SD_Refiner_Model.instance(is_combine_base=True)\n sd_model.load_model()\n image_addr = []\n for item in texts:\n try:\n name = self._name_image(item)\n prefix = \", \".join([(i[\"tag_words\"]) for i in loras\n ]) + \", \" if len(loras) > 0 else \"\"\n prompt = prefix + style[\"prompt\"].format(prompt=item).lower()\n negative_prompt = style[\"negative_prompt\"]\n target_image = sd_model.get_image_to_image_single_prompt(\n query=prompt,\n image_count=self.num_per_prompt_image,\n negative_prompt=negative_prompt)\n for i in range(len(target_image)):\n target_image[i].save(\n f\"{self.base_file_path}\\\\{name}_{i}.jpg\", \"JPEG\")\n image_addr.append(f\"{self.base_file_path}\\\\{name}_{i}.jpg\")\n except Exception:\n pass\n\n # 关闭SDXL模型,释放显卡资源\n sd_model.unload_model()\n return image_addr\n\n def _name_image(self, sentence: str):\n words = sentence.split()\n initials = [word[0] 
for word in words]\n return \"\".join(initials)\n\n def generate_article_content(self,\n text: str,\n prompt_template: str,\n is_only_return_result: bool = True):\n PROMPT = PromptTemplate(template=prompt_template,\n input_variables=[\"text\"])\n # 将文本进行拆分成段\n texts, text = self._get_middle_partal_chapter(\n text=text, split_count=self.split_count)\n # 定义分段生成方法\n # 内容长度过长,则采用分段参考生成的策略\n if len(text) > 1024 * 2 and self.split_count > 3:\n content = self._summarize_docs(texts=texts, PROMPT=PROMPT)\n else:\n # 重新设置模型参数\n chain = Stream_Chain(llm=self.llm,\n prompt=PROMPT,\n llm_kwargs={\n \"temperature\": 0.95,\n \"top_p\": 0.7\n })\n content = chain.predict(text=text)\n if is_only_return_result is False:\n return {\"original\": text, \"content\": content}\n return {\"content\": content}\n\n # 定义分段生成方法\n def _summarize_docs(self, texts: List[str], PROMPT: PromptTemplate):\n if texts:\n combine_prompt = PromptTemplate(template=\"\"\"已知信息:'{text}'\n 你的任务将已知信息,改编成一篇散文式的新文章,文章的用词,\n 造句必须丰富,而且文章里面对场景的描写一定要具体,要细致\"\"\",\n input_variables=[\"text\"])\n chain = load_summarize_chain(self.llm,\n chain_type=\"map_reduce\",\n return_intermediate_steps=True,\n map_prompt=PROMPT,\n combine_prompt=combine_prompt,\n verbose=True,\n llm_kwargs={\n \"temperature\": 0.8,\n \"top_p\": 0.6\n })\n\n docs = [Document(page_content=text) for text in texts]\n summ = chain.stream({\"input_documents\": docs},\n return_only_outputs=True)\n return summ\n\n def _get_middle_partal_chapter(self, text: str, split_count: int = 1):\n texts = EmbeddingHelper.splitText(text=text,\n chunk_size=1024 * 2,\n overlap=0)\n middle = len(texts) // 2\n if len(texts) % 2 == 0:\n texts = texts[middle - 1:middle + (split_count - 1)]\n else:\n texts = texts[middle:middle + (split_count - 1)]\n new_text = \"\".join(texts)\n return texts, new_text"
},
{
"identifier": "ChatGLM_Helper",
"path": "llms/llm_helper.py",
"snippet": "class ChatGLM_Helper(object):\n single_lock = RLock()\n model_id: str = BASE_CONFIG[\"llm_model_path\"]\n is_local_model: bool = True\n\n def __init__(self, **kwargs) -> None:\n if \"model_id\" in kwargs:\n self.model_id = kwargs[\"model_id\"]\n\n self.llm = ChatGPT()\n self.llm.load_model()\n\n # 给予用户输入构建提示词(用于知识中心问答)\n def build_prompt(\n self,\n output_parsers: StructuredOutputParser = None) -> PromptTemplate:\n template = \"\"\"已知信息:'{context}'。问题:'{question}'。\n 请根据已知信息,简洁和专业地使用中文回答用户的问题。\n 如果无法从已知信息中得到答案,请说 \"根据已知信息无法回答该问题\",\n 不允许在答案中添加编造成分。\"\"\"\n if output_parsers:\n template += \"\\n {format_instructions}\"\n format_instructions = output_parsers.get_format_instructions()\n prompt = PromptTemplate(\n input_variables=[\"context\", \"question\"],\n template=template,\n partial_variables={\"format_instructions\": format_instructions})\n return prompt\n else:\n prompt = PromptTemplate(input_variables=[\"context\", \"question\"],\n template=template)\n return prompt\n\n # 构建输出格式\n def build_output_parsers(\n self, schemas: List[ResponseSchema]) -> StructuredOutputParser:\n if not schemas:\n return None\n response_schemas = []\n for item in schemas:\n response_schemas.append(item)\n output_parser = StructuredOutputParser.from_response_schemas(\n response_schemas)\n return output_parser\n\n # 给予用户输入查询该输入的上线文相关性描述\n def query_context(self, input: str, k: int = 2):\n helper = EmbeddingHelper()\n result = helper.query(input, k)\n db_context = \"\"\n if len(result) > 0:\n db_context = \"\\n\".join(result)\n return db_context\n\n @classmethod\n def instance(cls, *args, **kwargs):\n if not hasattr(ChatGLM_Helper, \"_instance\"):\n with ChatGLM_Helper.single_lock:\n if not hasattr(ChatGLM_Helper, \"_instance\"):\n ChatGLM_Helper._instance = cls(*args, **kwargs)\n return ChatGLM_Helper._instance"
},
{
"identifier": "API_Sequence_Agent",
"path": "agents/agent_controller/api_sequence_agent.py",
"snippet": "class API_Sequence_Agent(BaseMultiActionAgent):\n tools: List[functional_Tool]\n llm: BaseLanguageModel\n intent_template: str = SEQUENCE_EXECUTE_API_TOOLS_PROMPT_TEMPLATE\n prompt = PromptTemplate.from_template(intent_template)\n llm_chain: LLMChain = None\n\n def get_llm_chain(self):\n if not self.llm_chain:\n self.llm_chain = LLMChain(llm=self.llm, prompt=self.prompt)\n\n def output_parser(self, text: str):\n if not text:\n raise ValueError(\"未能获取到需要解析的文本信息\")\n # 使用正则表达式匹配方括号中的内容\n matches = re.findall(r'\\[(.*?)\\]', text)\n # 将匹配到的内容构造成一个数组\n result_array = []\n if matches:\n result_array = [match.strip() for match in matches[0].split(',')]\n return result_array\n\n def check_selected_tools(self, tools: list, selected_tools: list) -> bool:\n if not selected_tools or len(selected_tools) <= 0:\n return False\n for select_tool in selected_tools:\n if select_tool not in tools:\n return False\n return True\n\n # 根据提示(prompt)选择工具\n def choose_tools(self, query) -> List[str]:\n self.get_llm_chain()\n tool_infos = [{tool.name: tool.description} for tool in self.tools]\n resp = self.llm_chain.predict(intents=tool_infos, query=query)\n select_tools = self.output_parser(resp)\n tool_names = [tool.name for tool in self.tools]\n if self.check_selected_tools(tool_names, select_tools) is False:\n return None\n return select_tools\n\n @property\n def input_keys(self):\n return [\"input\"]\n\n # 通过 AgentAction 调用选择的工具,工具的输入是 \"input\"\n def plan(self, intermediate_steps: List[Tuple[AgentAction, str]],\n **kwargs: Any) -> Union[List[AgentAction], AgentFinish]:\n # 单工具调用\n tools = self.choose_tools(kwargs[\"input\"])\n if tools is None:\n return AgentFinish({\"output\": \"无工具\"}, log=\"选择工具时出现不能匹配的情况\")\n for tool in self.tools:\n if tool.name == tools[-1]:\n tool.return_direct = True\n result: List[Union[AgentAction, AgentFinish]] = []\n for tool in tools:\n result.append(\n AgentAction(tool=tool, tool_input=kwargs[\"input\"], log=\"\"))\n return result\n\n async def aplan(self, intermediate_steps: List[Tuple[AgentAction, str]],\n **kwargs: Any) -> Union[List[AgentAction], AgentFinish]:\n raise NotImplementedError(\"IntentAgent does not support async\")"
},
{
"identifier": "Sequence_AgentExecutor",
"path": "agents/agent_executor/sequence_agentexecutor.py",
"snippet": "class Sequence_AgentExecutor(AgentExecutor):\n\n @property\n def input_keys(self) -> List[str]:\n \"\"\"Return the input keys.\n\n :meta private:\n \"\"\"\n return self.agent.input_keys\n\n @property\n def output_keys(self) -> List[str]:\n \"\"\"Return the singular output key.\n\n :meta private:\n \"\"\"\n if self.return_intermediate_steps:\n return self.agent.return_values + [\"intermediate_steps\"]\n else:\n return self.agent.return_values\n\n def _take_next_step(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n inputs: Dict[str, str],\n intermediate_steps: List[Tuple[AgentAction, str]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:\n\n intermediate_steps = self._prepare_intermediate_steps(\n intermediate_steps)\n\n output = self.agent.plan(\n intermediate_steps,\n callbacks=run_manager.get_child() if run_manager else None,\n **inputs,\n )\n\n if isinstance(output, AgentFinish):\n return output\n actions: List[AgentAction]\n if isinstance(output, AgentAction):\n actions = [output]\n else:\n actions = output\n result = []\n for agent_action in actions:\n if run_manager:\n run_manager.on_agent_action(agent_action, color=\"green\")\n if agent_action.tool in name_to_tool_map:\n tool = name_to_tool_map[agent_action.tool]\n return_direct = tool.return_direct\n color = color_mapping[agent_action.tool]\n tool_run_kwargs = self.agent.tool_run_logging_kwargs()\n if return_direct:\n tool_run_kwargs[\"llm_prefix\"] = \"\"\n if result is not None and len(result) > 0:\n a, o = result[-1]\n observation = tool.run(\n o,\n verbose=self.verbose,\n color=color,\n callbacks=run_manager.get_child()\n if run_manager else None,\n **tool_run_kwargs,\n )\n else:\n observation = tool.run(\n agent_action.tool_input,\n verbose=self.verbose,\n color=color,\n callbacks=run_manager.get_child()\n if run_manager else None,\n **tool_run_kwargs,\n )\n if tool.return_direct is False:\n result.append((agent_action, observation))\n else:\n return AgentFinish({\"output\": observation},\n log=observation)\n return result"
},
{
"identifier": "style_list",
"path": "sdxl/generate_style_config.py",
"snippet": ""
},
{
"identifier": "EmbeddingHelper",
"path": "embeddings/embedding_helper.py",
"snippet": "class EmbeddingHelper:\n\n def __init__(\n self,\n persist_directory: str = BASE_CONFIG[\"chromadb_path\"],\n collection_name: str = \"ZhoYu\",\n local_model_path: str = BASE_CONFIG[\"embedding_model_path\"]\n ) -> None:\n _instance = EmbeddingLocalModel.instance(\n persist_directory=persist_directory,\n collection_name=collection_name,\n local_model_path=local_model_path)\n self.db = _instance.db\n self.embeddingModel = _instance.embeddingModel\n self.persist_directory = persist_directory\n self.collection_name = collection_name\n\n @classmethod\n def loadDirectory(cls, dic_path: str):\n loader = DirectoryLoader(path=dic_path)\n docs = loader.load()\n return docs\n\n # 加载文件\n @classmethod\n def loadfile(cls, filePath: str):\n if filePath.find(\".docx\") != -1:\n loader = Docx2txtLoader(filePath)\n pages = loader.load()\n return pages\n elif filePath.find(\".pdf\") != -1:\n loader = PyPDFLoader(filePath)\n pages = loader.load()\n return pages\n else:\n loader = TextLoader(filePath)\n pages = loader.load()\n return pages\n\n # 分割文件\n @classmethod\n def splitDocs(cls, data, chunk_size: int = 200, voerlap: int = 50):\n if not data:\n raise ImportError(\"没有传入相应的文档数据\")\n else:\n text_splitter = RecursiveCharacterTextSplitter(\n separators=[\"\\n\\n\", \"。\", \"......\", \"!\", \"?\", \"?\", \"!\", \".\"],\n chunk_size=chunk_size,\n chunk_overlap=voerlap)\n docs = text_splitter.split_documents(data)\n return docs\n\n @classmethod\n def splitText(cls, text: str, chunk_size: int = 200, overlap: int = 50):\n if not text:\n raise ImportError(\"没有传入相应的文本数据\")\n text_spliter = CharacterTextSplitter(chunk_size=chunk_size,\n chunk_overlap=overlap)\n text_string = text_spliter.split_text(text)\n text_string = [re.sub(r'\\\\[ntr]', '', item) for item in text_string]\n return text_string\n\n @classmethod\n def splitArray(cls,\n array: List[dict[str, Any]] = None,\n chunk_size: int = 5) -> List[Document]:\n if array is None or len(array) == 0:\n raise ImportError(\"没有传入相应的数组数据\")\n\n split_array = [\n array[i:i + chunk_size] for i in range(0, len(array), 5)\n ]\n docs: List[Document] = []\n for group__orignal_array in split_array:\n # texts, metadatas = [], []\n text = json.dumps(group__orignal_array)\n metadata = {\"source\": group__orignal_array}\n doc = Document(page_content=text, metadata=metadata)\n docs.append(doc)\n return docs\n\n # 将文档转换成向量并存入向量数据库\n def embedding_docs(self, docs):\n if not docs:\n raise ImportError(\"没有传入对应的文档切分数据\")\n else:\n vector_db = Chroma.from_documents(\n docs,\n embedding=self.embeddingModel,\n persist_directory=self.persist_directory,\n collection_name=self.collection_name)\n return vector_db\n\n def embedding_texts(self, texts: List[str], metadatas: List[dict]):\n if not texts:\n raise ImportError(\"没有传入对应的文本数据\")\n else:\n vector_db = Chroma.from_texts(\n texts,\n embedding=self.embeddingModel,\n metadatas=metadatas,\n persist_directory=self.persist_directory,\n collection_name=self.collection_name)\n return vector_db\n\n # 查询最相近的向量\n def query(self,\n message,\n count,\n is_find_metedata: bool = False,\n filter: Dict[str, str] = None,\n where_document: Dict[str, str] = None) -> List[str]:\n if self.db:\n list = []\n result = self.db.similarity_search(message,\n count,\n filter=filter,\n where_document=where_document)\n if result:\n for doc in result:\n list.append(doc.page_content if is_find_metedata is\n False else doc.metadata)\n return list\n else:\n raise ImportError(\"未初始化向量数据库\")\n\n # 文档转向量\n def begin_embedding(self,\n filepath: str,\n chunk_size: 
int = 200,\n overlap: int = 50) -> OptResult:\n result = OptResult(False, \"\", [])\n if not filepath:\n result.msg = \"未能获取到对应文件的路径,请确保传入文件路径值不为空\"\n return result\n pages = self.loadfile(filepath)\n if len(pages) <= 0:\n result.msg = \"加载文件时出现错误,未能成功加载到文件内容信息\"\n return result\n docs = self.splitDocs(pages, chunk_size, overlap)\n if len(docs) <= 0:\n result.msg = \"切分文件时出现错误,未能成功切分到文件内容信息\"\n return result\n self.embedding_docs(docs)\n for doc in docs:\n result.contents.append(doc.page_content)\n result.isSuccess = True\n return result"
},
{
"identifier": "Tool_Sets",
"path": "utils/tool_sets.py",
"snippet": "class Tool_Sets:\n class_path: str = \"agents\\\\tools\"\n base_module_name = \"agents.tools\"\n\n @staticmethod\n def _check_tool(content: str):\n # 解析抽象语法树\n tree = ast.parse(content)\n # 初始化变量,用于记录是否找到指定的类和属性\n found_class = False\n found_name = False\n found_description = False\n found_call_func = False\n name = \"\",\n description = \"\",\n classname = \"\"\n # 遍历抽象语法树\n for node in ast.walk(tree):\n if isinstance(node, ast.ClassDef):\n # 判断类是否继承于指定的基类\n base_classes = [base.id for base in node.bases]\n if \"functional_Tool\" in base_classes:\n found_class = True\n # 获取工具类名\n classname = node.name\n elif isinstance(node, ast.Assign):\n for target in node.targets:\n if isinstance(target, ast.Name) and target.id == 'name':\n name = node.value.value\n found_name = True\n elif isinstance(target,\n ast.Name) and target.id == 'description':\n description = node.value.value\n found_description = True\n\n elif isinstance(node, ast.FunctionDef):\n # 判断是否存在名为 _call_func 的方法\n if node.name == '_call_func':\n found_call_func = True\n if found_class is False:\n raise ValueError(\"上传工具未继承于functional_Tool类\")\n if found_call_func is False:\n raise ValueError(\"上传工具未实现_call_func方法\")\n if found_name is False:\n raise ValueError(\"上传工具不包含name属性\")\n if found_description is False:\n raise ValueError(\"上传工具不包含description属性\")\n return name, description, classname\n\n @classmethod\n def load_tools(cls):\n # 获取当前脚本所在目录(main_directory)\n current_dir = os.path.dirname(__file__)\n # 向上一级移动两级,得到项目根目录\n project_root = os.path.abspath(os.path.join(current_dir, '..'))\n # 构建相对路径\n relative_path = os.path.join(project_root, \"configs\", 'tools.json')\n config_data = []\n with open(relative_path, 'r', encoding='utf-8') as file:\n config_data = json.load(file)\n config_data = [item for item in config_data if item['status'] == 1]\n toos = sorted(config_data,\n key=lambda x: int(x[\"sorted\"]),\n reverse=True)\n return toos\n\n @classmethod\n def regist_tool(cls, sorted: int, file_path: str):\n project_root = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..'))\n with open(file_path, 'r', encoding='utf-8') as file:\n content = file.read()\n file_name = os.path.basename(file_path)\n file_name, _ex = os.path.splitext(file_name)\n name, description, classname = cls._check_tool(content)\n relative_path = os.path.join(project_root, \"configs\", 'tools.json')\n data = {\n \"name\": name,\n \"decription\": description,\n \"sorted\": sorted,\n \"status\": 1,\n \"class_name\": classname,\n \"module_name\": f\"{cls.base_module_name}.{file_name}\"\n }\n name_is_exist = False\n with open(relative_path, 'r+', encoding='utf-8') as file:\n config_data = json.load(file)\n # 检查工具名称是否重复\n for tool in config_data:\n if name == tool[\"name\"]:\n name_is_exist = True\n break\n if name_is_exist is False:\n config_data.append(data)\n file.seek(0)\n json.dump(config_data, file, indent=4)\n if name_is_exist is True:\n raise ValueError(\"工具名称已经存在,请修改后重新上传\")\n module_path = os.path.join(project_root, cls.class_path)\n shutil.copy2(file_path, module_path)\n return cls.load_tools()\n\n @classmethod\n def init_tools(cls, tool: str, **kwargs):\n config_data = cls.load_tools()\n selected_tools = list(\n filter(lambda element: element.get('name') == tool, config_data))\n if len(selected_tools) <= 0:\n raise ValueError(\"待加载工具集合中并为发现任何工具信息\")\n item = selected_tools[0]\n # 类名的字符串\n class_name = item[\"class_name\"]\n # 根据字符串初始化类\n module_name = item[\"module_name\"]\n # 动态导入模块\n module = 
importlib.import_module(module_name)\n # 获取类对象\n class_obj = getattr(module, class_name)\n tool_instance = class_obj(**kwargs)\n return tool_instance"
},
{
"identifier": "Lora_Sets",
"path": "utils/lora_sets.py",
"snippet": "class Lora_Sets:\n\n @classmethod\n def load_loras(cls, is_only_return_name: bool = True):\n current_dir = os.path.dirname(__file__)\n # 向上一级移动两级,得到项目根目录\n project_root = os.path.abspath(os.path.join(current_dir, '..'))\n # 构建相对路径\n relative_path = os.path.join(project_root, \"configs\", 'loras.json')\n config_data = []\n with open(relative_path, 'r', encoding='utf-8') as file:\n config_data = json.load(file)\n if is_only_return_name is True:\n config_data = [item[\"name\"] for item in config_data]\n return config_data\n\n @classmethod\n def init_lora(cls, order_num: int, scale: float, tag_words: str,\n model_path: str):\n file_name = os.path.basename(model_path)\n file_name, _ex = os.path.splitext(file_name)\n lora = {\n \"name\": file_name,\n \"tag_words\": tag_words,\n \"scale\": scale,\n \"sored\": order_num\n }\n project_root = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..'))\n relative_path = os.path.join(project_root, \"configs\", 'loras.json')\n with open(relative_path, 'r+', encoding='utf-8') as file:\n config_data = json.load(file)\n # 检查模型名称是否重复\n name_is_exist = False\n for data in config_data:\n if file_name == data[\"name\"]:\n name_is_exist = True\n break\n if name_is_exist is False:\n config_data.append(lora)\n file.seek(0)\n json.dump(config_data, file, indent=4)\n if name_is_exist is True:\n raise ValueError(\"模型名称已经存在,请修改后重新上传\")\n lora_path = BASE_CONFIG[\"sdxl_lora_path\"]\n shutil.copy2(model_path, lora_path)\n return cls.load_loras()"
},
{
"identifier": "BASE_CONFIG",
"path": "configs/base_config.py",
"snippet": "BASE_CONFIG = {\n # 将需要进行向量化的文件上传到这个地址\n \"upload_file_base_path\":\n \"D:\\\\EasyGC\\\\application\\\\utils\\\\spider_resource\\\\upload\",\n # LLM大模型文件地址\n \"llm_model_path\": \"D:\\\\ChatGLM2-6B\\\\chatglm2-6b-model-int4\",\n # chroma_db向量数据库文件存放地址\n \"chromadb_path\": \"D:\\\\ChatGLM2-6B\\\\knowledge_center\\\\chroma_db\",\n # 向量化模型文件存放地址\n \"embedding_model_path\": \"D:\\\\Text2Vec\",\n # refiner模型文件地址\n \"sdxl_refiner_path\": \"D:\\\\ChatGLM2-6B\\\\stable-diffusion-xl-refiner-1.0\",\n # base模型文件地址\n \"sdxl_base_path\": \"D:\\\\ChatGLM2-6B\\\\stable-diffusion-xl-base-1.0\",\n # sdxl的lora模型文件地址\n \"sdxl_lora_path\": \"D:\\\\ChatGLM2-6B\\\\stable-diffusion-xl-base-1.0\\\\lora\",\n # 百度翻译的APPID\n \"baidu_appid\": \"\",\n # 百度翻译的APPKEY\n \"baidu_app_key\": \"\",\n # blip模型地址\n \"blip_model_path\": \"D:\\\\EasyGC\\\\blip\"\n}"
},
{
"identifier": "BASE_FILE_PATH",
"path": "configs/base_config.py",
"snippet": "BASE_FILE_PATH = \"D:\\\\ChatGLM2-6B\\\\knowledge_center\""
},
{
"identifier": "Web_Parser_Tool_Response",
"path": "agents/tools/web_parser_tool.py",
"snippet": "class Web_Parser_Tool_Response:\n original_infos: list = []\n content: str = None\n\n def __init__(self, content: str, original_infos: list) -> None:\n self.original_infos = original_infos\n self.content = content"
}
] | import os
import shutil
import gradio as gr
from gradio.components.chatbot import Chatbot
from utils.pipeline import Pipeline_Item, Pipeline_Process_Task
from utils.generate.note_generate_utils import Note_Generate_Utils
from llms.llm_helper import ChatGLM_Helper
from agents.agent_controller.api_sequence_agent import API_Sequence_Agent
from agents.agent_executor.sequence_agentexecutor import Sequence_AgentExecutor
from sdxl.generate_style_config import style_list
from embeddings.embedding_helper import EmbeddingHelper
from utils.tool_sets import Tool_Sets
from utils.lora_sets import Lora_Sets
from configs.base_config import BASE_CONFIG, BASE_FILE_PATH
from agents.tools.web_parser_tool import Web_Parser_Tool_Response | 10,367 | lora_upload = gr.Textbox(
lines=1,
show_label=True,
placeholder="填写模型完整的绝对路径如: D:/aaa/xxxx.safetensors",
label="模型路径",
interactive=True)
scale = gr.Number(value=1, label="权重", minimum=0.1, maximum=1)
order_num = gr.Number(value=0, label="排序", minimum=0)
tag_words = gr.Textbox(lines=1,
show_label=True,
placeholder="模型触发词,用逗号分隔",
label="触发词",
interactive=True)
handle_lora = gr.Button("加载模型",
variant="primary",
min_width=60,
width=100)
with gr.Row():
style_dropdown = gr.Dropdown(choices=style_name,
type="value",
value="",
show_label=False,
container=False,
max_choices=1,
multiselect=False,
interactive=True)
lora_dropdown = gr.Dropdown(choices=Lora_Sets.load_loras(),
type="value",
show_label=False,
container=False,
max_choices=2,
multiselect=True,
interactive=True)
with gr.Row():
comment = gr.Textbox(lines=2,
show_label=True,
placeholder="",
label="图片描述",
interactive=True)
ok_note_Btn = gr.Button("生成", variant="primary")
with gr.Tab("img2img"):
with gr.Row():
with gr.Column(scale=6):
img_input = gr.Image(source="upload",
show_label=False,
interactive=True,
type="filepath")
with gr.Column(scale=6):
img_output = gr.Gallery(label="Generated images",
show_label=False,
elem_id="img2img_gallery",
grid=[4])
with gr.Row():
style_dropdown_img2img = gr.Dropdown(choices=style_name,
type="value",
value="",
show_label=False,
container=False,
max_choices=1,
multiselect=False,
interactive=True)
img_modify_comment = gr.Textbox(show_label=False,
placeholder="关键词/句...",
lines=5).style(container=False)
with gr.Row():
img2img_btn = gr.Button("生成图片", variant="primary")
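# Callback: register the LoRA checkpoint at the given path via Lora_Sets and refresh the LoRA dropdown choices.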
def handle_upload_lora(scale, order_num, tag_words, lora_upload):
if lora_upload and tag_words and os.path.exists(lora_upload):
loras = Lora_Sets.init_lora(order_num=order_num,
scale=scale,
tag_words=tag_words,
model_path=lora_upload)
new_drop_down = lora_dropdown.update(choices=loras)
return new_drop_down
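# Callback: register an uploaded tool script via Tool_Sets and refresh the tool dropdown choices.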
def handle_upload_tools(upload_tool):
file_path = upload_tool.name
tools = Tool_Sets.regist_tool(sorted=1, file_path=file_path)
tool_result = [tool["name"] for tool in tools]
new_drop_down = content_type.update(choices=tool_result)
return new_drop_down
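# Callback: copy the uploaded files into the upload directory, split them into chunks and embed them into the vector store.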
def handle_files(files, author_input, number, overlap):
upload_file_base_path = BASE_CONFIG["upload_file_base_path"]
files_path_array: list = []
for file_obj in files:
# Copy the file into the temporary directory
shutil.copy(file_obj.name, upload_file_base_path)
# Get the name of the file uploaded through Gradio
file_name = os.path.basename(file_obj.name)
# Get the new path of the copy inside the temporary directory
files_path_array.append(
os.path.join(upload_file_base_path, file_name))
for file_path in files_path_array:
with open(file_path, 'r', encoding='utf-8') as file:
content = file.read()
texts = EmbeddingHelper.splitText(
content,
chunk_size=number if number else 1000,
overlap=overlap if overlap else 0)
item = {}
item["content"] = content
item["source"] = author_input if author_input else "未知"
helper = EmbeddingHelper(collection_name="literature")
helper.embedding_texts(texts=texts, metadatas=[item])
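# Reload the LLM weights if the model was unloaded earlier (e.g. to free GPU memory for SDXL).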
def _load_llm_model():
if llm_helper.llm.model is None:
llm_helper.llm.load_model(model_name_or_path=llm_helper.model_id)
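# Callback: look up the selected style by name and run image-to-image generation for the uploaded picture.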
def generate_image_by_image(img_input, img_modify_comment,
style_dropdown_img2img):
style = {}
for i in style_list:
if i["name"] == style_dropdown_img2img:
style = i
break
| # Override the inherited model
uploaded_image_url = ""
llm_helper = ChatGLM_Helper.instance()
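# Build the selected tools via Tool_Sets and wrap them in a sequential agent executor; returns None when no tools match.
# Hypothetical usage sketch (tool names are placeholders, inferred from the code below):
#     executor = init_agent(["tool_a", "tool_b"])
#     if executor is not None:
#         result = executor.run("user query")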
def init_agent(tool_names: list):
if len(tool_names) > 0:
tools = []
for name in tool_names:
tools.append(Tool_Sets.init_tools(name, llm=llm_helper.llm))
agent = API_Sequence_Agent(tools=tools, llm=llm_helper.llm)
agent_exec = Sequence_AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True, max_iterations=1)
return agent_exec
return None
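# Convert raw model output into HTML for the Gradio Chatbot, wrapping fenced code blocks in <pre><code> tags and escaping special characters.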
def parse_text(text):
lines = text.split("\n")
lines = [line for line in lines if line != ""]
count = 0
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split('`')
if count % 2 == 1:
lines[i] = f'<pre><code class="language-{items[-1]}">'
else:
lines[i] = '<br></code></pre>'
else:
if i > 0:
if count % 2 == 1:
line = line.replace("`", "\\`")
# escape characters as HTML entities so they render literally in the Chatbot
line = line.replace("<", "&lt;")
line = line.replace(">", "&gt;")
line = line.replace(" ", "&nbsp;")
line = line.replace("*", "&ast;")
line = line.replace("_", "&lowbar;")
line = line.replace("-", "&#45;")
line = line.replace(".", "&#46;")
line = line.replace("!", "&#33;")
line = line.replace("(", "&#40;")
line = line.replace(")", "&#41;")
line = line.replace("$", "&#36;")
lines[i] = "<br>" + line
text = "".join(lines)
return text
# def join_history(is_note: bool = False):
# result = ""
# for item in history:
# for key, value in item.items():
# result += f"{value}\n"
# return result
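# Names of the available SDXL styles, used to populate the style dropdowns below.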
style_name = [(i["name"]) for i in style_list]
css = """
#gallery {
min-height: 22rem;
margin-bottom: 15px;
margin-left: auto;
margin-right: auto;
border-bottom-right-radius: .5rem !important;
border-bottom-left-radius: .5rem !important;
}
#upload_tool{
height:96px !important;
width:280px !important
}
#content_type{
flex-grow:4 !important
}
"""
with gr.Blocks(css=css) as web_gc:
gr.HTML("""<h1 align="center">EasyGC</h1>""")
pipe_state = gr.State()
with gr.Tab("text generate"):
with gr.Row(variant="panel"):
with gr.Column(scale=10):
with gr.Row():
chatbot = gr.Chatbot()
with gr.Row():
user_input = gr.Textbox(show_label=False,
placeholder="Input...",
lines=4).style(container=False)
with gr.Column(scale=2):
files = gr.Files(label="上传文件", height=110)
author_input = gr.Textbox(lines=1,
show_label=True,
placeholder="文档元数据标记,用于后续关键字搜索",
label="文档源",
interactive=True)
number = gr.Number(value=1000, label="切分长度", minimum=100)
overlap = gr.Number(value=0, label="重叠长度", minimum=0)
lode_Btn = gr.Button("处理文件", variant="primary")
submitBtn = gr.Button("发送信息", variant="primary", min_width=60)
with gr.Row():
with gr.Column(scale=10):
content_type = gr.Dropdown(
choices=[tool["name"] for tool in Tool_Sets.load_tools()],
type="value",
multiselect=True,
label="工具",
interactive=True)
with gr.Column(scale=10):
upload_tool = gr.File(label="上传工具",
height=94,
elem_id="upload_tool")
with gr.Column(scale=2):
handle_tools = gr.Button("加载工具",
variant="primary",
min_width=60,
width=100)
with gr.Row():
# note_Btn = gr.Button("生成文章", variant="primary")
emptyBtn = gr.Button("清除历史会话")
load_llm_model = gr.Button("重新加载LL模型", variant="primary")
with gr.Tab("txt2img"):
with gr.Row():
with gr.Column(scale=6):
gallery = gr.Gallery(label="图片生成",
show_label=False,
elem_id="gallery",
grid=[4])
with gr.Column(scale=3):
lora_upload = gr.Textbox(
lines=1,
show_label=True,
placeholder="填写模型完整的绝对路径如: D:/aaa/xxxx.safetensors",
label="模型路径",
interactive=True)
scale = gr.Number(value=1, label="权重", minimum=0.1, maximum=1)
order_num = gr.Number(value=0, label="排序", minimum=0)
tag_words = gr.Textbox(lines=1,
show_label=True,
placeholder="模型触发词,用逗号分隔",
label="触发词",
interactive=True)
handle_lora = gr.Button("加载模型",
variant="primary",
min_width=60,
width=100)
with gr.Row():
style_dropdown = gr.Dropdown(choices=style_name,
type="value",
value="",
show_label=False,
container=False,
max_choices=1,
multiselect=False,
interactive=True)
lora_dropdown = gr.Dropdown(choices=Lora_Sets.load_loras(),
type="value",
show_label=False,
container=False,
max_choices=2,
multiselect=True,
interactive=True)
with gr.Row():
comment = gr.Textbox(lines=2,
show_label=True,
placeholder="",
label="图片描述",
interactive=True)
ok_note_Btn = gr.Button("生成", variant="primary")
with gr.Tab("img2img"):
with gr.Row():
with gr.Column(scale=6):
img_input = gr.Image(source="upload",
show_label=False,
interactive=True,
type="filepath")
with gr.Column(scale=6):
img_output = gr.Gallery(label="Generated images",
show_label=False,
elem_id="img2img_gallery",
grid=[4])
with gr.Row():
style_dropdown_img2img = gr.Dropdown(choices=style_name,
type="value",
value="",
show_label=False,
container=False,
max_choices=1,
multiselect=False,
interactive=True)
img_modify_comment = gr.Textbox(show_label=False,
placeholder="关键词/句...",
lines=5).style(container=False)
with gr.Row():
img2img_btn = gr.Button("生成图片", variant="primary")
def handle_upload_lora(scale, order_num, tag_words, lora_upload):
if lora_upload and tag_words and os.path.exists(lora_upload):
loras = Lora_Sets.init_lora(order_num=order_num,
scale=scale,
tag_words=tag_words,
model_path=lora_upload)
new_drop_down = lora_dropdown.update(choices=loras)
return new_drop_down
def handle_upload_tools(upload_tool):
file_path = upload_tool.name
tools = Tool_Sets.regist_tool(sorted=1, file_path=file_path)
tool_result = [tool["name"] for tool in tools]
new_drop_down = content_type.update(choices=tool_result)
return new_drop_down
def handle_files(files, author_input, number, overlap):
upload_file_base_path = BASE_CONFIG["upload_file_base_path"]
files_path_array: list = []
for file_obj in files:
# Copy the file into the temporary directory
shutil.copy(file_obj.name, upload_file_base_path)
# Get the name of the file uploaded through Gradio
file_name = os.path.basename(file_obj.name)
# Get the new path of the copy inside the temporary directory
files_path_array.append(
os.path.join(upload_file_base_path, file_name))
for file_path in files_path_array:
with open(file_path, 'r', encoding='utf-8') as file:
content = file.read()
texts = EmbeddingHelper.splitText(
content,
chunk_size=number if number else 1000,
overlap=overlap if overlap else 0)
item = {}
item["content"] = content
item["source"] = author_input if author_input else "未知"
helper = EmbeddingHelper(collection_name="literature")
helper.embedding_texts(texts=texts, metadatas=[item])
def _load_llm_model():
if llm_helper.llm.model is None:
llm_helper.llm.load_model(model_name_or_path=llm_helper.model_id)
def generate_image_by_image(img_input, img_modify_comment,
style_dropdown_img2img):
style = {}
for i in style_list:
if i["name"] == style_dropdown_img2img:
style = i
break | util = Note_Generate_Utils(llm=llm_helper.llm, | 2 | 2023-11-28 10:11:57+00:00 | 12k |
Fraunhofer-SCAI/llamol | trainer.py | [
{
"identifier": "fragment_creator_factory",
"path": "fragment_creator.py",
"snippet": "def fragment_creator_factory(key: Union[str, None]):\n if key is None:\n return None\n\n if key == \"mol_frags\":\n return MolFragsFragmentCreator()\n elif key == \"recap\":\n return RecapFragmentCreator()\n elif key == \"bricks\":\n return BricksFragmentCreator()\n elif key == \"rss\":\n return RandomSubsliceFragmentCreator()\n else:\n raise ValueError(f\"Do not have factory for the given key: {key}\")"
},
{
"identifier": "ContextArgs",
"path": "model.py",
"snippet": "class ContextArgs:\n context_keys: List[str] = field(default_factory=list)\n context_dims: List[int] = field(default_factory=list)"
},
{
"identifier": "ModelArgs",
"path": "model.py",
"snippet": "class ModelArgs:\n dim: int = 4096\n n_layers: int = 32\n n_heads: int = 32\n n_kv_heads: Optional[int] = None\n vocab_size: int = -1 # defined later by tokenizer\n multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2\n norm_eps: float = 1e-5\n max_seq_len: int = 2048\n dropout: float = 0.0"
},
{
"identifier": "ContextArgs",
"path": "model.py",
"snippet": "class ContextArgs:\n context_keys: List[str] = field(default_factory=list)\n context_dims: List[int] = field(default_factory=list)"
},
{
"identifier": "Transformer",
"path": "model.py",
"snippet": "class Transformer(nn.Module):\n last_loss: Optional[torch.Tensor]\n\n def __init__(self, params: ModelArgs, context_params: ContextArgs):\n super().__init__()\n self.params = params\n self.context_params = context_params\n self.vocab_size = params.vocab_size\n self.n_layers = params.n_layers\n\n self.tok_embeddings = nn.Embedding(params.vocab_size, params.dim)\n\n self.frag_embeddings = nn.Embedding(params.vocab_size, params.dim)\n self.frag_type_embedding = nn.Embedding(1, params.dim)\n\n self.context_lookup = {k: i for i, k in enumerate(context_params.context_keys)}\n self.conditions_type_embeddings = nn.Embedding(\n len(context_params.context_keys), params.dim\n )\n self.conditions_embeddings_lookup = nn.ModuleDict(\n {\n k: nn.Sequential(\n nn.Linear(dim, params.dim, bias=True),\n )\n for k, dim in zip(\n context_params.context_keys, context_params.context_dims\n )\n }\n )\n\n self.dropout = nn.Dropout(params.dropout)\n self.layers = torch.nn.ModuleList()\n for layer_id in range(params.n_layers):\n self.layers.append(TransformerBlock(layer_id, params))\n self.norm = RMSNorm(params.dim, eps=params.norm_eps)\n self.output = nn.Linear(params.dim, params.vocab_size, bias=False)\n\n # share the unembedding parameters with the embedding parameters\n self.tok_embeddings.weight = (\n self.output.weight\n ) # https://paperswithcode.com/method/weight-tying\n\n # some useful precompute for the RoPE relative positional embeddings\n freqs_cos, freqs_sin = precompute_freqs_cis(\n self.params.dim // self.params.n_heads, self.params.max_seq_len\n )\n self.register_buffer(\"freqs_cos\", freqs_cos, persistent=False)\n self.register_buffer(\"freqs_sin\", freqs_sin, persistent=False)\n\n # init all weights\n self.apply(self._init_weights)\n # apply special scaled init to the residual projections, per GPT-2 paper\n for pn, p in self.named_parameters():\n if pn.endswith(\"w3.weight\") or pn.endswith(\"wo.weight\"):\n torch.nn.init.normal_(\n p, mean=0.0, std=0.02 / math.sqrt(2 * params.n_layers)\n )\n\n # Initialize attribute for the loss of the last forward call. 
This will be set if the forward is called with a targets tensor.\n self.last_loss = None\n\n def _init_weights(self, module):\n if isinstance(module, nn.Linear):\n torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.Embedding):\n torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n\n def forward(\n self,\n tokens: torch.Tensor,\n targets: Optional[torch.Tensor] = None,\n context: Optional[Dict[str, torch.Tensor]] = None,\n fragment: Optional[torch.Tensor] = None,\n ) -> torch.Tensor:\n bsz, seqlen = tokens.shape\n device = tokens.device\n\n h = self._add_context_to_seq(tokens, context, fragment, bsz, device)\n\n context_seq_len = h.shape[1] - seqlen\n\n bsz, seqlen, _ = h.shape\n\n freqs_cos = self.freqs_cos[:seqlen]\n freqs_sin = self.freqs_sin[:seqlen]\n\n for layer in self.layers:\n h = layer(h, freqs_cos, freqs_sin)\n h = self.norm(h)\n\n h = h[:, context_seq_len:]\n if targets is not None:\n # if we are given some desired targets also calculate the loss\n logits = self.output(h)\n tmp_last_loss = F.cross_entropy(\n logits.reshape(-1, logits.size(-1)),\n targets.reshape(-1),\n ignore_index=0, # Ignore Pad Tokens\n )\n\n # NOTE: This essentially does nothing for the computation,\n # because we are multiplying the weights by zero.\n # This *needs* to be done, so that we can train with DDP\n # As due to the random training process some of the weights are not used in the forward pass\n # That is unacceptable for the for the c10 backend and the training errors out.\n # Maybe there is a better fix in the future, see:\n # https://github.com/pytorch/pytorch/issues/43259\n ddp_fix = sum(p.sum() for p in self.parameters())\n zero_sum = ddp_fix * 0.0\n\n self.last_loss = tmp_last_loss + zero_sum\n else:\n # inference-time mini-optimization: only forward the output on the very last position\n logits = self.output(\n h[:, [-1], :]\n ) # note: using list [-1] to preserve the time dim\n self.last_loss = None\n\n return logits\n\n def forward_with_kvcache(\n self,\n tokens: torch.Tensor,\n targets: Optional[torch.Tensor] = None,\n context: Optional[Dict[str, torch.Tensor]] = None,\n fragment: Optional[torch.Tensor] = None,\n cache_id: int = 1,\n pos_seq_len: Optional[int] = None,\n ) -> torch.Tensor:\n bsz, seqlen = tokens.shape\n device = tokens.device\n\n h = self._add_context_to_seq(tokens, context, fragment, bsz, device)\n\n context_seq_len = h.shape[1] - seqlen\n\n bsz, seqlen, _ = h.shape\n if pos_seq_len is None:\n pos_seq_len = seqlen\n else:\n pos_seq_len = max(seqlen, pos_seq_len + context_seq_len)\n\n freqs_cos = self.freqs_cos[:pos_seq_len]\n freqs_sin = self.freqs_sin[:pos_seq_len]\n\n for layer in self.layers:\n h = layer.forward_with_kvcache(h, freqs_cos, freqs_sin, cache_id=cache_id)\n h = self.norm(h)\n\n h = h[:, context_seq_len:]\n if targets is not None:\n # if we are given some desired targets also calculate the loss\n logits = self.output(h)\n tmp_last_loss = F.cross_entropy(\n logits.reshape(-1, logits.size(-1)),\n targets.reshape(-1),\n ignore_index=0, # Ignore Pad Tokens\n )\n\n # NOTE: This essentially does nothing for the computation,\n # because we are multiplying the weights by zero.\n # This *needs* to be done, so that we can train with DDP\n # As due to the random training process some of the weights are not used in the forward pass\n # That is unacceptable for the for the c10 backend and the training errors out.\n # Maybe there is a better fix in the future, see:\n 
# https://github.com/pytorch/pytorch/issues/43259\n ddp_fix = sum(p.sum() for p in self.parameters())\n zero_sum = ddp_fix * 0.0\n\n self.last_loss = tmp_last_loss + zero_sum\n else:\n # inference-time mini-optimization: only forward the output on the very last position\n logits = self.output(\n h[:, [-1], :]\n ) # note: using list [-1] to preserve the time dim\n self.last_loss = None\n\n return logits\n\n def _add_context_to_seq(self, tokens, context, fragment, bsz, device):\n h = self.tok_embeddings(tokens)\n h = self.dropout(h)\n\n if fragment is not None:\n fragment_type_enc = torch.zeros_like(\n fragment, dtype=torch.long, device=device\n )\n\n h = torch.concat(\n (\n self.tok_embeddings(fragment)\n + self.frag_embeddings(fragment)\n + self.frag_type_embedding(fragment_type_enc),\n h,\n ),\n dim=1,\n )\n\n if context is not None and len(context) != 0:\n # context is a dictionary with key : context_tensor of shape (batch_size, context_dim)\n type_ids = []\n context_vals = []\n\n for emb_key, context_val in context.items():\n emb_context_val = self.conditions_embeddings_lookup[emb_key](\n context_val.unsqueeze(1).to(device)\n ).unsqueeze(1)\n\n context_vals.append(emb_context_val)\n type_ids_tensor = torch.tensor(\n [self.context_lookup[emb_key]], device=device, dtype=torch.long\n )\n type_ids.append(type_ids_tensor)\n\n context_types = (\n torch.concat(type_ids, dim=0).reshape(-1, 1).expand(-1, bsz).T\n )\n # shape(len(context),batch_size, emb_size)\n context_types = self.conditions_type_embeddings(context_types)\n\n context_vals = torch.concat(context_vals, dim=1).to(device)\n\n # SHAPE\n h = torch.concat([context_vals + context_types, h], dim=1)\n return h\n\n def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):\n # start with all of the candidate parameters\n param_dict = {pn: p for pn, p in self.named_parameters()}\n # filter out those that do not require grad\n param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}\n # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no.\n # i.e. 
all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.\n decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]\n nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]\n optim_groups = [\n {\"params\": decay_params, \"weight_decay\": weight_decay},\n {\"params\": nodecay_params, \"weight_decay\": 0.0},\n ]\n num_decay_params = sum(p.numel() for p in decay_params)\n num_nodecay_params = sum(p.numel() for p in nodecay_params)\n print(\n f\"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters\"\n )\n print(\n f\"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters\"\n )\n # Create AdamW optimizer and use the fused version if it is available\n fused_available = \"fused\" in inspect.signature(torch.optim.AdamW).parameters\n use_fused = fused_available and device_type == \"cuda\"\n extra_args = dict(fused=True) if use_fused else dict()\n optimizer = torch.optim.AdamW(\n optim_groups, lr=learning_rate, betas=betas, **extra_args\n )\n print(f\"using fused AdamW: {use_fused}\")\n\n return optimizer\n\n def estimate_mfu(self, fwdbwd_per_iter, dt):\n \"\"\"estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS\"\"\"\n # first estimate the number of flops we do per iteration.\n # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311\n N = sum(p.numel() for p in self.parameters())\n cfg = self.params\n L, H, Q, T = cfg.n_layers, cfg.n_heads, cfg.dim // cfg.n_heads, cfg.max_seq_len\n flops_per_token = 6 * N + 12 * L * H * Q * T\n flops_per_fwdbwd = flops_per_token * T\n flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter\n # express our flops throughput as ratio of A100 bfloat16 peak flops\n flops_achieved = flops_per_iter * (1.0 / dt) # per second\n flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS\n mfu = flops_achieved / flops_promised\n return mfu\n\n @torch.inference_mode()\n def generate(\n self,\n tokenizer: SmilesTokenizer,\n context: Union[torch.Tensor, None] = None,\n fragments: Union[torch.Tensor, None] = None,\n max_length: int = 50,\n num_gen: int = 200,\n start_smiles: Union[str, None] = None,\n temperature: float = 1.0,\n top_k: Union[int, None] = None,\n device: torch.device = torch.device(\"cpu\"),\n cache_kv: bool = False,\n ) -> List[str]:\n batch_size = num_gen\n if start_smiles is not None:\n tokenized_start_selfie = tokenizer.encode(start_smiles)[\n :-1\n ] # remove <eos> token\n tokenized_start_selfie = torch.tensor(\n tokenized_start_selfie, device=device, dtype=torch.long\n ).view(-1, 1)\n tokenized_start_selfie = tokenized_start_selfie.repeat(1, batch_size)\n\n outputs = tokenized_start_selfie.T\n else:\n outputs = (\n torch.LongTensor([[tokenizer.cls_token_id] * batch_size]).to(device)\n ).T # batch_size\n self.eval()\n\n start_len = outputs.shape[1]\n has_end_idx = np.array([0] * batch_size)\n cache_id = np.random.randint(0, int(1e10), 1).item()\n with torch.no_grad():\n with tqdm(total=max_length, desc=\"Generation\") as pbar:\n for i in range(start_len, max_length):\n # trg_tensor = #torch.LongTensor(outputs).to(model.device)\n if not cache_kv:\n logits = self(outputs, context=context, fragment=fragments)\n else:\n # logits_ = self(outputs, context=context, fragment=fragments)\n if i == start_len:\n # When starting pass the whole input, so that \"start_smiles\" works, then only the newly generated token, because of the cache\n func_input = outputs\n else:\n func_input = outputs[:, 
-1].unsqueeze(-1)\n logits = self.forward_with_kvcache(\n func_input,\n context=context,\n fragment=fragments,\n cache_id=cache_id,\n pos_seq_len=outputs.size(-1),\n )\n\n # raise NotImplementedError(\"Currently not working / right implemented\")\n # logits = self.forward_with_kvcache(outputs, context=context, fragment=fragments,cache_id = cache_id)\n\n logits = logits[:, -1, :] # crop to just the final time step\n if temperature == 0.0:\n # \"sample\" the single most likely index\n _, logits = torch.topk(logits, k=1, dim=-1)\n else:\n # pluck the logits at the final step and scale by desired temperature\n logits = logits / temperature\n # optionally crop the logits to only the top k options\n if top_k is not None:\n v, _ = torch.topk(logits, min(top_k, logits.size(-1)))\n logits[logits < v[:, [-1]]] = -float(\"Inf\")\n\n probs = F.softmax(logits, dim=-1)\n idx_next = torch.multinomial(probs, num_samples=1)\n\n ended_sentences = idx_next == tokenizer.sep_token_id\n if torch.count_nonzero(ended_sentences) != 0:\n indicies = torch.nonzero(ended_sentences)\n indicies = indicies.cpu().numpy()\n for end_idx in indicies[:, 0]:\n if has_end_idx[end_idx] == 0:\n has_end_idx[end_idx] = i\n\n # print(has_end_idx)\n\n if all([idx != 0 for idx in has_end_idx]):\n break\n\n # outputs.append(best_guesses)\n # outputs = torch.row_stack((outputs, idx_next))\n outputs = torch.cat((outputs, idx_next), dim=1)\n pbar.update(1)\n\n out_selfies = []\n for output, end_idx in zip(outputs.cpu().numpy(), has_end_idx):\n # Incase of limiting the max_len\n if end_idx == 0:\n selfie = [tokenizer._convert_id_to_token(idx) for idx in output[:]]\n else:\n selfie = [\n tokenizer._convert_id_to_token(idx) for idx in output[:end_idx]\n ]\n selfie = \"\".join(selfie[1:])\n out_selfies.append(selfie)\n\n # for indicies in outputs:\n # translated_sentence = [tokenizer.idx_to_tokens[idx] for idx in outputs]\n # remove start token\n return out_selfies\n\n @staticmethod\n def load(path, device: torch.device = torch.device(\"cpu\")) -> Transformer:\n data = torch.load(path, map_location=device)\n\n newinstace = Transformer(data[\"model_params\"], data[\"context_params\"])\n newinstace.load_state_dict(data[\"state_dict\"])\n return newinstace.to(device)\n\n def save(self, filepath):\n torch.save(\n {\n \"state_dict\": self.state_dict(),\n **dict(model_params=self.params, context_params=self.context_params),\n },\n filepath,\n )\n\n def getNumberTrainableParams(self) -> int:\n return sum(p.numel() for p in self.parameters() if p.requires_grad)\n\n def getNumberParams(self) -> int:\n return sum(p.numel() for p in self.parameters())"
},
{
"identifier": "ModelArgs",
"path": "model.py",
"snippet": "class ModelArgs:\n dim: int = 4096\n n_layers: int = 32\n n_heads: int = 32\n n_kv_heads: Optional[int] = None\n vocab_size: int = -1 # defined later by tokenizer\n multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2\n norm_eps: float = 1e-5\n max_seq_len: int = 2048\n dropout: float = 0.0"
},
{
"identifier": "SmilesTask",
"path": "preprocess_dataset.py",
"snippet": "class SmilesTask:\n @staticmethod\n def iter_batches(\n split,\n batch_size,\n device,\n context_keys: List[str],\n num_workers=0,\n dataset=\"processed_dataset.pkl\",\n fragment_creator: BaseFragmentCreator = BricksFragmentCreator(),\n ):\n tokenizer = SmilesTokenizer()\n ds = PretokDataset(split, tokenizer.pad_token_id, dataset=dataset)\n is_ddp = int(os.environ.get(\"RANK\", -1)) != -1\n dl = torch.utils.data.DataLoader(\n ds,\n batch_size=batch_size,\n pin_memory=True,\n num_workers=num_workers,\n shuffle=False,\n sampler=DistributedSampler(ds) if is_ddp else None,\n collate_fn=lambda batch: padding_collate_fn(\n batch, tokenizer, fragment_creator\n ),\n )\n\n for data in dl:\n data[\"src\"] = data[\"src\"].to(device, non_blocking=True)\n data[\"tgt\"] = data[\"src\"].to(device, non_blocking=True)\n\n data[\"src\"] = data[\"src\"][:-1, :].T # batch_size, seq_len\n data[\"tgt\"] = data[\"tgt\"][1:, :].T # batch_size, seq_len\n\n data[\"fragment\"] = (\n data[\"fragment\"].to(device, non_blocking=True).T\n ) # batch_size, seq_len\n keys = list(data[\"context\"].keys())\n for d in keys:\n if d not in context_keys:\n del data[\"context\"][d]\n else:\n data[\"context\"][d] = data[\"context\"][d].to(\n device, non_blocking=True\n )\n\n yield data"
},
{
"identifier": "SmilesTokenizer",
"path": "tokenizer.py",
"snippet": "class SmilesTokenizer(BertTokenizer):\n \"\"\"\n Creates the SmilesTokenizer class. The tokenizer heavily inherits from the BertTokenizer\n implementation found in Huggingface's transformers library. It runs a WordPiece tokenization\n algorithm over SMILES strings using the tokenisation SMILES regex developed by Schwaller et. al.\n\n Please see https://github.com/huggingface/transformers\n and https://github.com/rxn4chemistry/rxnfp for more details.\n\n Examples\n --------\n >>> from deepchem.feat.smiles_tokenizer import SmilesTokenizer\n >>> current_dir = os.path.dirname(os.path.realpath(__file__))\n >>> vocab_path = os.path.join(current_dir, 'tests/data', 'vocab.txt')\n >>> tokenizer = SmilesTokenizer(vocab_path)\n >>> print(tokenizer.encode(\"CC(=O)OC1=CC=CC=C1C(=O)O\"))\n [12, 16, 16, 17, 22, 19, 18, 19, 16, 20, 22, 16, 16, 22, 16, 16, 22, 16, 20, 16, 17, 22, 19, 18, 19, 13]\n\n\n References\n ----------\n .. [1] Schwaller, Philippe; Probst, Daniel; Vaucher, Alain C.; Nair, Vishnu H; Kreutter, David;\n Laino, Teodoro; et al. (2019): Mapping the Space of Chemical Reactions using Attention-Based Neural\n Networks. ChemRxiv. Preprint. https://doi.org/10.26434/chemrxiv.9897365.v3\n\n Notes\n ----\n This class requires huggingface's transformers and tokenizers libraries to be installed.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n\n def __init__(\n self,\n # unk_token=\"[UNK]\",\n # sep_token=\"[SEP]\",\n # pad_token=\"[PAD]\",\n # cls_token=\"[CLS]\",\n # mask_token=\"[MASK]\",\n **kwargs\n ):\n \"\"\"Constructs a SmilesTokenizer.\n\n Parameters\n ----------\n vocab_file: str\n Path to a SMILES character per line vocabulary file.\n Default vocab file is found in deepchem/feat/tests/data/vocab.txt\n \"\"\"\n\n vocab_file = os.path.join(os.path.dirname(__file__), \"data\", \"vocab.txt\")\n\n super().__init__(vocab_file, **kwargs)\n\n self.sos = \"[SOS]\"\n self.eos = \"[EOS]\"\n\n if not os.path.isfile(vocab_file):\n raise ValueError(\"Can't find a vocab file at path '{}'.\".format(vocab_file))\n self.vocab = load_vocab(vocab_file)\n self.highest_unused_index = max(\n [i for i, v in enumerate(self.vocab.keys()) if v.startswith(\"[unused\")]\n )\n self.ids_to_tokens = collections.OrderedDict(\n [(ids, tok) for tok, ids in self.vocab.items()]\n )\n self.basic_tokenizer = BasicSmilesTokenizer()\n\n @property\n def vocab_size(self):\n return len(self.vocab)\n\n @property\n def vocab_list(self):\n return list(self.vocab.keys())\n\n def _tokenize(self, text: str):\n \"\"\"\n Tokenize a string into a list of tokens.\n\n Parameters\n ----------\n text: str\n Input string sequence to be tokenized.\n \"\"\"\n\n split_tokens = [token for token in self.basic_tokenizer.tokenize(text)]\n return split_tokens\n\n def _convert_token_to_id(self, token):\n \"\"\"\n Converts a token (str/unicode) in an id using the vocab.\n\n Parameters\n ----------\n token: str\n String token from a larger sequence to be converted to a numerical id.\n \"\"\"\n\n return self.vocab.get(token, self.vocab.get(self.unk_token))\n\n def _convert_id_to_token(self, index):\n \"\"\"\n Converts an index (integer) in a token (string/unicode) using the vocab.\n\n Parameters\n ----------\n index: int\n Integer index to be converted back to a string-based token as part of a larger sequence.\n \"\"\"\n\n return self.ids_to_tokens.get(index, self.unk_token)\n\n def convert_tokens_to_string(self, tokens: List[str]):\n \"\"\"Converts a sequence of tokens (string) in a single string.\n\n Parameters\n ----------\n tokens: 
List[str]\n List of tokens for a given string sequence.\n\n Returns\n -------\n out_string: str\n Single string from combined tokens.\n \"\"\"\n\n out_string: str = \" \".join(tokens).replace(\" ##\", \"\").strip()\n return out_string\n\n def add_special_tokens_ids_single_sequence(self, token_ids: List[int]):\n \"\"\"\n Adds special tokens to the a sequence for sequence classification tasks.\n A BERT sequence has the following format: [CLS] X [SEP]\n\n Parameters\n ----------\n\n token_ids: list[int]\n list of tokenized input ids. Can be obtained using the encode or encode_plus methods.\n \"\"\"\n\n return [self.cls_token_id] + token_ids + [self.sep_token_id]\n\n def add_special_tokens_single_sequence(self, tokens: List[str]):\n \"\"\"\n Adds special tokens to the a sequence for sequence classification tasks.\n A BERT sequence has the following format: [CLS] X [SEP]\n\n Parameters\n ----------\n tokens: List[str]\n List of tokens for a given string sequence.\n\n \"\"\"\n return [self.cls_token] + tokens + [self.sep_token]\n\n def add_special_tokens_ids_sequence_pair(\n self, token_ids_0: List[int], token_ids_1: List[int]\n ) -> List[int]:\n \"\"\"\n Adds special tokens to a sequence pair for sequence classification tasks.\n A BERT sequence pair has the following format: [CLS] A [SEP] B [SEP]\n\n Parameters\n ----------\n token_ids_0: List[int]\n List of ids for the first string sequence in the sequence pair (A).\n\n token_ids_1: List[int]\n List of tokens for the second string sequence in the sequence pair (B).\n \"\"\"\n\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n\n return cls + token_ids_0 + sep + token_ids_1 + sep\n\n def add_padding_tokens(\n self, token_ids: List[int], length: int, right: bool = True\n ) -> List[int]:\n \"\"\"\n Adds padding tokens to return a sequence of length max_length.\n By default padding tokens are added to the right of the sequence.\n\n Parameters\n ----------\n token_ids: list[int]\n list of tokenized input ids. Can be obtained using the encode or encode_plus methods.\n\n length: int\n\n right: bool (True by default)\n\n Returns\n ----------\n token_ids :\n list of tokenized input ids. 
Can be obtained using the encode or encode_plus methods.\n\n padding: int\n Integer to be added as padding token\n\n \"\"\"\n padding = [self.pad_token_id] * (length - len(token_ids))\n\n if right:\n return token_ids + padding\n else:\n return padding + token_ids\n\n def save_vocabulary(\n self, vocab_path: str\n ): # -> tuple[str]: doctest issue raised with this return type annotation\n \"\"\"\n Save the tokenizer vocabulary to a file.\n\n Parameters\n ----------\n vocab_path: obj: str\n The directory in which to save the SMILES character per line vocabulary file.\n Default vocab file is found in deepchem/feat/tests/data/vocab.txt\n\n Returns\n ----------\n vocab_file: :obj:`Tuple(str)`:\n Paths to the files saved.\n typle with string to a SMILES character per line vocabulary file.\n Default vocab file is found in deepchem/feat/tests/data/vocab.txt\n\n \"\"\"\n index = 0\n if os.path.isdir(vocab_path):\n vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES[\"vocab_file\"])\n else:\n vocab_file = vocab_path\n with open(vocab_file, \"w\", encoding=\"utf-8\") as writer:\n for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(\n \"Saving vocabulary to {}: vocabulary indices are not consecutive.\"\n \" Please check that the vocabulary is not corrupted!\".format(\n vocab_file\n )\n )\n index = token_index\n writer.write(token + \"\\n\")\n index += 1\n return (vocab_file,)"
}
] | from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple, List, Union
from fragment_creator import fragment_creator_factory
from model import ContextArgs, ModelArgs
from tqdm import tqdm
from contextlib import nullcontext
from datetime import datetime
from functools import partial
from model import ContextArgs, Transformer, ModelArgs
from torch.distributed import destroy_process_group, init_process_group
from torch.nn.parallel import DistributedDataParallel as DDP
from preprocess_dataset import SmilesTask
from tokenizer import SmilesTokenizer
import math
import os
import time
import torch
import numpy as np
import logging | 8,867 | learning_rate: float = 1e-4 # max learning rate
max_iters: int = 100000 # total number of training iterations
weight_decay: float = 1e-1
beta1: float = 0.9
beta2: float = 0.95
grad_clip: float = 1.0 # clip gradients at this value, or disable if == 0.0
# learning rate decay settings
decay_lr: bool = True # whether to decay the learning rate
warmup_iters: int = 1000 # how many steps to warm up for
lr_decay_iters: int = 100000 # should be ~= max_iters per Chinchilla
min_lr: float = (
0.0 # minimum learning rate, should be ~= learning_rate/10 per Chinchilla
)
@dataclass
class TrainerArgs:
# Input / Output
io_conf: IOConfig
# Loader Configs
loader_conf: LoaderConfig
# Transformer Args
model_conf: ModelArgs
context_conf: ContextArgs
# Optimizer
optimizer_conf: OptimizerConfig
run_name: str
class Trainer:
def __init__(
self, train_args: TrainerArgs, dtype: str = "float16", compile: bool = False
) -> None:
self.train_conf = train_args
self.dtype = dtype
self.compile = compile
# system
self.run_name = train_args.run_name
self.device = (
"cuda:0" if torch.cuda.is_available() else "cpu"
) # "cuda" # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1' etc., or try 'mps' on macbooks
self.CKPT_PT = f"{self.run_name}.pt"
self.SNAPSHOT_PT = f"snapshot_{self.run_name}.pt"
def _init_ddp_if_possible(self):
# various inits, derived attributes, I/O setup
self.ddp = int(os.environ.get("RANK", -1)) != -1 # is this a ddp run?
if self.ddp:
logger.info(f"Using ddp!")
init_process_group(backend="nccl")
self.ddp_rank = int(os.environ["RANK"])
self.ddp_local_rank = int(os.environ["LOCAL_RANK"])
self.ddp_world_size = int(os.environ["WORLD_SIZE"])
logger.info(f"{self.ddp_rank}, {self.ddp_local_rank},{self.ddp_world_size}")
self.device = f"cuda:{self.ddp_local_rank}"
torch.cuda.set_device(self.device)
self.master_process = (
self.ddp_rank == 0
) # this process will do logging, checkpointing etc.
logger.info(f"Is master process {self.device}? {self.master_process}")
self.seed_offset = self.ddp_rank # each process gets a different seed
# world_size number of processes will be training simultaneously, so we can scale
# down the desired gradient accumulation iterations per process proportionally
assert (
self.train_conf.optimizer_conf.gradient_accumulation_steps
% self.ddp_world_size
== 0
)
self.train_conf.optimizer_conf.gradient_accumulation_steps //= (
self.ddp_world_size
)
else:
# if not ddp, we are running on a single gpu, and one process
self.master_process = True
self.seed_offset = 0
self.ddp_world_size = 1
def _init_train(self):
self.tokens_per_iter = (
self.train_conf.optimizer_conf.gradient_accumulation_steps
* self.ddp_world_size
* self.train_conf.loader_conf.batch_size
* self.train_conf.loader_conf.max_seq_len
)
if self.master_process:
logger.info(f"tokens per iteration will be: {self.tokens_per_iter:,}")
logger.info(
f"breaks down as: {self.train_conf.optimizer_conf.gradient_accumulation_steps} grad accum steps * {self.ddp_world_size} processes * {self.train_conf.loader_conf.batch_size} batch size * {self.train_conf.loader_conf.max_seq_len } max seq len"
)
if self.master_process:
os.makedirs(self.train_conf.io_conf.out_dir, exist_ok=True)
torch.manual_seed(1337 + self.seed_offset)
np.random.seed(1337 + self.seed_offset)
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
self.device_type = (
"cuda" if "cuda" in self.device else "cpu"
) # for later use in torch.autocast
# note: float16 data type will automatically use a GradScaler
ptdtype = {
"float32": torch.float32,
"bfloat16": torch.bfloat16,
"float16": torch.float16,
}[self.dtype]
self.ctx = (
nullcontext()
if self.device_type == "cpu"
else torch.amp.autocast(device_type=self.device_type, dtype=ptdtype)
)
# task-specific setup
|
logger = logging.getLogger(__name__)
@dataclass
class IOConfig:
# I/O
out_dir: str = "out"
eval_interval: int = 500
log_interval: int = 10
eval_iters: int = 25
eval_only: bool = False # if True, script exits right after the first eval
always_save_checkpoint: bool = (
False # if True, always save a checkpoint after each eval
)
init_from: str = "scratch" # 'scratch' or 'resume'
resume_when_snapshot_available: bool = True
@dataclass
class LoaderConfig:
# data
batch_size: int = (
384 # if gradient_accumulation_steps > 1, this is the micro-batch size
)
max_seq_len: int = 768
dataset: str = "smiles"
processed_dataset_ckpt: str = "processed_dataset_None.pkl"
fragment_creator: Union[str, None] = None
# dim = 256
# n_layers = 8
# n_heads = 8
# multiple_of = 128
# dropout = 0.1
@dataclass
class OptimizerConfig:
# adamw optimizer
gradient_accumulation_steps: int = 4 # used to simulate larger batch sizes
learning_rate: float = 1e-4 # max learning rate
max_iters: int = 100000 # total number of training iterations
weight_decay: float = 1e-1
beta1: float = 0.9
beta2: float = 0.95
grad_clip: float = 1.0 # clip gradients at this value, or disable if == 0.0
# learning rate decay settings
decay_lr: bool = True # whether to decay the learning rate
warmup_iters: int = 1000 # how many steps to warm up for
lr_decay_iters: int = 100000 # should be ~= max_iters per Chinchilla
min_lr: float = (
0.0 # minimum learning rate, should be ~= learning_rate/10 per Chinchilla
)
@dataclass
class TrainerArgs:
# Input / Output
io_conf: IOConfig
# Loader Configs
loader_conf: LoaderConfig
# Transformer Args
model_conf: ModelArgs
context_conf: ContextArgs
# Optimizer
optimizer_conf: OptimizerConfig
run_name: str
class Trainer:
def __init__(
self, train_args: TrainerArgs, dtype: str = "float16", compile: bool = False
) -> None:
self.train_conf = train_args
self.dtype = dtype
self.compile = compile
# system
self.run_name = train_args.run_name
self.device = (
"cuda:0" if torch.cuda.is_available() else "cpu"
) # "cuda" # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1' etc., or try 'mps' on macbooks
self.CKPT_PT = f"{self.run_name}.pt"
self.SNAPSHOT_PT = f"snapshot_{self.run_name}.pt"
def _init_ddp_if_possible(self):
# various inits, derived attributes, I/O setup
self.ddp = int(os.environ.get("RANK", -1)) != -1 # is this a ddp run?
if self.ddp:
logger.info("Using ddp!")
init_process_group(backend="nccl")
self.ddp_rank = int(os.environ["RANK"])
self.ddp_local_rank = int(os.environ["LOCAL_RANK"])
self.ddp_world_size = int(os.environ["WORLD_SIZE"])
logger.info(f"{self.ddp_rank}, {self.ddp_local_rank},{self.ddp_world_size}")
self.device = f"cuda:{self.ddp_local_rank}"
torch.cuda.set_device(self.device)
self.master_process = (
self.ddp_rank == 0
) # this process will do logging, checkpointing etc.
logger.info(f"Is master process {self.device}? {self.master_process}")
self.seed_offset = self.ddp_rank # each process gets a different seed
# world_size number of processes will be training simultaneously, so we can scale
# down the desired gradient accumulation iterations per process proportionally
assert (
self.train_conf.optimizer_conf.gradient_accumulation_steps
% self.ddp_world_size
== 0
)
self.train_conf.optimizer_conf.gradient_accumulation_steps //= (
self.ddp_world_size
)
else:
# if not ddp, we are running on a single gpu, and one process
self.master_process = True
self.seed_offset = 0
self.ddp_world_size = 1
def _init_train(self):
self.tokens_per_iter = (
self.train_conf.optimizer_conf.gradient_accumulation_steps
* self.ddp_world_size
* self.train_conf.loader_conf.batch_size
* self.train_conf.loader_conf.max_seq_len
)
if self.master_process:
logger.info(f"tokens per iteration will be: {self.tokens_per_iter:,}")
logger.info(
f"breaks down as: {self.train_conf.optimizer_conf.gradient_accumulation_steps} grad accum steps * {self.ddp_world_size} processes * {self.train_conf.loader_conf.batch_size} batch size * {self.train_conf.loader_conf.max_seq_len } max seq len"
)
if self.master_process:
os.makedirs(self.train_conf.io_conf.out_dir, exist_ok=True)
torch.manual_seed(1337 + self.seed_offset)
np.random.seed(1337 + self.seed_offset)
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
self.device_type = (
"cuda" if "cuda" in self.device else "cpu"
) # for later use in torch.autocast
# note: float16 data type will automatically use a GradScaler
ptdtype = {
"float32": torch.float32,
"bfloat16": torch.bfloat16,
"float16": torch.float16,
}[self.dtype]
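# autocast context: a no-op nullcontext on CPU, torch.amp.autocast with the dtype selected above on CUDA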
self.ctx = (
nullcontext()
if self.device_type == "cpu"
else torch.amp.autocast(device_type=self.device_type, dtype=ptdtype)
)
# task-specific setup | task = {"smiles": SmilesTask}[self.train_conf.loader_conf.dataset] | 6 | 2023-11-28 09:50:31+00:00 | 12k |
kiharalab/Distance-AF | Train/train.py | [
{
"identifier": "DistAF_Dataset",
"path": "Dataset/dataset.py",
"snippet": "class DistAF_Dataset(Dataset):\n\n def __init__(self, args=None):\n self.train_file = args.target_file\n\n with open(self.train_file, 'r') as f:\n self.targets = f.read().splitlines()\n \n self.max_len = args.max_len\n self.embedding_file = args.emd_file\n self.fasta_file = args.fasta_file\n self.initial_pdb = args.initial_pdb\n self.window_info = args.window_info\n self.dist_constraint_file = args.dist_info\n self.output_dir = args.output_dir\n self.args = args\n self.target_seq_len = 0\n self.start_position = 0\n self.end_position = self.start_position + self.target_seq_len\n\n def __len__(self):\n return len(self.targets)\n\n def __getitem__(self, index):\n target = self.targets[index]\n target_seq = get_seq(self.fasta_file)\n self.target_seq_len = len(target_seq)\n self.output_dir = os.path.join(self.output_dir,target)\n self.end_position = self.target_seq_len\n\n data = {}\n data[\"target\"] = target\n \n if self.embedding_file.endswith('.npz'):\n emd = np.load(self.embedding_file)\n else:\n import pickle\n with open(self.embedding_file, 'rb') as f:\n emd = pickle.load(f)['representations']\n pair = emd['pair']\n single = emd['single']\n \n single = torch.tensor(single)\n pair = torch.tensor(pair)\n\n resolution = 1\n \n data['resolution'] = torch.tensor([resolution])\n\n initial_pdb = self.initial_pdb\n coords = read_pdb_info(initial_pdb,self.target_seq_len)\n mask = np.ones(self.target_seq_len)\n \n data['single_representation'] = single\n single_emd = single.unsqueeze(0)\n pair_emd = pair.unsqueeze(0)\n \n data['single_representation'] = single_emd\n data['embed'] = pair_emd\n\n data['coords'] = coords\n data['mask'] = mask\n \n '''all atom attribute load using DeepMind's utils'''\n \n pdb_str = ''\n with open(initial_pdb,\"r\") as f:\n pdb_str = f.read()\n prot = protein.from_pdb_string(pdb_str)\n \n prot_dict = {\n 'aatype': np.asarray(prot.aatype),\n 'all_atom_positions':np.asarray(prot.atom_positions),\n 'all_atom_mask':np.asarray(prot.atom_mask),\n }\t\t\t\t\t\t\n prot_dict = all_atom.make_atom14_positions(prot_dict)\n data['aatype'] = torch.tensor(np.asarray(prot.aatype))\n for key in prot_dict.keys():\n data[key] = torch.tensor(prot_dict[key][:, ...])\n \n protein_object = protein.from_pdb_string(pdb_str)\n residue_index = torch.tensor(protein_object.residue_index)\n protein_object = data_transforms.atom37_to_frames(\n {'aatype': torch.tensor(protein_object.aatype),\n 'all_atom_positions': torch.tensor(protein_object.atom_positions), # (..., 37, 3)\n 'all_atom_mask': torch.tensor(protein_object.atom_mask)})\n protein_object = data_transforms.atom37_to_torsion_angles(protein_object)\n protein_object.update({'residue_index': residue_index})\n protein_object.update({'residue_index': residue_index,\n 'chi_angles_sin_cos': protein_object[\"torsion_angles_sin_cos\"][..., 3:, :],\n 'chi_mask': protein_object[\"torsion_angles_mask\"][..., 3:],\n 'seq_mask': torch.ones(protein_object[\"aatype\"].shape, dtype=torch.float32)})\n for key in protein_object.keys():\n data[key] = protein_object[key][:, ...]\n\n data['seq_length'] = self.target_seq_len\n \n data['aatype_start'] = 0\n \n # variables needed in distance af defined below\n if os.path.exists(os.path.join(self.output_dir, f\"{target}_constraint.pt\")):\n data['domain_window'] = torch.load(os.path.join(self.output_dir, f\"{target}_domain_window.pt\"))\n data['dist_constraint'] = torch.load(os.path.join(self.output_dir, f\"{target}_constraint.pt\"))\n else:\n data['domain_window'] = None\n data['dist_constraint'] = 
None\n domain_window = torch.zeros((self.target_seq_len,self.target_seq_len))\n with open(self.window_info, 'r') as file:\n lines = file.readlines()\n for line in lines:\n line = line.strip()\n start = int(line.split(',')[0])-1\n end = int(line.split(',')[1])+1\n domain_window[start:end,start:end] = 1\n data['domain_window'] = domain_window\n\n dist_constraint = torch.zeros((self.target_seq_len,self.target_seq_len))\n with open(self.dist_constraint_file, 'r') as file:\n lines = file.readlines()\n for line in lines:\n line = line.strip()\n first_resi = int(line.split(',')[0])-1\n second_resi = int(line.split(',')[1])-1\n dist_cons = float(line.split(',')[2])\n dist_constraint[first_resi, second_resi] = dist_cons\n dist_constraint[second_resi, first_resi] = dist_cons\n\n data['dist_constraint'] = dist_constraint\n torch.save(data['dist_constraint'], os.path.join(self.output_dir, f\"{target}_constraint.pt\"))\n torch.save(data['domain_window'], os.path.join(self.output_dir, f\"{target}_domain_window.pt\")) \n\n return data"
},
{
"identifier": "Dist_AF_IPA",
"path": "Model/Dist_AF.py",
"snippet": "class Dist_AF_IPA(nn.Module):\n def __init__(self, args):\n super(Dist_AF_IPA, self).__init__()\n self.structure_module = StructureModule(trans_scale_factor=args.point_scale, no_blocks=args.ipa_depth, no_heads_ipa=12, c_ipa=16) #no_heads_ipa=24, c_ipa=64\n self.plddt = PerResidueLDDTCaPredictor()\n self.experimentally_resolved = ExperimentallyResolvedHead()\n self.args = args\n\n self.dropout1 = nn.Dropout(p=0.3)\n self.dropout2 = nn.Dropout(p=0.3)\n #self.pair_project = nn.Linear(128 + 128, 128)\n def forward(self, embedding, single_repr, aatype, batch_gt_frames):\n\n output_bb, translation, outputs = self.structure_module(single_repr, embedding, f=aatype, mask=batch_gt_frames['seq_mask'])\n pred_frames = torch.stack(output_bb)\n lddt = self.plddt(outputs['single'])\n experimentally_resolved_logits = self.experimentally_resolved(outputs['single'])\n del lddt, experimentally_resolved_logits\n return translation, outputs, pred_frames"
},
{
"identifier": "backbone_loss",
"path": "Loss/backbone_loss.py",
"snippet": "def backbone_loss(\n backbone_affine_tensor: torch.Tensor,\n backbone_affine_mask: torch.Tensor,\n traj: torch.Tensor,\n use_clamped_fape: Optional[torch.Tensor] = None,\n clamp_distance: float = 10.0,\n loss_unit_distance: float = 10.0,\n eps: float = 1e-4,\n dis_gt = None,\n mask_window=None,\n **kwargs,\n) -> torch.Tensor:\n pred_aff = T.from_tensor(traj)\n gt_aff = T.from_tensor(backbone_affine_tensor)\n fape_loss = compute_fape(\n pred_aff,\n gt_aff[None],\n backbone_affine_mask[None],\n pred_aff.get_trans(),\n gt_aff[None].get_trans(),\n backbone_affine_mask[None],\n l1_clamp_distance=clamp_distance,\n length_scale=loss_unit_distance,\n eps=eps,\n dis_gt=dis_gt,\n mask_window=mask_window\n )\n if use_clamped_fape is not None:\n unclamped_fape_loss = compute_fape(\n pred_aff,\n gt_aff[None],\n backbone_affine_mask[None],\n pred_aff.get_trans(),\n gt_aff[None].get_trans(),\n backbone_affine_mask[None],\n l1_clamp_distance=None,\n length_scale=loss_unit_distance,\n eps=eps,\n dis_gt=dis_gt,\n mask_window=mask_window\n )\n\n fape_loss = fape_loss * use_clamped_fape + unclamped_fape_loss * (\n 1 - use_clamped_fape\n )\n\n # Average over the batch dimension\n fape_loss = torch.mean(fape_loss)\n # return fape_loss\n dis_loss = compute_fape_dis(\n pred_frames=pred_aff,\n pred_positions=pred_aff.get_trans(),\n length_scale=loss_unit_distance,\n eps=eps,\n dis_gt=dis_gt)\n\n return fape_loss, dis_loss"
},
{
"identifier": "sidechain_loss_dis",
"path": "Loss/sidechain_loss.py",
"snippet": "def sidechain_loss_dis(\n sidechain_frames: torch.Tensor,\n sidechain_atom_pos: torch.Tensor,\n rigidgroups_gt_frames: torch.Tensor,\n rigidgroups_alt_gt_frames: torch.Tensor,\n rigidgroups_gt_exists: torch.Tensor,\n renamed_atom14_gt_positions: torch.Tensor,\n renamed_atom14_gt_exists: torch.Tensor,\n alt_naming_is_better: torch.Tensor,\n dis_gt: torch.Tensor,\n dist_window: torch.Tensor,\n clamp_distance: float = 10.0,\n length_scale: float = 10.0,\n eps: float = 1e-4,\n **kwargs,\n) -> torch.Tensor:\n \n renamed_gt_frames = (\n 1.0 - alt_naming_is_better[..., None, None, None]\n ) * rigidgroups_gt_frames + alt_naming_is_better[\n ..., None, None, None\n ] * rigidgroups_alt_gt_frames\n \n # Steamroll the inputs\n sidechain_frames = sidechain_frames[-1]\n batch_dims = sidechain_frames.shape[:-4]\n sidechain_frames = sidechain_frames.view(*batch_dims, -1, 4, 4)\n sidechain_frames = T.from_4x4(sidechain_frames)\n renamed_gt_frames = renamed_gt_frames.view(*batch_dims, -1, 4, 4)\n renamed_gt_frames = T.from_4x4(renamed_gt_frames)\n rigidgroups_gt_exists = rigidgroups_gt_exists.reshape(*batch_dims, -1)\n sidechain_atom_pos = sidechain_atom_pos[-1]\n sidechain_atom_pos = sidechain_atom_pos.view(*batch_dims, -1, 3)\n renamed_atom14_gt_positions = renamed_atom14_gt_positions.view(\n *batch_dims, -1, 3\n )\n renamed_atom14_gt_exists = renamed_atom14_gt_exists.view(*batch_dims, -1)\n\n fape = compute_sidechain_dis(\n sidechain_frames,\n renamed_gt_frames,\n rigidgroups_gt_exists,\n sidechain_atom_pos,\n renamed_atom14_gt_positions,\n renamed_atom14_gt_exists,\n l1_clamp_distance=clamp_distance,\n length_scale=length_scale,\n eps=eps,\n dis_gt=dis_gt,\n dist_window=dist_window\n )\n\n return fape"
},
{
"identifier": "compute_renamed_ground_truth",
"path": "Loss/openfold_loss.py",
"snippet": "def compute_renamed_ground_truth(\n batch: Dict[str, torch.Tensor],\n atom14_pred_positions: torch.Tensor,\n eps=1e-10,\n) -> Dict[str, torch.Tensor]:\n \"\"\"\n Find optimal renaming of ground truth based on the predicted positions.\n\n Alg. 26 \"renameSymmetricGroundTruthAtoms\"\n\n This renamed ground truth is then used for all losses,\n such that each loss moves the atoms in the same direction.\n\n Args:\n batch: Dictionary containing:\n * atom14_gt_positions: Ground truth positions.\n * atom14_alt_gt_positions: Ground truth positions with renaming swaps.\n * atom14_atom_is_ambiguous: 1.0 for atoms that are affected by\n renaming swaps.\n * atom14_gt_exists: Mask for which atoms exist in ground truth.\n * atom14_alt_gt_exists: Mask for which atoms exist in ground truth\n after renaming.\n * atom14_atom_exists: Mask for whether each atom is part of the given\n amino acid type.\n atom14_pred_positions: Array of atom positions in global frame with shape\n Returns:\n Dictionary containing:\n alt_naming_is_better: Array with 1.0 where alternative swap is better.\n renamed_atom14_gt_positions: Array of optimal ground truth positions\n after renaming swaps are performed.\n renamed_atom14_gt_exists: Mask after renaming swap is performed.\n \"\"\"\n\n pred_dists = torch.sqrt(\n eps\n + torch.sum(\n (\n atom14_pred_positions[..., None, :, None, :]\n - atom14_pred_positions[..., None, :, None, :, :]\n )\n ** 2,\n dim=-1,\n )\n )\n\n atom14_gt_positions = batch[\"atom14_gt_positions\"]\n gt_dists = torch.sqrt(\n eps\n + torch.sum(\n (\n atom14_gt_positions[..., None, :, None, :]\n - atom14_gt_positions[..., None, :, None, :, :]\n )\n ** 2,\n dim=-1,\n )\n )\n\n atom14_alt_gt_positions = batch[\"atom14_alt_gt_positions\"]\n alt_gt_dists = torch.sqrt(\n eps\n + torch.sum(\n (\n atom14_alt_gt_positions[..., None, :, None, :]\n - atom14_alt_gt_positions[..., None, :, None, :, :]\n )\n ** 2,\n dim=-1,\n )\n )\n\n lddt = torch.sqrt(eps + (pred_dists - gt_dists) ** 2)\n alt_lddt = torch.sqrt(eps + (pred_dists - alt_gt_dists) ** 2)\n\n atom14_gt_exists = batch[\"atom14_gt_exists\"]\n atom14_atom_is_ambiguous = batch[\"atom14_atom_is_ambiguous\"]\n mask = (\n atom14_gt_exists[..., None, :, None]\n * atom14_atom_is_ambiguous[..., None, :, None]\n * atom14_gt_exists[..., None, :, None, :]\n * (1.0 - atom14_atom_is_ambiguous[..., None, :, None, :])\n )\n\n per_res_lddt = torch.sum(mask * lddt, dim=(-1, -2, -3))\n alt_per_res_lddt = torch.sum(mask * alt_lddt, dim=(-1, -2, -3))\n\n fp_type = atom14_pred_positions.dtype\n alt_naming_is_better = (alt_per_res_lddt < per_res_lddt).type(fp_type)\n\n renamed_atom14_gt_positions = (\n 1.0 - alt_naming_is_better[..., None, None]\n ) * atom14_gt_positions + alt_naming_is_better[\n ..., None, None\n ] * atom14_alt_gt_positions\n\n renamed_atom14_gt_mask = (\n 1.0 - alt_naming_is_better[..., None]\n ) * atom14_gt_exists + alt_naming_is_better[..., None] * batch[\n \"atom14_alt_gt_exists\"\n ]\n\n return {\n \"alt_naming_is_better\": alt_naming_is_better,\n \"renamed_atom14_gt_positions\": renamed_atom14_gt_positions,\n \"renamed_atom14_gt_exists\": renamed_atom14_gt_mask,\n }"
},
{
"identifier": "supervised_chi_loss",
"path": "Loss/openfold_loss.py",
"snippet": "def supervised_chi_loss(\n angles_sin_cos: torch.Tensor,\n unnormalized_angles_sin_cos: torch.Tensor,\n aatype: torch.Tensor,\n seq_mask: torch.Tensor,\n chi_mask: torch.Tensor,\n chi_angles_sin_cos: torch.Tensor,\n chi_weight: float,\n angle_norm_weight: float,\n eps=1e-6,\n dist=0,\n **kwargs,\n) -> torch.Tensor:\n pred_angles = angles_sin_cos[..., 3:, :]\n\n residue_type_one_hot = torch.nn.functional.one_hot(\n aatype,\n residue_constants.restype_num + 1,\n )\n chi_pi_periodic = torch.einsum(\n \"...ij,jk->ik\",\n residue_type_one_hot.type(angles_sin_cos.dtype),\n angles_sin_cos.new_tensor(residue_constants.chi_pi_periodic),\n )\n \n if dist:\n global true_chi\n if true_chi == None:\n with torch.no_grad():\n true_chi = pred_angles[-1][None].clone()\n else:\n true_chi = chi_angles_sin_cos[None]\n # print(true_chi.shape)\n # print(pred_angles.shape)\n # exit(0)\n shifted_mask = (1 - 2 * chi_pi_periodic).unsqueeze(-1)\n true_chi_shifted = shifted_mask * true_chi\n \n sq_chi_error = torch.sum((true_chi - pred_angles) ** 2, dim=-1)\n sq_chi_error_shifted = torch.sum(\n (true_chi_shifted - pred_angles) ** 2, dim=-1\n )\n sq_chi_error = torch.minimum(sq_chi_error, sq_chi_error_shifted)\n # The ol' switcheroo\n sq_chi_error = sq_chi_error.permute(\n *range(len(sq_chi_error.shape))[1:-2], 0, -2, -1\n )\n sq_chi_loss = masked_mean(\n chi_mask[..., None, :, :], sq_chi_error, dim=(-1, -2, -3)\n )\n\n loss = chi_weight * sq_chi_loss\n\n angle_norm = torch.sqrt(\n torch.sum(unnormalized_angles_sin_cos ** 2, dim=-1) + eps\n )\n norm_error = torch.abs(angle_norm - 1.0)\n norm_error = norm_error.permute(\n *range(len(norm_error.shape))[1:-2], 0, -2, -1\n )\n angle_norm_loss = masked_mean(\n seq_mask[..., None, :, None], norm_error, dim=(-1, -2, -3)\n )\n\n loss = loss + angle_norm_weight * angle_norm_loss\n\n # Average over the batch dimension\n loss = torch.mean(loss)\n\n return loss"
},
{
"identifier": "find_structural_violations",
"path": "Loss/openfold_loss.py",
"snippet": "def find_structural_violations(\n batch: Dict[str, torch.Tensor],\n atom14_pred_positions: torch.Tensor,\n violation_tolerance_factor: float,\n clash_overlap_tolerance: float,\n **kwargs,\n) -> Dict[str, torch.Tensor]:\n \"\"\"Computes several checks for structural violations.\"\"\"\n\n # Compute between residue backbone violations of bonds and angles.\n connection_violations = between_residue_bond_loss(\n pred_atom_positions=atom14_pred_positions,\n pred_atom_mask=batch[\"atom14_atom_exists\"],\n residue_index=batch[\"residue_index\"],\n aatype=batch[\"aatype\"],\n tolerance_factor_soft=violation_tolerance_factor,\n tolerance_factor_hard=violation_tolerance_factor,\n )\n\n # Compute the Van der Waals radius for every atom\n # (the first letter of the atom name is the element type).\n # Shape: (N, 14).\n atomtype_radius = [\n residue_constants.van_der_waals_radius[name[0]]\n for name in residue_constants.atom_types\n ]\n atomtype_radius = atom14_pred_positions.new_tensor(atomtype_radius)\n atom14_atom_radius = (\n batch[\"atom14_atom_exists\"]\n * atomtype_radius[batch[\"residx_atom14_to_atom37\"]]\n )\n\n # Compute the between residue clash loss.\n between_residue_clashes = between_residue_clash_loss(\n atom14_pred_positions=atom14_pred_positions,\n atom14_atom_exists=batch[\"atom14_atom_exists\"],\n atom14_atom_radius=atom14_atom_radius,\n residue_index=batch[\"residue_index\"],\n overlap_tolerance_soft=clash_overlap_tolerance,\n overlap_tolerance_hard=clash_overlap_tolerance,\n )\n\n # Compute all within-residue violations (clashes,\n # bond length and angle violations).\n restype_atom14_bounds = residue_constants.make_atom14_dists_bounds(\n overlap_tolerance=clash_overlap_tolerance,\n bond_length_tolerance_factor=violation_tolerance_factor,\n )\n atom14_atom_exists = batch[\"atom14_atom_exists\"]\n atom14_dists_lower_bound = atom14_pred_positions.new_tensor(\n restype_atom14_bounds[\"lower_bound\"]\n )[batch[\"aatype\"]]\n atom14_dists_upper_bound = atom14_pred_positions.new_tensor(\n restype_atom14_bounds[\"upper_bound\"]\n )[batch[\"aatype\"]]\n residue_violations = within_residue_violations(\n atom14_pred_positions=atom14_pred_positions,\n atom14_atom_exists=batch[\"atom14_atom_exists\"],\n atom14_dists_lower_bound=atom14_dists_lower_bound,\n atom14_dists_upper_bound=atom14_dists_upper_bound,\n tighten_bounds_for_loss=0.0,\n )\n\n # Combine them to a single per-residue violation mask (used later for LDDT).\n per_residue_violations_mask = torch.max(\n torch.stack(\n [\n connection_violations[\"per_residue_violation_mask\"],\n torch.max(\n between_residue_clashes[\"per_atom_clash_mask\"], dim=-1\n )[0],\n torch.max(residue_violations[\"per_atom_violations\"], dim=-1)[0],\n ],\n dim=-1,\n ),\n dim=-1,\n )[0]\n\n return {\n \"between_residues\": {\n \"bonds_c_n_loss_mean\": connection_violations[\"c_n_loss_mean\"], # ()\n \"angles_ca_c_n_loss_mean\": connection_violations[\n \"ca_c_n_loss_mean\"\n ], # ()\n \"angles_c_n_ca_loss_mean\": connection_violations[\n \"c_n_ca_loss_mean\"\n ], # ()\n \"connections_per_residue_loss_sum\": connection_violations[\n \"per_residue_loss_sum\"\n ], # (N)\n \"connections_per_residue_violation_mask\": connection_violations[\n \"per_residue_violation_mask\"\n ], # (N)\n \"clashes_mean_loss\": between_residue_clashes[\"mean_loss\"], # ()\n \"clashes_per_atom_loss_sum\": between_residue_clashes[\n \"per_atom_loss_sum\"\n ], # (N, 14)\n \"clashes_per_atom_clash_mask\": between_residue_clashes[\n \"per_atom_clash_mask\"\n ], # (N, 14)\n },\n 
\"within_residues\": {\n \"per_atom_loss_sum\": residue_violations[\n \"per_atom_loss_sum\"\n ], # (N, 14)\n \"per_atom_violations\": residue_violations[\n \"per_atom_violations\"\n ], # (N, 14),\n },\n \"total_per_residue_violations_mask\": per_residue_violations_mask, # (N)\n }"
},
{
"identifier": "violation_loss",
"path": "Loss/openfold_loss.py",
"snippet": "def violation_loss(\n violations: Dict[str, torch.Tensor],\n atom14_atom_exists: torch.Tensor,\n eps=1e-6,\n **kwargs,\n) -> torch.Tensor:\n num_atoms = torch.sum(atom14_atom_exists)\n l_clash = torch.sum(\n violations[\"between_residues\"][\"clashes_per_atom_loss_sum\"]\n + violations[\"within_residues\"][\"per_atom_loss_sum\"]\n )\n l_clash = l_clash / (eps + num_atoms)\n loss = (\n violations[\"between_residues\"][\"bonds_c_n_loss_mean\"]\n + violations[\"between_residues\"][\"angles_ca_c_n_loss_mean\"]\n + violations[\"between_residues\"][\"angles_c_n_ca_loss_mean\"]\n + l_clash\n )\n\n return loss"
},
{
"identifier": "atom14_to_atom37",
"path": "train_utils/feats.py",
"snippet": "def atom14_to_atom37(atom14, batch):\n atom37_data = batched_gather(\n atom14,\n batch[\"residx_atom37_to_atom14\"].long(), #tensor indexing must be long\n dim=-2,\n no_batch_dims=len(atom14.shape[:-2]),\n )\n\n atom37_data = atom37_data * batch[\"atom37_atom_exists\"][..., None]\n\n return atom37_data"
},
{
"identifier": "protein",
"path": "protein_utils/protein.py",
"snippet": "class Protein:\ndef from_pdb_string(pdb_str: str, chain_id: Optional[str] = None) -> Protein:\ndef to_pdb(prot: Protein) -> str:\ndef ideal_atom_mask(prot: Protein) -> np.ndarray:\ndef from_prediction(features: FeatureDict, result: ModelOutput) -> Protein:"
},
{
"identifier": "rmsd",
"path": "utils/rmsd.py",
"snippet": "def rmsd(V, W):\n \"\"\"\n Calculate Root-mean-square deviation from two sets of vectors V and W.\n Parameters\n ----------\n V : array\n (N,D) matrix, where N is points and D is dimension.\n W : array\n (N,D) matrix, where N is points and D is dimension.\n Returns\n -------\n rmsd : float\n Root-mean-square deviation between the two vectors\n \"\"\"\n diff = np.array(V) - np.array(W)\n N = len(V)\n return np.sqrt((diff * diff).sum() / N)"
},
{
"identifier": "set_seed",
"path": "utils/set_seed.py",
"snippet": "def set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n # Set CuDNN to use deterministic algorithms\n cudnn.deterministic = True\n cudnn.benchmark = False"
},
{
"identifier": "collate_fn",
"path": "train_utils/collate.py",
"snippet": "def collate_fn(batch, args):\n\n gt_keys_3d = ['all_atom_positions', 'atom14_gt_positions', 'atom14_alt_gt_positions']\n gt_keys_2d = ['all_atom_mask', 'atom14_atom_exists', 'atom14_gt_exists', 'residx_atom14_to_atom37', 'residx_atom37_to_atom14', 'atom37_atom_exists','atom14_alt_gt_exists', 'atom14_atom_is_ambiguous']\n gt_frames_keys_3d = ['rigidgroups_gt_exists', 'rigidgroups_group_exists', 'rigidgroups_group_is_ambiguous']\n gt_frames_keys_4d = ['torsion_angles_sin_cos', 'alt_torsion_angles_sin_cos']\n gt_frames_keys_5d = ['rigidgroups_gt_frames', 'rigidgroups_alt_gt_frames']\n collate_dict = {}\n\n keys_3d = ['dist', 'mu', 'rho', 'theta', 'sce', 'no']\n keys_2d = ['phi', 'psi']\n\n lens = [data['embed'].shape[1] for data in batch]\n\n embed_size = batch[0]['embed'].shape[-1]\n batch_size = len(batch)\n max_len = max(lens)\n single_size = batch[0]['single_representation'].shape[-1]\n\n for key in keys_3d:\n if key in batch[0]: #It wont be present for test dataloader\n collate_dict[key] = pad_tensor(batch, lens, [batch_size, max_len, max_len], key, dtype='long')\n for key in keys_2d:\n if key in batch[0]:\n collate_dict[key] = pad_tensor(batch, lens, [batch_size, max_len], key, dtype='long')\n\n if args.embed =='msa_transformer':\n collate_dict['embed'] = pad_tensor3(batch, lens, [batch_size, max_len, max_len, embed_size], 'embed')\n collate_dict['single_representation'] = pad_tensor3(batch, lens, [batch_size, max_len, single_size], 'single_representation')\n collate_dict['aatype'] = pad_tensor3(batch, lens, [batch_size, max_len], 'aatype', dtype='long')\n collate_dict['residue_index'] = pad_tensor3(batch, lens, [batch_size, max_len], 'residue_index', dtype='long')\n \n for key in gt_keys_3d:\n if 'atom14' in key:\n collate_dict[key] = pad_tensor4(batch, lens, [batch_size, max_len, 14, 3], key)\n else:\n collate_dict[key] = pad_tensor4(batch, lens, [batch_size, max_len, 37, 3], key)\n for key in gt_keys_2d:\n if key == 'residx_atom37_to_atom14':\n collate_dict[key] = pad_tensor4(batch, lens, [batch_size, max_len, 37], key)#, dtype='long')\n continue\n if key == 'residx_atom14_to_atom37':\n collate_dict[key] = pad_tensor4(batch, lens, [batch_size, max_len, 14], key, dtype='long')\n continue\n if 'atom14' in key:\n collate_dict[key] = pad_tensor4(batch, lens, [batch_size, max_len, 14], key)\n else:\n collate_dict[key] = pad_tensor4(batch, lens, [batch_size, max_len, 37], key)\n for key in gt_frames_keys_3d:\n collate_dict[key] = pad_tensor4(batch, lens, [batch_size, max_len, 8], key)\n for key in gt_frames_keys_4d:\n collate_dict[key] = pad_tensor4(batch, lens, [batch_size, max_len, 7, 2], key)\n for key in gt_frames_keys_5d:\n collate_dict[key] = pad_tensor4(batch, lens, [batch_size, max_len, 8, 4, 4], key)\n collate_dict['torsion_angles_mask'] = pad_tensor4(batch, lens, [batch_size, max_len, 7], 'torsion_angles_mask')\n collate_dict['chi_angles_sin_cos'] = pad_tensor4(batch, lens, [batch_size, max_len, 4, 2], 'chi_angles_sin_cos')\n collate_dict['chi_mask'] = pad_tensor4(batch, lens, [batch_size, max_len, 4], 'chi_mask')\n collate_dict['seq_mask'] = pad_tensor3(batch, lens, [batch_size, max_len], 'seq_mask')\n\n targets = []\n record_lines = []\n resolution = []\n seq_length = list()\n\n for i_batch, (data, length) in enumerate(zip(batch, lens)):\n target = data[\"target\"]\n res = data['resolution']\n targets.append(target)\n resolution.append(res)\n # record_lines.append(data['record_lines'])\n seq_length.append(data['seq_length'])\n collate_dict['resolution'] = 
torch.stack(resolution)\n collate_dict['seq_length'] = torch.tensor(seq_length)\n collate_dict['domain_window'] = data['domain_window']\n collate_dict['dist_constraint'] = data['dist_constraint']\n if args.dist:\n collate_dict['aatype_start'] = data['aatype_start']\n return collate_dict, targets"
}
] | from torch.utils.data import DataLoader
from Dataset.dataset import DistAF_Dataset
from Model.Dist_AF import Dist_AF_IPA
from Loss.backbone_loss import backbone_loss
from Loss.sidechain_loss import sidechain_loss_dis
from Loss.openfold_loss import compute_renamed_ground_truth, supervised_chi_loss,find_structural_violations,violation_loss
from train_utils.feats import atom14_to_atom37
from protein_utils import protein
from utils import rmsd
from utils.set_seed import set_seed
from train_utils.collate import collate_fn
from torch.cuda.amp import autocast, GradScaler
from torch.utils.checkpoint import checkpoint as ckpt
import torch.optim as optim
import torch
import os
import numpy as np
import functools
import gc | 9,589 | #results = val(args, model, val_dataloader)
gt_keys = ['all_atom_positions', 'all_atom_mask', 'atom14_atom_exists', 'atom14_gt_exists', 'atom14_gt_positions',
'residx_atom14_to_atom37', 'residx_atom37_to_atom14', 'atom37_atom_exists', 'atom14_alt_gt_positions', 'atom14_alt_gt_exists',
'atom14_atom_is_ambiguous', 'residue_index']
gt_frames_keys = ['rigidgroups_gt_frames', 'rigidgroups_gt_exists', 'rigidgroups_group_exists', 'rigidgroups_group_is_ambiguous', 'rigidgroups_alt_gt_frames',
'torsion_angles_sin_cos', 'alt_torsion_angles_sin_cos', 'torsion_angles_mask', 'chi_angles_sin_cos', 'chi_mask', 'seq_mask']
for epoch in range(starting_epoch + 1,args.epochs+1):
for step, (batch,target) in enumerate(train_dataloader):
optimizer.zero_grad()
embedding = batch['embed']
domain_window = batch['domain_window'].squeeze(0)
dist_constraint = batch['dist_constraint'].squeeze(0)
single_repr_batch = batch['single_representation']
aatype_batch = batch["aatype"]
batch_gt = {key: batch[key] for key in gt_keys}
batch_gt_frames = {key: batch[key] for key in gt_frames_keys}
batch_gt.update({'seq_length': batch['seq_length']})
resolution = batch['resolution']
representation = None
if args.cuda:
embedding = embedding.to(args.device_id)
resolution = resolution.to(args.device_id)
for key in batch_gt.keys():
batch_gt[key] = batch_gt[key].to(args.device_id)
for key in batch_gt_frames.keys():
batch_gt_frames[key] = batch_gt_frames[key].to(args.device_id)
single_repr_batch = single_repr_batch.to(args.device_id)
#coords_batch = coords_batch.cuda(args.device_id)
#masks_batch = masks_batch.cuda(args.device_id)
aatype_batch = aatype_batch.to(args.device_id)
domain_window = domain_window.to(args.device_id)
dist_constraint = dist_constraint.to(args.device_id)
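# dummy tensor with requires_grad=True is passed through the checkpointed call so that
# torch.utils.checkpoint sees at least one input requiring grad and gradients still flow
# to the model parameters; run_ckpt itself ignores the dummy argument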
dummy = torch.Tensor(1)
dummy.requires_grad = True
if args.use_checkpoint:
def run_ckpt(model,embedding, single_repr_batch, aatype_batch, batch_gt_frames,dummy):
return model(embedding, single_repr_batch, aatype_batch, batch_gt_frames)
translation, outputs, pred_frames = ckpt(run_ckpt,model,embedding, single_repr_batch, aatype_batch, batch_gt_frames,dummy)
else:
translation, outputs, pred_frames = model(embedding, single_repr_batch, aatype_batch, batch_gt_frames)
#compute all needed losses
bb_loss, dis_loss = backbone_loss(
backbone_affine_tensor=batch_gt_frames["rigidgroups_gt_frames"][..., 0, :, :],
backbone_affine_mask=batch_gt_frames['rigidgroups_gt_exists'][..., 0],
traj=pred_frames,
dis_gt=dist_constraint,
mask_window=domain_window
)
rename = compute_renamed_ground_truth(batch_gt, outputs['positions'][-1])
sc_loss = sidechain_loss_dis(
sidechain_frames=outputs['sidechain_frames'],
sidechain_atom_pos=outputs['positions'],
rigidgroups_gt_frames=batch_gt_frames['rigidgroups_gt_frames'],
rigidgroups_alt_gt_frames=batch_gt_frames['rigidgroups_alt_gt_frames'],
rigidgroups_gt_exists=batch_gt_frames['rigidgroups_gt_exists'],
renamed_atom14_gt_positions=rename['renamed_atom14_gt_positions'],
renamed_atom14_gt_exists=rename['renamed_atom14_gt_exists'],
alt_naming_is_better=rename['alt_naming_is_better'],
dis_gt=dist_constraint,
dist_window=domain_window
)
angle_loss = supervised_chi_loss(outputs['angles'],
outputs['unnormalized_angles'],
aatype=aatype_batch,
seq_mask=batch_gt_frames['seq_mask'],
chi_mask=batch_gt_frames['chi_mask'],
chi_angles_sin_cos=batch_gt_frames['chi_angles_sin_cos'],
chi_weight=0.5,
angle_norm_weight=0.01,
dist=args.dist
)
batch_gt.update({'aatype': aatype_batch})
violation = find_structural_violations(batch_gt, outputs['positions'][-1],
violation_tolerance_factor=12,
clash_overlap_tolerance=1.5)
violation_loss_ = violation_loss(violation, batch_gt['atom14_atom_exists'])
vio_loss = torch.mean(violation_loss_)
#print(violation_loss_)
seq_len = torch.mean(batch_gt["seq_length"].float())
crop_len = torch.tensor(aatype_batch.shape[-1]).to(device=aatype_batch.device)
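# total loss: the distance-constraint term (dis_loss) is scaled by args.dist_weight times a
# factor that grows as dis_loss shrinks (12, 24 or 48), while the structural terms
# (backbone/side-chain/violation/chi) are scaled by sqrt(min(seq_len, crop_len));
# --loose_dist relaxes the factor back to 12 once dis_loss drops below 1.0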
if dis_loss > 10.0:
fape = 12 * dis_loss * args.dist_weight + (bb_loss + sc_loss + vio_loss + angle_loss) * torch.sqrt(min(seq_len, crop_len))
elif dis_loss > 5.0 and dis_loss < 10.0:
fape = 24 * dis_loss * args.dist_weight + (bb_loss + sc_loss + vio_loss + angle_loss) * torch.sqrt(min(seq_len, crop_len))
else:
if args.loose_dist and dis_loss < 1.0:
fape = 12 * dis_loss * args.dist_weight + (bb_loss + sc_loss + vio_loss + angle_loss) * torch.sqrt(min(seq_len, crop_len))
else:
fape = 48 * dis_loss * args.dist_weight + (bb_loss + sc_loss + vio_loss + angle_loss) * torch.sqrt(min(seq_len, crop_len))
positions = outputs['positions'][-1]
del bb_loss, dis_loss, sc_loss, vio_loss, angle_loss, violation_loss_, outputs
del pred_frames, translation
gc.collect()
torch.cuda.empty_cache()
print(f"Epoch:{epoch}, FAPE loss:{fape.item()}")
fape.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
# save model checkpoint
if args.val_epochs > 0 and epoch % args.val_epochs == 0 and epoch > 0:
epoch_output_dir = os.path.join(target_output_dir, f"checkpoint-{epoch}-{epoch}")
if not os.path.exists(epoch_output_dir):
os.makedirs(epoch_output_dir)
checkpoint = {'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'epoch': epoch,
'rng_state': torch.get_rng_state()}
torch.save(checkpoint, os.path.join(epoch_output_dir, "checkpoint.pth"))
torch.save(model.state_dict(), os.path.join(epoch_output_dir, "model_state_dict.pt"))
torch.save(optimizer.state_dict(), os.path.join(epoch_output_dir, "optimizer.pt"))
#save predicted pdb for each evaluated epoch
|
def train(args):
set_seed(args)
with open(args.target_file, 'r') as f:
target_name = f.read().splitlines()[0]
target_output_dir = os.path.join(args.output_dir,target_name)
if not os.path.exists(target_output_dir):
os.makedirs(target_output_dir)
train_dataset = DistAF_Dataset(args)
args.training_examples = len(train_dataset)
collate = functools.partial(collate_fn, args=args)
train_dataloader = DataLoader(train_dataset, batch_size=args.batch, shuffle=True, num_workers=args.num_workers, collate_fn=collate)
model = Dist_AF_IPA(args)
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
model.to(args.device_id)
if args.model_dir:
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# optimizer.load_state_dict(
# torch.load(f'{args.model_dir}/optimizer.pt', map_location=f'{device}:{args.device_id}')
# )
if os.path.exists(f'{args.model_dir}/checkpoint.pth'):
checkpoint = torch.load(f'{args.model_dir}/checkpoint.pth', map_location=args.device_id)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer_state_dict = checkpoint['optimizer_state_dict']
optimizer.load_state_dict(optimizer_state_dict)
starting_epoch = checkpoint['epoch']
# rng_state = checkpoint['rng_state']
# torch.set_rng_state(rng_state)
else:
starting_epoch = 0
model.load_state_dict(torch.load(f'{args.model_dir}/model_state_dict.pt', map_location=args.device_id))
optimizer_state_dict = torch.load(f'{args.model_dir}/optimizer.pt', map_location=args.device_id)
for key in optimizer_state_dict.keys():
optimizer_state_dict[key] = optimizer_state_dict[key].to(args.device_id) if isinstance(optimizer_state_dict[key], torch.Tensor) else optimizer_state_dict[key]
optimizer.load_state_dict(optimizer_state_dict)
print(f'Checkpoints (model and optimizer) loaded from {args.model_dir}')
else:
starting_epoch = 0
print("----------------- Starting Training ---------------")
print(" Num examples = %d" % (int(args.training_examples)))
print(" Num Epochs = %d" % (int(args.epochs)))
print(" Batch Size = %d" % (int(args.batch)))
model.train()
#casp_results = test(args, model, test_dataloader)
#results = val(args, model, val_dataloader)
gt_keys = ['all_atom_positions', 'all_atom_mask', 'atom14_atom_exists', 'atom14_gt_exists', 'atom14_gt_positions',
'residx_atom14_to_atom37', 'residx_atom37_to_atom14', 'atom37_atom_exists', 'atom14_alt_gt_positions', 'atom14_alt_gt_exists',
'atom14_atom_is_ambiguous', 'residue_index']
gt_frames_keys = ['rigidgroups_gt_frames', 'rigidgroups_gt_exists', 'rigidgroups_group_exists', 'rigidgroups_group_is_ambiguous', 'rigidgroups_alt_gt_frames',
'torsion_angles_sin_cos', 'alt_torsion_angles_sin_cos', 'torsion_angles_mask', 'chi_angles_sin_cos', 'chi_mask', 'seq_mask']
for epoch in range(starting_epoch + 1,args.epochs+1):
for step, (batch,target) in enumerate(train_dataloader):
optimizer.zero_grad()
embedding = batch['embed']
domain_window = batch['domain_window'].squeeze(0)
dist_constraint = batch['dist_constraint'].squeeze(0)
single_repr_batch = batch['single_representation']
aatype_batch = batch["aatype"]
batch_gt = {key: batch[key] for key in gt_keys}
batch_gt_frames = {key: batch[key] for key in gt_frames_keys}
batch_gt.update({'seq_length': batch['seq_length']})
resolution = batch['resolution']
representation = None
if args.cuda:
embedding = embedding.to(args.device_id)
resolution = resolution.to(args.device_id)
for key in batch_gt.keys():
batch_gt[key] = batch_gt[key].to(args.device_id)
for key in batch_gt_frames.keys():
batch_gt_frames[key] = batch_gt_frames[key].to(args.device_id)
single_repr_batch = single_repr_batch.to(args.device_id)
#coords_batch = coords_batch.cuda(args.device_id)
#masks_batch = masks_batch.cuda(args.device_id)
aatype_batch = aatype_batch.to(args.device_id)
domain_window = domain_window.to(args.device_id)
dist_constraint = dist_constraint.to(args.device_id)
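# dummy tensor with requires_grad=True is passed through the checkpointed call so that
# torch.utils.checkpoint sees at least one input requiring grad and gradients still flow
# to the model parameters; run_ckpt itself ignores the dummy argument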
dummy = torch.Tensor(1)
dummy.requires_grad = True
if args.use_checkpoint:
def run_ckpt(model,embedding, single_repr_batch, aatype_batch, batch_gt_frames,dummy):
return model(embedding, single_repr_batch, aatype_batch, batch_gt_frames)
translation, outputs, pred_frames = ckpt(run_ckpt,model,embedding, single_repr_batch, aatype_batch, batch_gt_frames,dummy)
else:
translation, outputs, pred_frames = model(embedding, single_repr_batch, aatype_batch, batch_gt_frames)
#compute all needed losses
bb_loss, dis_loss = backbone_loss(
backbone_affine_tensor=batch_gt_frames["rigidgroups_gt_frames"][..., 0, :, :],
backbone_affine_mask=batch_gt_frames['rigidgroups_gt_exists'][..., 0],
traj=pred_frames,
dis_gt=dist_constraint,
mask_window=domain_window
)
rename = compute_renamed_ground_truth(batch_gt, outputs['positions'][-1])
sc_loss = sidechain_loss_dis(
sidechain_frames=outputs['sidechain_frames'],
sidechain_atom_pos=outputs['positions'],
rigidgroups_gt_frames=batch_gt_frames['rigidgroups_gt_frames'],
rigidgroups_alt_gt_frames=batch_gt_frames['rigidgroups_alt_gt_frames'],
rigidgroups_gt_exists=batch_gt_frames['rigidgroups_gt_exists'],
renamed_atom14_gt_positions=rename['renamed_atom14_gt_positions'],
renamed_atom14_gt_exists=rename['renamed_atom14_gt_exists'],
alt_naming_is_better=rename['alt_naming_is_better'],
dis_gt=dist_constraint,
dist_window=domain_window
)
angle_loss = supervised_chi_loss(outputs['angles'],
outputs['unnormalized_angles'],
aatype=aatype_batch,
seq_mask=batch_gt_frames['seq_mask'],
chi_mask=batch_gt_frames['chi_mask'],
chi_angles_sin_cos=batch_gt_frames['chi_angles_sin_cos'],
chi_weight=0.5,
angle_norm_weight=0.01,
dist=args.dist
)
batch_gt.update({'aatype': aatype_batch})
violation = find_structural_violations(batch_gt, outputs['positions'][-1],
violation_tolerance_factor=12,
clash_overlap_tolerance=1.5)
violation_loss_ = violation_loss(violation, batch_gt['atom14_atom_exists'])
vio_loss = torch.mean(violation_loss_)
#print(violation_loss_)
seq_len = torch.mean(batch_gt["seq_length"].float())
crop_len = torch.tensor(aatype_batch.shape[-1]).to(device=aatype_batch.device)
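# total loss: the distance-constraint term (dis_loss) is scaled by args.dist_weight times a
# factor that grows as dis_loss shrinks (12, 24 or 48), while the structural terms
# (backbone/side-chain/violation/chi) are scaled by sqrt(min(seq_len, crop_len));
# --loose_dist relaxes the factor back to 12 once dis_loss drops below 1.0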
if dis_loss > 10.0:
fape = 12 * dis_loss * args.dist_weight + (bb_loss + sc_loss + vio_loss + angle_loss) * torch.sqrt(min(seq_len, crop_len))
elif dis_loss > 5.0 and dis_loss < 10.0:
fape = 24 * dis_loss * args.dist_weight + (bb_loss + sc_loss + vio_loss + angle_loss) * torch.sqrt(min(seq_len, crop_len))
else:
if args.loose_dist and dis_loss < 1.0:
fape = 12 * dis_loss * args.dist_weight + (bb_loss + sc_loss + vio_loss + angle_loss) * torch.sqrt(min(seq_len, crop_len))
else:
fape = 48 * dis_loss * args.dist_weight + (bb_loss + sc_loss + vio_loss + angle_loss) * torch.sqrt(min(seq_len, crop_len))
positions = outputs['positions'][-1]
del bb_loss, dis_loss, sc_loss, vio_loss, angle_loss, violation_loss_, outputs
del pred_frames, translation
gc.collect()
torch.cuda.empty_cache()
print(f"Epoch:{epoch}, FAPE loss:{fape.item()}")
fape.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
optimizer.step()
# save model checkpoint
if args.val_epochs > 0 and epoch % args.val_epochs == 0 and epoch > 0:
epoch_output_dir = os.path.join(target_output_dir, f"checkpoint-{epoch}-{epoch}")
if not os.path.exists(epoch_output_dir):
os.makedirs(epoch_output_dir)
checkpoint = {'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'epoch': epoch,
'rng_state': torch.get_rng_state()}
torch.save(checkpoint, os.path.join(epoch_output_dir, "checkpoint.pth"))
torch.save(model.state_dict(), os.path.join(epoch_output_dir, "model_state_dict.pt"))
torch.save(optimizer.state_dict(), os.path.join(epoch_output_dir, "optimizer.pt"))
#save predicted pdb for each evaluated epoch | final_pos = atom14_to_atom37(positions, batch_gt) | 8 | 2023-12-01 03:48:10+00:00 | 12k
kai-wen-yang/QVix | models/instruct_blip/models/blip2_models/blip2_t5.py | [
{
"identifier": "registry",
"path": "models/instruct_blip/common/registry.py",
"snippet": "class Registry:\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):"
},
{
"identifier": "Blip2Base",
"path": "models/instruct_blip/models/blip2_models/blip2.py",
"snippet": "class Blip2Base(BaseModel):\n @classmethod\n def init_tokenizer(cls, truncation_side=\"right\"):\n tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", truncation_side=truncation_side)\n tokenizer.add_special_tokens({\"bos_token\": \"[DEC]\"})\n return tokenizer\n\n def maybe_autocast(self, dtype=torch.float16):\n # if on cpu, don't use autocast\n # if on gpu, use autocast with dtype if provided, otherwise use torch.float16\n enable_autocast = self.device != torch.device(\"cpu\")\n\n if enable_autocast:\n return torch.cuda.amp.autocast(dtype=dtype)\n else:\n return contextlib.nullcontext()\n\n @classmethod\n def init_Qformer(cls, num_query_token, vision_width, cross_attention_freq=2):\n encoder_config = BertConfig.from_pretrained(\"bert-base-uncased\")\n encoder_config.encoder_width = vision_width\n # insert cross-attention layer every other block\n encoder_config.add_cross_attention = True\n encoder_config.cross_attention_freq = cross_attention_freq\n encoder_config.query_length = num_query_token\n Qformer = BertLMHeadModel.from_pretrained(\n \"bert-base-uncased\", config=encoder_config\n )\n query_tokens = nn.Parameter(\n torch.zeros(1, num_query_token, encoder_config.hidden_size)\n )\n query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)\n return Qformer, query_tokens\n\n def init_vision_encoder(\n self, model_name, img_size, drop_path_rate, use_grad_checkpoint, precision\n ):\n assert model_name in [\n \"eva_clip_g\",\n \"eva2_clip_L\",\n \"clip_L\",\n ], \"vit model must be eva_clip_g, eva2_clip_L or clip_L\"\n if model_name == \"eva_clip_g\":\n visual_encoder = create_eva_vit_g(\n img_size, drop_path_rate, use_grad_checkpoint, precision\n )\n# elif model_name == \"eva2_clip_L\":\n# visual_encoder = create_eva2_vit_L(\n# img_size, drop_path_rate, use_grad_checkpoint, precision\n# )\n elif model_name == \"clip_L\":\n visual_encoder = create_clip_vit_L(img_size, use_grad_checkpoint, precision)\n ln_vision = LayerNorm(visual_encoder.num_features)\n self.vit_name = model_name\n return visual_encoder, ln_vision\n\n def load_from_pretrained(self, url_or_filename):\n if is_url(url_or_filename):\n cached_file = download_cached_file(\n url_or_filename, check_hash=False, progress=True\n )\n checkpoint = torch.load(cached_file, map_location=\"cpu\")\n elif os.path.isfile(url_or_filename):\n checkpoint = torch.load(url_or_filename, map_location=\"cpu\")\n else:\n raise RuntimeError(\"checkpoint url or path is invalid\")\n for key, value in checkpoint['model'].items():\n print(key)\n state_dict = checkpoint[\"model\"]\n\n msg = self.load_state_dict(state_dict, strict=False)\n\n # logging.info(\"Missing keys {}\".format(msg.missing_keys))\n logging.info(\"load checkpoint from %s\" % url_or_filename)\n\n return msg\n\n def get_optimizer_params(self, weight_decay, lr_scale=1):\n if self.vit_name == \"eva_clip_g\":\n vit_num_layers = self.visual_encoder.get_num_layer()\n lr_scales = list(lr_scale ** (vit_num_layers + 1 - i) for i in range(vit_num_layers + 2))\n\n parameter_group_names = {}\n parameter_group_vars = {}\n\n for name, param in self.named_parameters():\n if not param.requires_grad:\n continue # frozen weights\n if len(param.shape) == 1 or name.endswith(\".bias\"):\n group_name = \"no_decay\"\n this_weight_decay = 0.\n else:\n group_name = \"decay\"\n this_weight_decay = weight_decay\n if 'visual_encoder' in name:\n layer_id = self.visual_encoder.get_num_layer(name.replace('visual_encoder.',''))\n group_name = \"vit_layer_%d_%s\" % (layer_id, 
group_name)\n else:\n layer_id = None\n\n if group_name not in parameter_group_names:\n if layer_id is not None:\n scale = lr_scales[layer_id]\n else:\n scale = 1\n parameter_group_names[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr_scale\": scale\n }\n parameter_group_vars[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr_scale\": scale\n }\n parameter_group_vars[group_name][\"params\"].append(param)\n parameter_group_names[group_name][\"params\"].append(name)\n # import json\n # print(\"Param groups = %s\" % json.dumps(parameter_group_names, indent=2))\n optim_params = list(parameter_group_vars.values())\n return optim_params\n else:\n return super().get_optimizer_params(weight_decay,lr_scale)\n\n def _lemmatize(self, answers):\n def apply(answer):\n doc = self.lemmatizer(answer)\n\n words = []\n for token in doc:\n if token.pos_ in [\"NOUN\", \"VERB\"]:\n words.append(token.lemma_)\n else:\n words.append(token.text)\n answer = \" \".join(words)\n\n return answer\n\n return [apply(answer) for answer in answers]\n\n @property\n def lemmatizer(self):\n if self._lemmatizer is None:\n try:\n import spacy\n\n self._lemmatizer = spacy.load(\"en_core_web_sm\")\n except ImportError:\n logging.error(\n \"\"\"\n Please install spacy and en_core_web_sm model to apply lemmatization.\n python -m spacy download en_core_web_sm\n OR\n import spacy.cli\n spacy.cli.download(\"en_core_web_sm\")\n \"\"\"\n )\n exit(1)\n\n return self._lemmatizer"
},
{
"identifier": "disabled_train",
"path": "models/instruct_blip/models/blip2_models/blip2.py",
"snippet": "def disabled_train(self, mode=True):\n \"\"\"Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.\"\"\"\n return self"
},
{
"identifier": "T5Config",
"path": "models/instruct_blip/models/blip2_models/modeling_t5.py",
"snippet": "_CONFIG_FOR_DOC = \"T5Config\"\n_TOKENIZER_FOR_DOC = \"T5Tokenizer\"\n_CHECKPOINT_FOR_DOC = \"t5-small\"\nT5_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"t5-small\",\n \"t5-base\",\n \"t5-large\",\n \"t5-3b\",\n \"t5-11b\",\n # See all T5 models at https://huggingface.co/models?filter=t5\n]\nPARALLELIZE_DOCSTRING = r\"\"\"\n This is an experimental feature and is a subject to change at a moment's notice.\n\n Uses a device map to distribute attention modules of the model across several devices. If no device map is given,\n it will evenly distribute blocks across all devices.\n\n Args:\n device_map (`Dict[int, list]`, optional, defaults to None):\n A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always\n automatically mapped to the first device (for esoteric reasons). That means that the first device should\n have fewer attention modules mapped to it than other devices. For reference, the t5 models have the\n following number of attention modules:\n\n - t5-small: 6\n - t5-base: 12\n - t5-large: 24\n - t5-3b: 24\n - t5-11b: 24\n\n Example:\n\n ```python\n # Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules:\n model = T5ForConditionalGeneration.from_pretrained(\"t5-3b\")\n device_map = {\n 0: [0, 1, 2],\n 1: [3, 4, 5, 6, 7, 8, 9],\n 2: [10, 11, 12, 13, 14, 15, 16],\n 3: [17, 18, 19, 20, 21, 22, 23],\n }\n model.parallelize(device_map)\n ```\n\"\"\"\nDEPARALLELIZE_DOCSTRING = r\"\"\"\n Moves the model to cpu from a model parallel state.\n\n Example:\n\n ```python\n # On a 4 GPU machine with t5-3b:\n model = T5ForConditionalGeneration.from_pretrained(\"t5-3b\")\n device_map = {\n 0: [0, 1, 2],\n 1: [3, 4, 5, 6, 7, 8, 9],\n 2: [10, 11, 12, 13, 14, 15, 16],\n 3: [17, 18, 19, 20, 21, 22, 23],\n }\n model.parallelize(device_map) # Splits the model across several devices\n model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()\n ```\n\"\"\"\nT5_START_DOCSTRING = r\"\"\"\n\n The T5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text\n Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan\n Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a\n text-to-text denoising generative setting.\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`T5Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\nT5_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. 
T5 is a model with relative position embeddings so you\n should be able to pad the inputs on both the right and the left.\n\n Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for detail.\n\n [What are input IDs?](../glossary#input-ids)\n\n To know more on how to prepare `input_ids` for pretraining take a look a [T5 Training](./t5#training).\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`\n is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).\n\n To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5\n Training](./t5#training).\n decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0,\n 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0,\n 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in\n `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at\n the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded\n representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be\n input (see `past_key_values`). This is useful if you want more control over how to convert\n `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.\n\n If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value\n of `inputs_embeds`.\n\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\nT5_ENCODER_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you\n should be able to pad the inputs on both the right and the left.\n\n Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for detail.\n\n To know more on how to prepare `input_ids` for pretraining take a look a [T5 Training](./t5#training).\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n__HEAD_MASK_WARNING_MSG = \"\"\"\nThe input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,\n`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.\nIf you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,\nnum_heads)`.\n\"\"\"\ndef load_tf_weights_in_t5(model, config, tf_checkpoint_path):\n def __init__(self, hidden_size, eps=1e-6):\n def forward(self, hidden_states):\n def __init__(self, config: T5Config):\n def forward(self, hidden_states):\n def __init__(self, config: T5Config):\n def forward(self, hidden_states):\n def __init__(self, config: T5Config):\n def forward(self, hidden_states):\n def __init__(self, config: T5Config, has_relative_attention_bias=False):\n def prune_heads(self, heads):\n def _relative_position_bucket(\n relative_position, bidirectional=True, num_buckets=32, max_distance=128\n ):\n def compute_bias(self, query_length, key_length, device=None):\n def forward(\n self,\n hidden_states,\n mask=None,\n key_value_states=None,\n position_bias=None,\n past_key_value=None,\n layer_head_mask=None,\n query_length=None,\n use_cache=False,\n output_attentions=False,\n ):\n def shape(states):\n def unshape(states):\n def project(hidden_states, proj_layer, key_value_states, past_key_value):\n def __init__(self, config, has_relative_attention_bias=False):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n position_bias=None,\n layer_head_mask=None,\n past_key_value=None,\n use_cache=False,\n output_attentions=False,\n ):\n def __init__(self, config):\n def forward(\n self,\n hidden_states,\n key_value_states,\n attention_mask=None,\n position_bias=None,\n layer_head_mask=None,\n past_key_value=None,\n use_cache=False,\n query_length=None,\n output_attentions=False,\n ):\n def __init__(self, config, has_relative_attention_bias=False):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n position_bias=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n encoder_decoder_position_bias=None,\n layer_head_mask=None,\n cross_attn_layer_head_mask=None,\n past_key_value=None,\n use_cache=False,\n output_attentions=False,\n return_dict=True,\n ):\n def dummy_inputs(self):\n def _init_weights(self, module):\n def _set_gradient_checkpointing(self, module, value=False):\n def _shift_right(self, input_ids):\n def __init__(self, config, embed_tokens=None):\n def parallelize(self, device_map=None):\n def deparallelize(self):\n def get_input_embeddings(self):\n def set_input_embeddings(self, new_embeddings):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n inputs_embeds=None,\n head_mask=None,\n cross_attn_head_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n 
output_hidden_states=None,\n return_dict=None,\n ):\n def create_custom_forward(module):\n def custom_forward(*inputs):\n def __init__(self, config: T5Config):\n def parallelize(self, device_map=None):\n def deparallelize(self):\n def get_input_embeddings(self):\n def set_input_embeddings(self, new_embeddings):\n def get_encoder(self):\n def get_decoder(self):\n def _prune_heads(self, heads_to_prune):\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.BoolTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n decoder_head_mask: Optional[torch.FloatTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n decoder_inputs_embeds: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:\n def __init__(self, config: T5Config):\n def parallelize(self, device_map=None):\n def deparallelize(self):\n def get_input_embeddings(self):\n def set_input_embeddings(self, new_embeddings):\n def set_output_embeddings(self, new_embeddings):\n def get_output_embeddings(self):\n def get_encoder(self):\n def get_decoder(self):\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.BoolTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n decoder_head_mask: Optional[torch.FloatTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n reduction: Optional[str] = \"mean\",\n ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:\n def prepare_inputs_for_generation(\n self,\n input_ids,\n past=None,\n attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n use_cache=None,\n encoder_outputs=None,\n **kwargs,\n ):\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\n def _reorder_cache(self, past, beam_idx):\n def __init__(self, config: T5Config):\n def parallelize(self, device_map=None):\n def deparallelize(self):\n def get_input_embeddings(self):\n def set_input_embeddings(self, new_embeddings):\n def get_encoder(self):\n def _prune_heads(self, heads_to_prune):\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple[torch.FloatTensor], 
BaseModelOutput]:\nclass T5LayerNorm(nn.Module):\nclass T5DenseActDense(nn.Module):\nclass T5DenseGatedActDense(nn.Module):\nclass T5LayerFF(nn.Module):\nclass T5Attention(nn.Module):\nclass T5LayerSelfAttention(nn.Module):\nclass T5LayerCrossAttention(nn.Module):\nclass T5Block(nn.Module):\nclass T5PreTrainedModel(PreTrainedModel):\nclass T5Stack(T5PreTrainedModel):\nclass T5Model(T5PreTrainedModel):\nclass T5ForConditionalGeneration(T5PreTrainedModel):\nclass T5EncoderModel(T5PreTrainedModel):"
}
] | import logging
import torch
import torch.nn as nn
import spacy
from torch.cuda.amp import autocast as autocast
from transformers import T5TokenizerFast
from ...common.registry import registry
from .blip2 import Blip2Base, disabled_train
from .modeling_t5 import T5Config, T5ForConditionalGeneration | 7,514 | """
Copyright (c) 2023, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
@registry.register_model("blip2_t5")
class Blip2T5(Blip2Base):
"""
BLIP2 T5 model.
Supported model types:
- pretrain_flant5xl: pretrained model with FlanT5-XL
        - pretrain_flant5xl_vitL: pretrained model with FlanT5-XL and a ViT-L vision encoder
- pretrain_flant5xxl: pretrained model with FlanT5-XXL
        - caption_coco_flant5xl: finetuned image captioning model with FlanT5-XL
Usage:
>>> from lavis.models import load_model
>>> model = load_model("blip2_t5", "pretrain_flant5xl")
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"pretrain_flant5xl": "configs/models/blip2/blip2_pretrain_flant5xl.yaml",
"pretrain_flant5xl_vitL": "configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml",
"pretrain_flant5xxl": "configs/models/blip2/blip2_pretrain_flant5xxl.yaml",
"caption_coco_flant5xl": "configs/models/blip2/blip2_caption_flant5xl.yaml",
}
def __init__(
self,
vit_model="eva_clip_g",
img_size=224,
drop_path_rate=0,
use_grad_checkpoint=False,
vit_precision="fp16",
freeze_vit=True,
num_query_token=32,
t5_model="google/flan-t5-xl",
prompt="",
max_txt_len=32,
apply_lemmatizer=False,
):
"""
apply_lemmatizer: when set to True, postprocess predict_answers() result with lemmas.
"""
super().__init__()
self.tokenizer = self.init_tokenizer()
self.visual_encoder, self.ln_vision = self.init_vision_encoder(
vit_model, img_size, drop_path_rate, use_grad_checkpoint, vit_precision
)
if freeze_vit:
for name, param in self.visual_encoder.named_parameters():
param.requires_grad = False
self.visual_encoder = self.visual_encoder.eval()
self.visual_encoder.train = disabled_train
logging.info("freeze vision encoder")
self.Qformer, self.query_tokens = self.init_Qformer(
num_query_token, self.visual_encoder.num_features
)
self.Qformer.cls = None
self.Qformer.bert.embeddings.word_embeddings = None
self.Qformer.bert.embeddings.position_embeddings = None
for layer in self.Qformer.bert.encoder.layer:
layer.output = None
layer.intermediate = None
self.t5_tokenizer = T5TokenizerFast.from_pretrained(t5_model)
| """
Copyright (c) 2023, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
@registry.register_model("blip2_t5")
class Blip2T5(Blip2Base):
"""
BLIP2 T5 model.
Supported model types:
- pretrain_flant5xl: pretrained model with FlanT5-XL
        - pretrain_flant5xl_vitL: pretrained model with FlanT5-XL and a ViT-L vision encoder
- pretrain_flant5xxl: pretrained model with FlanT5-XXL
        - caption_coco_flant5xl: finetuned image captioning model with FlanT5-XL
Usage:
>>> from lavis.models import load_model
>>> model = load_model("blip2_t5", "pretrain_flant5xl")
"""
PRETRAINED_MODEL_CONFIG_DICT = {
"pretrain_flant5xl": "configs/models/blip2/blip2_pretrain_flant5xl.yaml",
"pretrain_flant5xl_vitL": "configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml",
"pretrain_flant5xxl": "configs/models/blip2/blip2_pretrain_flant5xxl.yaml",
"caption_coco_flant5xl": "configs/models/blip2/blip2_caption_flant5xl.yaml",
}
def __init__(
self,
vit_model="eva_clip_g",
img_size=224,
drop_path_rate=0,
use_grad_checkpoint=False,
vit_precision="fp16",
freeze_vit=True,
num_query_token=32,
t5_model="google/flan-t5-xl",
prompt="",
max_txt_len=32,
apply_lemmatizer=False,
):
"""
apply_lemmatizer: when set to True, postprocess predict_answers() result with lemmas.
"""
super().__init__()
self.tokenizer = self.init_tokenizer()
self.visual_encoder, self.ln_vision = self.init_vision_encoder(
vit_model, img_size, drop_path_rate, use_grad_checkpoint, vit_precision
)
if freeze_vit:
for name, param in self.visual_encoder.named_parameters():
param.requires_grad = False
self.visual_encoder = self.visual_encoder.eval()
self.visual_encoder.train = disabled_train
logging.info("freeze vision encoder")
self.Qformer, self.query_tokens = self.init_Qformer(
num_query_token, self.visual_encoder.num_features
)
self.Qformer.cls = None
self.Qformer.bert.embeddings.word_embeddings = None
self.Qformer.bert.embeddings.position_embeddings = None
for layer in self.Qformer.bert.encoder.layer:
layer.output = None
layer.intermediate = None
self.t5_tokenizer = T5TokenizerFast.from_pretrained(t5_model) | t5_config = T5Config.from_pretrained(t5_model) | 3 | 2023-12-04 03:28:21+00:00 | 12k |
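The row above crops the LAVIS Blip2T5 wrapper right before the language model is instantiated, and its gold next line loads the matching T5Config. A minimal usage sketch of the workflow hinted at in the class docstring follows; the image path, prompt wording, device selection, and the use of load_model_and_preprocess (rather than the bare load_model shown in the docstring) are illustrative assumptions, not values taken from the row.

# Minimal usage sketch for the Blip2T5 wrapper shown above, assuming a local
# image file named example.jpg and a CUDA device if one is available.
import torch
from PIL import Image
from lavis.models import load_model_and_preprocess

device = "cuda" if torch.cuda.is_available() else "cpu"

# Returns the model together with matching visual/text preprocessors.
model, vis_processors, _ = load_model_and_preprocess(
    name="blip2_t5", model_type="pretrain_flant5xl", is_eval=True, device=device
)

raw_image = Image.open("example.jpg").convert("RGB")  # hypothetical input image
image = vis_processors["eval"](raw_image).unsqueeze(0).to(device)

# Prompted generation; the prompt text is an illustrative assumption.
answer = model.generate({"image": image, "prompt": "Question: what is in the photo? Answer:"})
print(answer)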
hhd-dev/hhd | src/hhd/__main__.py | [
{
"identifier": "set_log_plugin",
"path": "src/hhd/logging.py",
"snippet": "def set_log_plugin(plugin: str = \"main\"):\n global _main\n with _lock:\n _plugins[get_ident()] = plugin\n _main = plugin"
},
{
"identifier": "setup_logger",
"path": "src/hhd/logging.py",
"snippet": "def setup_logger(\n log_dir: str | None = None, init: bool = True, ctx: Context | None = None\n):\n from rich import get_console\n from rich.traceback import install\n\n if log_dir:\n log_dir = expanduser(log_dir, ctx)\n\n install()\n handlers = []\n handlers.append(PluginRichHandler(PluginLogRender()))\n if log_dir:\n os.makedirs(log_dir, exist_ok=True)\n if ctx:\n fix_perms(log_dir, ctx)\n handler = UserRotatingFileHandler(\n os.path.join(log_dir, \"hhd.log\"),\n maxBytes=10_000_000,\n backupCount=10,\n ctx=ctx,\n )\n handler.setFormatter(\n NewLineFormatter(\"%(asctime)s %(module)-15s %(levelname)-8s|||%(message)s\")\n )\n handler.doRollover()\n handlers.append(handler)\n\n FORMAT = \"%(message)s\"\n logging.basicConfig(\n level=logging.INFO,\n datefmt=\"[%H:%M]\",\n format=FORMAT,\n handlers=handlers,\n )\n if init:\n get_console().print(RASTER, justify=\"full\", markup=False, highlight=False)\n logger.info(f\"Handheld Daemon starting...\")"
},
{
"identifier": "update_log_plugins",
"path": "src/hhd/logging.py",
"snippet": "def update_log_plugins():\n for t in enumerate():\n if t.ident and t.ident not in _plugins:\n _plugins[t.ident] = _main"
},
{
"identifier": "Config",
"path": "src/hhd/plugins/conf.py",
"snippet": "class Config:\n def __init__(\n self, conf: Pytree | Sequence[Pytree] = [], readonly: bool = False\n ) -> None:\n self._conf: Pytree | MutableMapping = {}\n self._lock = Lock()\n self._updated = False\n self.readonly = readonly\n self.update(conf)\n self.updated = False\n\n def update(self, conf: Pytree | Sequence[Pytree]):\n with self._lock:\n conf = deepcopy(conf)\n if isinstance(conf, Sequence):\n self._conf = parse_confs(conf, self._conf)\n else:\n if isinstance(self._conf, MutableMapping):\n parse_conf(conf, self._conf)\n else:\n self._conf = conf\n self.updated = True\n\n def __eq__(self, __value: object) -> bool:\n if not isinstance(__value, Config):\n return False\n\n if __value is self:\n return True\n\n with __value._lock, self._lock:\n return compare_dicts(__value._conf, self._conf)\n\n def __setitem__(self, key: str | tuple[str, ...], val):\n with self._lock:\n val = deepcopy(val)\n seq = to_seq(key)\n\n cont = {}\n d = cont\n for s in seq[:-1]:\n d[s] = {}\n d = d[s]\n\n d[seq[-1]] = val\n if isinstance(self._conf, MutableMapping):\n parse_conf(cont, self._conf)\n else:\n self._conf = cont\n if self._conf != cont:\n self.updated = True\n\n def __contains__(self, key: str | tuple[str, ...]):\n with self._lock:\n seq = to_seq(key)\n d = self._conf\n for s in seq:\n if s not in d:\n return False\n d = cast(Mapping, d)[s]\n return True\n\n def __getitem__(self, key: str | tuple[str, ...]) -> \"Config\":\n with self._lock:\n assert isinstance(self._conf, MutableMapping)\n seq = to_seq(key)\n d = self._conf\n for s in seq:\n d = cast(Mapping, d)[s]\n return Config([deepcopy(d)])\n\n def __delitem__(self, key: str | tuple[str, ...]):\n with self._lock:\n assert isinstance(self._conf, MutableMapping)\n seq = to_seq(key)\n d = self._conf\n for s in seq[:-1]:\n d = cast(Mapping, d)[s]\n del d[seq[-1]]\n self.updated = True\n\n def get(self, key, default: A) -> A:\n try:\n return self[key].to(type(default))\n except KeyError:\n return default\n\n def to(self, t: type[A]) -> A:\n return cast(t, self.conf)\n\n def copy(self):\n return Config([self.conf])\n\n @property\n def conf(self):\n with self._lock:\n return deepcopy(self._conf)\n\n @property\n def updated(self):\n with self._lock:\n return self._updated\n\n @updated.setter\n def updated(self, v: bool):\n with self._lock:\n self._updated = v"
},
{
"identifier": "HHDAutodetect",
"path": "src/hhd/plugins/plugin.py",
"snippet": "class Context(NamedTuple):\nclass SettingsEvent(TypedDict):\nclass ProfileEvent(TypedDict):\nclass ApplyEvent(TypedDict):\nclass ConfigEvent(TypedDict):\nclass InputEvent(TypedDict):\nclass Emitter(Protocol):\nclass HHDPlugin:\nclass HHDAutodetect(Protocol):\n def __call__(self, event: Event | Sequence[Event]) -> None:\n def open(\n self,\n emit: Emitter,\n context: Context,\n ):\n def settings(self) -> HHDSettings:\n def validate(self, tags: Sequence[str], config: Any, value: Any):\n def prepare(self, conf: Config):\n def update(self, conf: Config):\n def close(self):\n def __call__(self, existing: Sequence[HHDPlugin]) -> Sequence[HHDPlugin]:"
},
{
"identifier": "HHDSettings",
"path": "src/hhd/plugins/settings.py",
"snippet": "class ButtonSetting(TypedDict):\nclass BooleanSetting(TypedDict):\nclass MultipleSetting(TypedDict):\nclass DiscreteSetting(TypedDict):\nclass NumericalSetting(TypedDict):\nclass IntegerSetting(TypedDict):\nclass Color(TypedDict):\nclass ColorSetting(TypedDict):\nclass DisplaySetting(TypedDict):\nclass CustomSetting(TypedDict):\nclass Container(TypedDict):\nclass Mode(TypedDict):\nclass Validator(Protocol):\nSTATE_HEADER = (\n \"\\n\"\n + \"# Handheld Daemon State Config\\n\"\n + \"#\\n\"\n + \"# This file contains plugin software-only configuration that will be retained\\n\"\n + \"# across reboots. You may edit this file in lueu of using a frontend.\\n\"\n + \"# This header is on the bottom to make editing easier with e.g., nano.\\n\"\n + \"#\\n\"\n + \"# Parameters that are stored in hardware (TDP, RGB colors, etc) and\\n\"\n + \"# risky parameters that might cause instability and should be reset\\n\"\n + \"# across sessions are not part of this file.\\n\"\n + \"# Use profiles to apply changes to these settings.\\n\"\n + \"#\\n\"\n + \"# Persisted (software) parameters are marked by having a default value.\\n\"\n + \"# Non-persisted/hardware parameters do not have a default value.\\n\"\n + \"#\\n\"\n + \"# This file and comments are autogenerated. Your comments will be discarded\\n\"\n + \"# during configuration changes. Parameters with the value `default` are\\n\"\n + \"# ignored and are meant as a template for you to change them.\\n\"\n + \"#\\n\"\n + \"# - CONFIGURATION PARAMETERS\\n\"\n + \"#\"\n)\nPROFILE_HEADER = (\n \"\\n\"\n + \"# Handheld Daemon Profile Config\\n\"\n + \"#\\n\"\n + \"# This file contains the configuration options that will be set when\\n\"\n + \"# applying the profile which shares this file name.\\n\"\n + \"# This header is on the bottom to make editing easier with e.g., nano.\\n\"\n + \"#\\n\"\n + \"# Settings are applied once, when applying the profile, and only the ones\\n\"\n + \"# that are stated change. Therefore, they may drift as the system state changes\\n\"\n + \"# (e.g., using native TDP shortcuts, or controller profile shortcuts).\\n\"\n + \"#\\n\"\n + \"# It is possible to set all supported parameters using profiles, and\\n\"\n + \"# it is encouraged for you to stack profiles together.\\n\"\n + \"#\\n\"\n + \"# For example, you can have TDP only profiles that control the energy budget,\\n\"\n + \"# and controller profiles that switch controller behavior.\\n\"\n + \"# Then, depending on the game, you can apply the appropriate 2 profiles\\n\"\n + \"# together.\\n\"\n + \"#\\n\"\n + \"# This file and comments are autogenerated. Your comments will be discarded\\n\"\n + \"# during configuration changes. 
Parameters with the value `unset` are\\n\"\n + \"# ignored and are meant to act as a template for you to change them.\\n\"\n + \"#\\n\"\n + \"# - CONFIGURATION PARAMETERS\\n\"\n + \"#\"\n)\ndef parse(d: Setting | Container | Mode, prev: Sequence[str], out: MutableMapping):\ndef parse_defaults(sets: HHDSettings):\ndef fill_in_defaults(s: Setting | Container | Mode):\ndef merge_reduce(\n a: Setting | Container | Mode, b: Setting | Container | Mode\n) -> Setting | Container | Mode:\ndef merge_reduce_sec(a: Section, b: Section):\ndef merge_reduce_secs(a: HHDSettings, b: HHDSettings):\ndef merge_settings(sets: Sequence[HHDSettings]):\ndef generate_desc(s: Setting | Container | Mode):\ndef traverse_desc(set: Setting | Container | Mode, prev: Sequence[str]):\ndef tranverse_desc_sec(set: HHDSettings):\ndef dump_comment(set: HHDSettings, header: str = STATE_HEADER):\ndef dump_setting(\n set: Container | Mode,\n prev: Sequence[str],\n conf: Config,\n unmark: Literal[\"unset\", \"default\"] = \"default\",\n):\ndef merge_dicts(a: Mapping | Any, b: Mapping | Any):\ndef dump_settings(\n set: HHDSettings, conf: Config, unmark: Literal[\"unset\", \"default\"] = \"default\"\n):\ndef save_state_yaml(fn: str, set: HHDSettings, conf: Config, shash=None):\ndef save_blacklist_yaml(fn: str, avail: Sequence[str], blacklist: Sequence[str]):\ndef load_blacklist_yaml(fn: str):\ndef save_profile_yaml(\n fn: str, set: HHDSettings, conf: Config | None = None, shash=None\n):\ndef strip_defaults(c):\ndef get_default_state(set: HHDSettings):\ndef load_state_yaml(fn: str, set: HHDSettings):\ndef load_profile_yaml(fn: str):\ndef get_settings_hash(set: HHDSettings):\ndef unravel(d: Setting | Container | Mode, prev: Sequence[str], out: MutableMapping):\ndef unravel_options(settings: HHDSettings):\n def __call__(self, tags: Sequence[str], config: Any, value: Any) -> bool:\ndef validate_config(\n conf: Config, settings: HHDSettings, validator: Validator, use_defaults: bool = True\n):"
},
{
"identifier": "load_relative_yaml",
"path": "src/hhd/plugins/utils.py",
"snippet": "def load_relative_yaml(fn: str):\n \"\"\"Returns the yaml data of a file in the relative dir provided.\"\"\"\n import inspect\n import os\n import yaml\n\n script_fn = inspect.currentframe().f_back.f_globals[\"__file__\"] # type: ignore\n dirname = os.path.dirname(script_fn)\n with open(os.path.join(dirname, fn), \"r\") as f:\n return yaml.safe_load(f)"
},
{
"identifier": "Validator",
"path": "src/hhd/plugins/settings.py",
"snippet": "class Validator(Protocol):\n def __call__(self, tags: Sequence[str], config: Any, value: Any) -> bool:\n return False"
},
{
"identifier": "get_default_state",
"path": "src/hhd/plugins/settings.py",
"snippet": "def get_default_state(set: HHDSettings):\n return Config(parse_defaults(set))"
},
{
"identifier": "load_blacklist_yaml",
"path": "src/hhd/plugins/settings.py",
"snippet": "def load_blacklist_yaml(fn: str):\n import yaml\n\n try:\n with open(fn, \"r\") as f:\n return yaml.safe_load(f)[\"blacklist\"]\n except Exception as e:\n logger.warning(f\"Plugin blacklist not found, using default (empty).\")\n return [\"myplugin1\"]"
},
{
"identifier": "load_profile_yaml",
"path": "src/hhd/plugins/settings.py",
"snippet": "def load_profile_yaml(fn: str):\n import yaml\n\n try:\n with open(fn, \"r\") as f:\n state = cast(Mapping, strip_defaults(yaml.safe_load(f)) or {})\n except FileNotFoundError:\n logger.warning(\n f\"Profile file not found, using defaults. Searched location:\\n{fn}\"\n )\n return None\n except yaml.YAMLError:\n logger.warning(\n f\"Profile file is invalid, skipping loading. Searched location:\\n{fn}\"\n )\n return None\n\n return Config([state])"
},
{
"identifier": "load_state_yaml",
"path": "src/hhd/plugins/settings.py",
"snippet": "def load_state_yaml(fn: str, set: HHDSettings):\n import yaml\n\n defaults = parse_defaults(set)\n try:\n with open(fn, \"r\") as f:\n state = cast(Mapping, strip_defaults(yaml.safe_load(f)) or {})\n except FileNotFoundError:\n logger.warning(f\"State file not found. Searched location:\\n{fn}\")\n return None\n except yaml.YAMLError:\n logger.warning(f\"State file is invalid. Searched location:\\n{fn}\")\n return None\n\n return Config([defaults, state])"
},
{
"identifier": "merge_settings",
"path": "src/hhd/plugins/settings.py",
"snippet": "def merge_settings(sets: Sequence[HHDSettings]):\n if not sets:\n return {}\n if len(sets) > 1:\n return reduce(merge_reduce_secs, sets)\n return merge_reduce_secs({}, sets[0])"
},
{
"identifier": "save_blacklist_yaml",
"path": "src/hhd/plugins/settings.py",
"snippet": "def save_blacklist_yaml(fn: str, avail: Sequence[str], blacklist: Sequence[str]):\n import yaml\n\n with open(fn, \"w\") as f:\n f.write(\n (\n \"\"\n + \"# \\n\"\n + \"# Plugin blacklist\\n\"\n + \"# The plugin providers under blacklist will not run.\\n\"\n + \"# \\n\"\n + \"# Warning: this file is read only on startup.\\n\"\n + \"# `sudo systemctl restart hhd@$(whoami)`\\n\"\n + \"# \\n\"\n + \"# Available providers:\\n\"\n + f\"# [{', '.join(avail)}]\\n\\n\"\n )\n )\n yaml.safe_dump({\"blacklist\": blacklist}, f, width=85, sort_keys=False)\n\n return True"
},
{
"identifier": "save_profile_yaml",
"path": "src/hhd/plugins/settings.py",
"snippet": "def save_profile_yaml(\n fn: str, set: HHDSettings, conf: Config | None = None, shash=None\n):\n import yaml\n\n if shash is None:\n shash = get_settings_hash(set)\n if conf is None:\n conf = Config({})\n elif conf.get(\"version\", None) == shash and not conf.updated:\n return False\n\n conf[\"version\"] = shash\n with open(fn, \"w\") as f:\n yaml.safe_dump(dump_settings(set, conf, \"unset\"), f, width=85, sort_keys=False)\n f.write(\"\\n\")\n f.write(dump_comment(set, PROFILE_HEADER))\n return True"
},
{
"identifier": "get_settings_hash",
"path": "src/hhd/plugins/settings.py",
"snippet": "def get_settings_hash(set: HHDSettings):\n import hashlib\n\n return hashlib.md5(dump_comment(set).encode()).hexdigest()[:8]"
},
{
"identifier": "save_state_yaml",
"path": "src/hhd/plugins/settings.py",
"snippet": "def save_state_yaml(fn: str, set: HHDSettings, conf: Config, shash=None):\n import yaml\n\n if shash is None:\n shash = get_settings_hash(set)\n if conf.get(\"version\", None) == shash and not conf.updated:\n return False\n\n conf[\"version\"] = shash\n with open(fn, \"w\") as f:\n yaml.safe_dump(\n dump_settings(set, conf, \"default\"), f, sort_keys=False\n )\n f.write(\"\\n\")\n f.write(dump_comment(set, STATE_HEADER))\n\n return True"
},
{
"identifier": "validate_config",
"path": "src/hhd/plugins/settings.py",
"snippet": "def validate_config(\n conf: Config, settings: HHDSettings, validator: Validator, use_defaults: bool = True\n):\n options = unravel_options(settings)\n\n for k, d in options.items():\n v = conf.get(k, None)\n if d[\"type\"] == \"action\":\n default = False\n else:\n default = d[\"default\"]\n if v is None:\n if use_defaults and default is not None:\n conf[k] = default\n continue\n\n match d[\"type\"]:\n case \"mode\":\n if v not in d[\"modes\"]:\n if use_defaults:\n conf[k] = default\n else:\n del conf[k]\n case \"bool\" | \"action\":\n if v not in (False, True):\n conf[k] = bool(v)\n case \"multiple\" | \"discrete\":\n if v not in d[\"options\"]:\n if use_defaults:\n conf[k] = default\n else:\n del conf[k]\n case \"integer\":\n if not isinstance(v, int):\n conf[k] = int(v)\n if v < d[\"min\"]:\n conf[k] = d[\"min\"]\n if v > d[\"max\"]:\n conf[k] = d[\"max\"]\n case \"float\":\n if not isinstance(v, float):\n conf[k] = float(v)\n if v < d[\"min\"]:\n conf[k] = d[\"min\"]\n if v > d[\"max\"]:\n conf[k] = d[\"max\"]\n case \"color\":\n invalid = False\n\n if not isinstance(v, Mapping):\n invalid = True\n else:\n for c in (\"red\", \"green\", \"blue\"):\n if c not in v:\n invalid = True\n elif not (0 <= v[c] < 256):\n invalid = True\n\n if invalid:\n if use_defaults:\n conf[k] = default\n else:\n del conf[k]\n case \"custom\":\n if not validator(d[\"tags\"], d[\"config\"], v):\n if use_defaults:\n conf[k] = default\n else:\n del conf[k]"
},
{
"identifier": "expanduser",
"path": "src/hhd/utils.py",
"snippet": "def expanduser(path: str, user: int | str | Context | None = None):\n \"\"\"Expand ~ and ~user constructions. If user or $HOME is unknown,\n do nothing.\n\n Modified from the python implementation to support using the target userid/user.\"\"\"\n\n path = os.fspath(path)\n\n if not path.startswith(\"~\"):\n return path\n\n i = path.find(\"/\", 1)\n if i < 0:\n i = len(path)\n if i == 1:\n if \"HOME\" in os.environ and not user:\n # Fallback to environ only if user not set\n userhome = os.environ[\"HOME\"]\n else:\n try:\n import pwd\n except ImportError:\n # pwd module unavailable, return path unchanged\n return path\n try:\n if not user:\n userhome = pwd.getpwuid(os.getuid()).pw_dir\n elif isinstance(user, int):\n userhome = pwd.getpwuid(user).pw_dir\n elif isinstance(user, Context):\n userhome = pwd.getpwuid(user.euid).pw_dir\n else:\n userhome = pwd.getpwnam(user).pw_dir\n except KeyError:\n # bpo-10496: if the current user identifier doesn't exist in the\n # password database, return the path unchanged\n return path\n else:\n try:\n import pwd\n except ImportError:\n # pwd module unavailable, return path unchanged\n return path\n name = path[1:i]\n try:\n pwent = pwd.getpwnam(name)\n except KeyError:\n # bpo-10496: if the user name from the path doesn't exist in the\n # password database, return the path unchanged\n return path\n userhome = pwent.pw_dir\n\n root = \"/\"\n userhome = userhome.rstrip(root)\n return (userhome + path[i:]) or root"
},
{
"identifier": "fix_perms",
"path": "src/hhd/utils.py",
"snippet": "def fix_perms(fn: str, ctx: Context):\n os.chown(fn, ctx.euid, ctx.egid)"
},
{
"identifier": "get_context",
"path": "src/hhd/utils.py",
"snippet": "def get_context(user: str | None) -> Context | None:\n try:\n uid = os.getuid()\n gid = os.getgid()\n\n if not user:\n if not uid:\n print(f\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n print(\n \"Running as root without a specified user (`--user`). Configs will be placed at `/root/.config`.\"\n )\n print(f\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n return Context(uid, gid, uid, gid, getpass.getuser())\n\n euid = int(\n subprocess.run(\n [\"id\", \"-u\", user], capture_output=True, check=True\n ).stdout.decode()\n )\n egid = int(\n subprocess.run(\n [\"id\", \"-g\", user], capture_output=True, check=True\n ).stdout.decode()\n )\n\n if (uid or gid) and (uid != euid or gid != egid):\n print(\n f\"The user specified with --user is not the user this process was started with.\"\n )\n return None\n\n return Context(euid, egid, uid, gid, user)\n except subprocess.CalledProcessError as e:\n print(f\"Getting the user uid/gid returned an error:\\n{e.stderr.decode()}\")\n return None\n except Exception as e:\n print(f\"Failed getting permissions with error:\\n{e}\")\n return None"
},
{
"identifier": "switch_priviledge",
"path": "src/hhd/utils.py",
"snippet": "def switch_priviledge(p: Context, escalate=False):\n uid = os.geteuid()\n gid = os.getegid()\n\n if escalate:\n os.seteuid(p.uid)\n os.setegid(p.gid)\n else:\n os.setegid(p.egid)\n os.seteuid(p.euid)\n\n return uid, gid"
}
] | import argparse
import fcntl
import logging
import os
import signal
import subprocess
import sys
import pkg_resources
import hashlib
import random
from os.path import join
from threading import Condition
from threading import Event as TEvent
from threading import RLock
from time import sleep
from typing import Sequence
from .logging import set_log_plugin, setup_logger, update_log_plugins
from .plugins import (
Config,
Emitter,
Event,
HHDAutodetect,
HHDPlugin,
HHDSettings,
load_relative_yaml,
)
from .plugins.settings import (
Validator,
get_default_state,
load_blacklist_yaml,
load_profile_yaml,
load_state_yaml,
merge_settings,
save_blacklist_yaml,
save_profile_yaml,
get_settings_hash,
save_state_yaml,
validate_config,
)
from .utils import expanduser, fix_perms, get_context, switch_priviledge
from importlib.metadata import version
from .http import HHDHTTPServer | 7,460 |
# Save new blacklist file
save_blacklist_yaml(blacklist_fn, detector_names, blacklist)
fix_perms(blacklist_fn, ctx)
logger.info(f"Found plugin providers: {', '.join(list(detectors))}")
for name, autodetect in detectors.items():
plugins[name] = autodetect([])
plugin_str = "Loaded the following plugins:"
for pkg_name, sub_plugins in plugins.items():
if not sub_plugins:
continue
plugin_str += (
f"\n - {pkg_name:>8s}: {', '.join(p.name for p in sub_plugins)}"
)
logger.info(plugin_str)
# Get sorted plugins
sorted_plugins: Sequence[HHDPlugin] = []
for plugs in plugins.values():
sorted_plugins.extend(plugs)
sorted_plugins.sort(key=lambda x: x.priority)
validator: Validator = lambda tags, config, value: any(
p.validate(tags, config, value) for p in sorted_plugins
)
if not sorted_plugins:
logger.error(f"No plugins started, exiting...")
return
# Open plugins
lock = RLock()
cond = Condition(lock)
emit = EmitHolder(cond)
for p in sorted_plugins:
set_log_plugin(getattr(p, "log") if hasattr(p, "log") else "ukwn")
p.open(emit, ctx)
update_log_plugins()
set_log_plugin("main")
# Compile initial configuration
state_fn = expanduser(join(CONFIG_DIR, "state.yml"), ctx)
token_fn = expanduser(join(CONFIG_DIR, "token"), ctx)
settings: HHDSettings = {}
shash = None
# Load profiles
profiles = {}
templates = {}
conf = Config({})
profile_dir = expanduser(join(CONFIG_DIR, "profiles"), ctx)
os.makedirs(profile_dir, exist_ok=True)
fix_perms(profile_dir, ctx)
# Monitor config files for changes
should_initialize = TEvent()
initial_run = True
should_exit = TEvent()
signal.signal(signal.SIGPOLL, notifier(should_initialize, cond))
signal.signal(signal.SIGINT, notifier(should_exit, cond))
signal.signal(signal.SIGTERM, notifier(should_exit, cond))
while not should_exit.is_set():
#
# Configuration
#
# Initialize if files changed
if should_initialize.is_set() or initial_run:
# wait a bit to allow other processes to save files
if not initial_run:
sleep(POLL_DELAY)
initial_run = False
set_log_plugin("main")
logger.info(f"Reloading configuration.")
# Settings
hhd_settings = {"hhd": load_relative_yaml("settings.yml")}
# TODO: Improve check
try:
if "venv" not in exe_python:
del hhd_settings["hhd"]["version"]["children"]["update_stable"]
del hhd_settings["hhd"]["version"]["children"]["update_beta"]
except Exception as e:
logger.warning(f"Could not hide update settings. Error:\n{e}")
settings = merge_settings(
[*[p.settings() for p in sorted_plugins], hhd_settings]
)
shash = get_settings_hash(hhd_settings)
# State
new_conf = load_state_yaml(state_fn, settings)
if not new_conf:
if conf.conf:
logger.warning(f"Using previous configuration.")
else:
logger.info(f"Using default configuration.")
conf = get_default_state(settings)
else:
conf = new_conf
try:
conf["hhd.version.version"] = version("hhd")
except Exception:
pass
# Profiles
profiles = {}
templates = {}
os.makedirs(profile_dir, exist_ok=True)
fix_perms(profile_dir, ctx)
for fn in os.listdir(profile_dir):
if not fn.endswith(".yml"):
continue
name = fn.replace(".yml", "")
s = load_profile_yaml(join(profile_dir, fn))
if s:
|
logger = logging.getLogger(__name__)
CONFIG_DIR = os.environ.get("HHD_CONFIG_DIR", "~/.config/hhd")
ERROR_DELAY = 5
POLL_DELAY = 2
class EmitHolder(Emitter):
def __init__(self, condition: Condition) -> None:
self._events = []
self._condition = condition
def __call__(self, event: Event | Sequence[Event]) -> None:
with self._condition:
if isinstance(event, Sequence):
self._events.extend(event)
else:
self._events.append(event)
self._condition.notify_all()
def get_events(self, timeout: int = -1) -> Sequence[Event]:
with self._condition:
if not self._events and timeout != -1:
self._condition.wait()
ev = self._events
self._events = []
return ev
def has_events(self):
with self._condition:
return bool(self._events)
def notifier(ev: TEvent, cond: Condition):
def _inner(sig, frame):
with cond:
ev.set()
cond.notify_all()
return _inner
def print_token(ctx):
token_fn = expanduser(join(CONFIG_DIR, "token"), ctx)
try:
with open(token_fn, "r") as f:
token = f.read().strip()
logger.info(f'Current HHD token (for user "{ctx.name}") is: "{token}"')
except Exception as e:
logger.error(f"Token not found or could not be read, error:\n{e}")
logger.info(
"Enable the http endpoint to generate a token automatically.\n"
+ "Or place it under '~/.config/hhd/token' manually.\n"
+ "'chown 600 ~/.config/hhd/token' for security reasons!"
)
def main():
parser = argparse.ArgumentParser(
prog="HHD: Handheld Daemon main interface.",
description="Handheld Daemon is a daemon for managing the quirks inherent in handheld devices.",
)
parser.add_argument(
"-u",
"--user",
default=None,
help="The user whose home directory will be used to store the files (~/.config/hhd).",
dest="user",
)
parser.add_argument(
"command",
nargs="*",
default=[],
help="The command to run. If empty, run as daemon. Right now, only the command token is supported.",
)
args = parser.parse_args()
user = args.user
# Setup temporary logger for permission retrieval
ctx = get_context(user)
if not ctx:
print(f"Could not get user information. Exiting...")
return
detectors: dict[str, HHDAutodetect] = {}
plugins: dict[str, Sequence[HHDPlugin]] = {}
cfg_fds = []
# HTTP data
https = None
prev_http_cfg = None
updated = False
# Check we are in a virtual environment
# TODO: Improve
exe_python = sys.executable
try:
# Create nested hhd dir
# This might mess up permissions in upward directories
# So try to deescalate
hhd_dir = expanduser(CONFIG_DIR, ctx)
try:
switch_priviledge(ctx, False)
os.makedirs(hhd_dir, exist_ok=True)
switch_priviledge(ctx, True)
fix_perms(hhd_dir, ctx)
except Exception:
pass
# Remove old dir
try:
os.rename(
join(hhd_dir, "plugins"), join(hhd_dir, "plugins_old_USE_STATEYML")
)
except Exception:
pass
set_log_plugin("main")
setup_logger(join(CONFIG_DIR, "log"), ctx=ctx)
if args.command:
if args.command[0] == "token":
print_token(ctx)
return
else:
logger.error(f"Command '{args.command[0]}' is unknown. Ignoring...")
# Use blacklist
blacklist_fn = join(hhd_dir, "plugins.yml")
blacklist = load_blacklist_yaml(blacklist_fn)
logger.info(f"Running autodetection...")
detector_names = []
for autodetect in pkg_resources.iter_entry_points("hhd.plugins"):
name = autodetect.name
detector_names.append(name)
if name in blacklist:
logger.info(f"Skipping blacklisted provider '{name}'.")
else:
detectors[autodetect.name] = autodetect.resolve()
# Save new blacklist file
save_blacklist_yaml(blacklist_fn, detector_names, blacklist)
fix_perms(blacklist_fn, ctx)
logger.info(f"Found plugin providers: {', '.join(list(detectors))}")
for name, autodetect in detectors.items():
plugins[name] = autodetect([])
plugin_str = "Loaded the following plugins:"
for pkg_name, sub_plugins in plugins.items():
if not sub_plugins:
continue
plugin_str += (
f"\n - {pkg_name:>8s}: {', '.join(p.name for p in sub_plugins)}"
)
logger.info(plugin_str)
# Get sorted plugins
sorted_plugins: Sequence[HHDPlugin] = []
for plugs in plugins.values():
sorted_plugins.extend(plugs)
sorted_plugins.sort(key=lambda x: x.priority)
validator: Validator = lambda tags, config, value: any(
p.validate(tags, config, value) for p in sorted_plugins
)
if not sorted_plugins:
logger.error(f"No plugins started, exiting...")
return
# Open plugins
lock = RLock()
cond = Condition(lock)
emit = EmitHolder(cond)
for p in sorted_plugins:
set_log_plugin(getattr(p, "log") if hasattr(p, "log") else "ukwn")
p.open(emit, ctx)
update_log_plugins()
set_log_plugin("main")
# Compile initial configuration
state_fn = expanduser(join(CONFIG_DIR, "state.yml"), ctx)
token_fn = expanduser(join(CONFIG_DIR, "token"), ctx)
settings: HHDSettings = {}
shash = None
# Load profiles
profiles = {}
templates = {}
conf = Config({})
profile_dir = expanduser(join(CONFIG_DIR, "profiles"), ctx)
os.makedirs(profile_dir, exist_ok=True)
fix_perms(profile_dir, ctx)
# Monitor config files for changes
should_initialize = TEvent()
initial_run = True
should_exit = TEvent()
signal.signal(signal.SIGPOLL, notifier(should_initialize, cond))
signal.signal(signal.SIGINT, notifier(should_exit, cond))
signal.signal(signal.SIGTERM, notifier(should_exit, cond))
while not should_exit.is_set():
#
# Configuration
#
# Initialize if files changed
if should_initialize.is_set() or initial_run:
# wait a bit to allow other processes to save files
if not initial_run:
sleep(POLL_DELAY)
initial_run = False
set_log_plugin("main")
logger.info(f"Reloading configuration.")
# Settings
hhd_settings = {"hhd": load_relative_yaml("settings.yml")}
# TODO: Improve check
try:
if "venv" not in exe_python:
del hhd_settings["hhd"]["version"]["children"]["update_stable"]
del hhd_settings["hhd"]["version"]["children"]["update_beta"]
except Exception as e:
logger.warning(f"Could not hide update settings. Error:\n{e}")
settings = merge_settings(
[*[p.settings() for p in sorted_plugins], hhd_settings]
)
shash = get_settings_hash(hhd_settings)
# State
new_conf = load_state_yaml(state_fn, settings)
if not new_conf:
if conf.conf:
logger.warning(f"Using previous configuration.")
else:
logger.info(f"Using default configuration.")
conf = get_default_state(settings)
else:
conf = new_conf
try:
conf["hhd.version.version"] = version("hhd")
except Exception:
pass
# Profiles
profiles = {}
templates = {}
os.makedirs(profile_dir, exist_ok=True)
fix_perms(profile_dir, ctx)
for fn in os.listdir(profile_dir):
if not fn.endswith(".yml"):
continue
name = fn.replace(".yml", "")
s = load_profile_yaml(join(profile_dir, fn))
if s: | validate_config(s, settings, validator, use_defaults=False) | 17 | 2023-11-30 21:44:04+00:00 | 12k |
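The hhd row above ends inside the daemon's main loop, which is woken by POSIX signals through the notifier helper: each handler sets a threading Event and notifies a shared Condition. Below is a standalone sketch of that wake-up pattern; SIGUSR1 stands in for the SIGPOLL used in the source, the 2-second wait timeout mirrors POLL_DELAY, and the cond.wait call is an assumption about how the loop sleeps, since that part falls outside the cropped code.

# Standalone sketch of the signal -> Event -> Condition wake-up pattern used by
# hhd's main loop above. SIGUSR1 and the 2-second timeout are assumptions.
import signal
from threading import Condition, Event


def notifier(ev: Event, cond: Condition):
    # Mirrors hhd's notifier(): the returned handler sets the event and wakes
    # any thread waiting on the shared condition.
    def _inner(sig, frame):
        with cond:
            ev.set()
            cond.notify_all()

    return _inner


should_reload = Event()
should_exit = Event()
cond = Condition()

signal.signal(signal.SIGUSR1, notifier(should_reload, cond))  # reload trigger
signal.signal(signal.SIGINT, notifier(should_exit, cond))     # Ctrl+C exits

while not should_exit.is_set():
    if should_reload.is_set():
        should_reload.clear()
        print("reloading configuration")  # placeholder for the real reload work
    with cond:
        # Sleep until a signal arrives (or the timeout elapses) instead of
        # busy-polling; hhd performs its waiting further down in the real loop.
        cond.wait(timeout=2.0)

print("exiting")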
IDSIA/automated-cl | torchmeta_local/datasets/helpers_tabular.py | [
{
"identifier": "Letter",
"path": "torchmeta_local/datasets/letter.py",
"snippet": "class Letter(CombinationMetaDataset):\n \"\"\"The Letter Image Recognition Dataset \"\"\"\n def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False,\n meta_split=None, transform=None, target_transform=None, dataset_transform=None,\n class_augmentations=None, download=False):\n \"\"\"\n Letter Image Recognition Data [1]:\n open-ml-id: 6\n https://archive.ics.uci.edu/ml/datasets/Letter+Recognition - 01-01-1991\n\n The objective is to identify each of a large number of black-and-white\n rectangular pixel displays as one of the 26 capital letters in the English\n alphabet. The character images were based on 20 different fonts and each\n letter within these 20 fonts was randomly distorted to produce a file of\n 20,000 unique stimuli. Each stimulus was converted into 16 primitive\n numerical attributes (statistical moments and edge counts) which were then\n scaled to fit into a range of integer values from 0 through 15. We\n typically train on the first 16000 items and then use the resulting model\n to predict the letter category for the remaining 4000. See the article\n cited above for more details.\n\n Parameters\n ----------\n root : string\n Root directory where the dataset folder `letter` exists.\n\n num_classes_per_task : int\n Number of classes per tasks. This corresponds to \"N\" in \"N-way\"\n classification.\n\n meta_train : bool (default: `False`)\n Use the meta-train split of the dataset. If set to `True`, then the\n arguments `meta_val` and `meta_test` must be set to `False`. Exactly one\n of these three arguments must be set to `True`.\n\n meta_val : bool (default: `False`)\n Use the meta-validation split of the dataset. If set to `True`, then the\n arguments `meta_train` and `meta_test` must be set to `False`. Exactly\n one of these three arguments must be set to `True`.\n\n meta_test : bool (default: `False`)\n Use the meta-test split of the dataset. If set to `True`, then the\n arguments `meta_train` and `meta_val` must be set to `False`. Exactly\n one of these three arguments must be set to `True`.\n\n meta_split : string in {'train', 'val', 'test'}, optional\n Name of the split to use. This overrides the arguments `meta_train`,\n `meta_val` and `meta_test` if all three are set to `False`.\n\n transform : callable, optional\n A function/transform that takes a numpy array or a pytorch array\n (depending when the transforms is applied), and returns a transformed\n version.\n\n target_transform : callable, optional\n A function/transform that takes a target, and returns a transformed\n version.\n\n dataset_transform : callable, optional\n A function/transform that takes a dataset (ie. a task), and returns a\n transformed version of it. E.g. `torchmeta_local.transforms.ClassSplitter()`.\n\n class_augmentations : list of callable, optional\n A list of functions that augment the dataset with new classes. These\n classes are transformations of existing classes.\n\n download : bool (default: `False`)\n If `True`, downloads the original files and processes the dataset in the\n root directory (under the `letter` folder). If the dataset\n is already available, this does not download/process the dataset again.\n\n References\n -----\n [1] P. W. Frey and D. J. Slate. \"Letter Recognition Using Holland-style\n Adaptive Classifiers\". 
Machine Learning 6(2), 1991\n \"\"\"\n dataset = LetterClassDataset(root,\n meta_train=meta_train,\n meta_val=meta_val,\n meta_test=meta_test,\n meta_split=meta_split,\n transform=transform,\n class_augmentations=class_augmentations,\n download=download)\n super(Letter, self).__init__(dataset,\n num_classes_per_task,\n target_transform=target_transform,\n dataset_transform=dataset_transform)"
},
{
"identifier": "PlantsTexture",
"path": "torchmeta_local/datasets/one_hundred_plants_texture.py",
"snippet": "class PlantsTexture(CombinationMetaDataset):\n \"\"\"The PlantsTexture dataset \"\"\"\n def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False,\n meta_split=None, transform=None, target_transform=None, dataset_transform=None,\n class_augmentations=None, download=False, process_features=False):\n \"\"\"\n One-hundred plant species leaves dataset (Class = Texture) [1], [2], [3]\n open-ml-id: 1493\n https://archive.ics.uci.edu/ml/datasets/One-hundred+plant+species+leaves+data+set) - 2010\n\n\n (a) Original owners of colour Leaves Samples:\n\n James Cope, Thibaut Beghin, Paolo Remagnino, Sarah Barman.\n The colour images are not included.\n The Leaves were collected in the Royal Botanic Gardens, Kew, UK.\n email: [email protected]\n\n (b) This dataset consists of work carried out by James Cope, Charles Mallah, and James Orwell.\n Donor of database Charles Mallah: [email protected]; James Cope: [email protected]\n\n The original data directory contains the binary images (masks) of the leaf samples (colour images not included).\n There are three features for each image: Shape, Margin and Texture.\n For each feature, a 64 element vector is given per leaf sample.\n These vectors are taken as a contiguous descriptor (for shape) or histograms (for texture and margin).\n So, there are three different files, one for each feature problem:\n * 'data_Sha_64.txt' -> prediction based on shape\n * 'data_Tex_64.txt' -> prediction based on texture [dataset provided here]\n * 'data_Mar_64.txt' -> prediction based on margin\n\n Each row has a 64-element feature vector followed by the Class label.\n There is a total of 1600 samples with 16 samples per leaf class (100 classes), and no missing values.\n\n Three 64 element feature vectors per sample.\n\n Parameters\n ----------\n root : string\n Root directory where the dataset folder `one_hundred_plants_texture` exists.\n\n num_classes_per_task : int\n Number of classes per tasks. This corresponds to \"N\" in \"N-way\"\n classification.\n\n meta_train : bool (default: `False`)\n Use the meta-train split of the dataset. If set to `True`, then the\n arguments `meta_val` and `meta_test` must be set to `False`. Exactly one\n of these three arguments must be set to `True`.\n\n meta_val : bool (default: `False`)\n Use the meta-validation split of the dataset. If set to `True`, then the\n arguments `meta_train` and `meta_test` must be set to `False`. Exactly\n one of these three arguments must be set to `True`.\n\n meta_test : bool (default: `False`)\n Use the meta-test split of the dataset. If set to `True`, then the\n arguments `meta_train` and `meta_val` must be set to `False`. Exactly\n one of these three arguments must be set to `True`.\n\n meta_split : string in {'train', 'val', 'test'}, optional\n Name of the split to use. This overrides the arguments `meta_train`,\n `meta_val` and `meta_test` if all three are set to `False`.\n\n transform : callable, optional\n A function/transform that takes a numpy array or a pytorch array\n (depending when the transforms is applied), and returns a transformed\n version.\n\n target_transform : callable, optional\n A function/transform that takes a target, and returns a transformed\n version.\n\n dataset_transform : callable, optional\n A function/transform that takes a dataset (ie. a task), and returns a\n transformed version of it. E.g. 
`torchmeta_local.transforms.ClassSplitter()`.\n\n class_augmentations : list of callable, optional\n A list of functions that augment the dataset with new classes. These\n classes are transformations of existing classes.\n\n download : bool (default: `False`)\n If `True`, downloads the original files and processes the dataset in the\n root directory (under the `one_hundred_plants_texture' folder). If the dataset\n is already available, this does not download/process the dataset again.\n\n process_features : bool (default: `False`)\n If `True`, normalizes each feature f with (f-lower) / (upper - lower) where upper\n and lower are the min and max values of feature f of the meta-train dataset.\n\n References\n -----\n [1] Charles Mallah, James Cope, James Orwell.\n Plant Leaf Classification Using Probabilistic Integration of Shape, Texture and Margin Features.\n Signal Processing, Pattern Recognition and Applications, in press.\n\n [2] J. Cope, P. Remagnino, S. Barman, and P. Wilkin.\n Plant texture classification using gabor co-occurrences.\n Advances in Visual Computing, pages 699-677, 2010.\n\n [3] T. Beghin, J. Cope, P. Remagnino, and S. Barman.\n Shape and texture based plant leaf classification.\n In: Advanced Concepts for Intelligent Vision Systems, pages 345-353. Springer, 2010.\n\n \"\"\"\n dataset = PlantsTextureClassDataset(root,\n meta_train=meta_train,\n meta_val=meta_val,\n meta_test=meta_test,\n meta_split=meta_split,\n transform=transform,\n class_augmentations=class_augmentations,\n download=download,\n normalize=process_features)\n super(PlantsTexture, self).__init__(dataset,\n num_classes_per_task,\n target_transform=target_transform,\n dataset_transform=dataset_transform)"
},
{
"identifier": "PlantsShape",
"path": "torchmeta_local/datasets/one_hundred_plants_shape.py",
"snippet": "class PlantsShape(CombinationMetaDataset):\n \"\"\"The PlantsShape dataset \"\"\"\n def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False,\n meta_split=None, transform=None, target_transform=None, dataset_transform=None,\n class_augmentations=None, download=False, process_features=False):\n \"\"\"\n One-hundred plant species leaves dataset (Class = Shape) [1], [2], [3]\n open-ml-id: 1492\n https://archive.ics.uci.edu/ml/datasets/One-hundred+plant+species+leaves+data+set) - 2010\n\n\n (a) Original owners of colour Leaves Samples:\n\n James Cope, Thibaut Beghin, Paolo Remagnino, Sarah Barman.\n The colour images are not included.\n The Leaves were collected in the Royal Botanic Gardens, Kew, UK.\n email: [email protected]\n\n (b) This dataset consists of work carried out by James Cope, Charles Mallah, and James Orwell.\n Donor of database Charles Mallah: [email protected]; James Cope: [email protected]\n\n The original data directory contains the binary images (masks) of the leaf samples (colour images not included).\n There are three features for each image: Shape, Margin and Texture.\n For each feature, a 64 element vector is given per leaf sample.\n These vectors are taken as a contiguous descriptor (for shape) or histograms (for texture and margin).\n So, there are three different files, one for each feature problem:\n * 'data_Sha_64.txt' -> prediction based on shape [dataset provided here]\n * 'data_Tex_64.txt' -> prediction based on texture\n * 'data_Mar_64.txt' -> prediction based on margin\n\n Each row has a 64-element feature vector followed by the Class label.\n There is a total of 1600 samples with 16 samples per leaf class (100 classes), and no missing values.\n\n Three 64 element feature vectors per sample.\n\n Parameters\n ----------\n root : string\n Root directory where the dataset folder `one_hundred_plants_shape` exists.\n\n num_classes_per_task : int\n Number of classes per tasks. This corresponds to \"N\" in \"N-way\"\n classification.\n\n meta_train : bool (default: `False`)\n Use the meta-train split of the dataset. If set to `True`, then the\n arguments `meta_val` and `meta_test` must be set to `False`. Exactly one\n of these three arguments must be set to `True`.\n\n meta_val : bool (default: `False`)\n Use the meta-validation split of the dataset. If set to `True`, then the\n arguments `meta_train` and `meta_test` must be set to `False`. Exactly\n one of these three arguments must be set to `True`.\n\n meta_test : bool (default: `False`)\n Use the meta-test split of the dataset. If set to `True`, then the\n arguments `meta_train` and `meta_val` must be set to `False`. Exactly\n one of these three arguments must be set to `True`.\n\n meta_split : string in {'train', 'val', 'test'}, optional\n Name of the split to use. This overrides the arguments `meta_train`,\n `meta_val` and `meta_test` if all three are set to `False`.\n\n transform : callable, optional\n A function/transform that takes a numpy array or a pytorch array\n (depending when the transforms is applied), and returns a transformed\n version.\n\n target_transform : callable, optional\n A function/transform that takes a target, and returns a transformed\n version.\n\n dataset_transform : callable, optional\n A function/transform that takes a dataset (ie. a task), and returns a\n transformed version of it. E.g. 
`torchmeta_local.transforms.ClassSplitter()`.\n\n class_augmentations : list of callable, optional\n A list of functions that augment the dataset with new classes. These\n classes are transformations of existing classes.\n\n download : bool (default: `False`)\n If `True`, downloads the original files and processes the dataset in the\n root directory (under the `one_hundred_plants_shape' folder). If the dataset\n is already available, this does not download/process the dataset again.\n\n process_features : bool (default: `False`)\n If `True`, normalizes each feature f according to (f-mean) / (std + 1e-10) where\n mean and std are the mean and standard deviation of the feature f of the meta-train dataset.\n\n References\n -----\n [1] Charles Mallah, James Cope, James Orwell.\n Plant Leaf Classification Using Probabilistic Integration of Shape, Texture and Margin Features.\n Signal Processing, Pattern Recognition and Applications, in press.\n\n [2] J. Cope, P. Remagnino, S. Barman, and P. Wilkin.\n Plant texture classification using gabor co-occurrences.\n Advances in Visual Computing, pages 699-677, 2010.\n\n [3] T. Beghin, J. Cope, P. Remagnino, and S. Barman.\n Shape and texture based plant leaf classification.\n In: Advanced Concepts for Intelligent Vision Systems, pages 345-353. Springer, 2010.\n\n \"\"\"\n dataset = PlantsShapeClassDataset(root,\n meta_train=meta_train,\n meta_val=meta_val,\n meta_test=meta_test,\n meta_split=meta_split,\n transform=transform,\n class_augmentations=class_augmentations,\n download=download,\n normalize=process_features)\n super(PlantsShape, self).__init__(dataset,\n num_classes_per_task,\n target_transform=target_transform,\n dataset_transform=dataset_transform)"
},
{
"identifier": "PlantsMargin",
"path": "torchmeta_local/datasets/one_hundred_plants_margin.py",
"snippet": "class PlantsMargin(CombinationMetaDataset):\n \"\"\"The PlantsMargin dataset \"\"\"\n def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False,\n meta_split=None, transform=None, target_transform=None, dataset_transform=None,\n class_augmentations=None, download=False, process_features=False):\n \"\"\"\n One-hundred plant species leaves dataset (Class = Margin) [1], [2], [3]\n open-ml-id: 1491\n https://archive.ics.uci.edu/ml/datasets/One-hundred+plant+species+leaves+data+set) - 2010\n\n\n (a) Original owners of colour Leaves Samples:\n\n James Cope, Thibaut Beghin, Paolo Remagnino, Sarah Barman.\n The colour images are not included.\n The Leaves were collected in the Royal Botanic Gardens, Kew, UK.\n email: [email protected]\n\n (b) This dataset consists of work carried out by James Cope, Charles Mallah, and James Orwell.\n Donor of database Charles Mallah: [email protected]; James Cope: [email protected]\n\n The original data directory contains the binary images (masks) of the leaf samples (colour images not included).\n There are three features for each image: Shape, Margin and Texture.\n For each feature, a 64 element vector is given per leaf sample.\n These vectors are taken as a contiguous descriptor (for shape) or histograms (for texture and margin).\n So, there are three different files, one for each feature problem:\n * 'data_Sha_64.txt' -> prediction based on shape\n * 'data_Tex_64.txt' -> prediction based on texture\n * 'data_Mar_64.txt' -> prediction based on margin [dataset provided here]\n\n Each row has a 64-element feature vector followed by the Class label.\n There is a total of 1600 samples with 16 samples per leaf class (100 classes), and no missing values.\n\n Three 64 element feature vectors per sample.\n\n Parameters\n ----------\n root : string\n Root directory where the dataset folder `one_hundred_plants_margin` exists.\n\n num_classes_per_task : int\n Number of classes per tasks. This corresponds to \"N\" in \"N-way\"\n classification.\n\n meta_train : bool (default: `False`)\n Use the meta-train split of the dataset. If set to `True`, then the\n arguments `meta_val` and `meta_test` must be set to `False`. Exactly one\n of these three arguments must be set to `True`.\n\n meta_val : bool (default: `False`)\n Use the meta-validation split of the dataset. If set to `True`, then the\n arguments `meta_train` and `meta_test` must be set to `False`. Exactly\n one of these three arguments must be set to `True`.\n\n meta_test : bool (default: `False`)\n Use the meta-test split of the dataset. If set to `True`, then the\n arguments `meta_train` and `meta_val` must be set to `False`. Exactly\n one of these three arguments must be set to `True`.\n\n meta_split : string in {'train', 'val', 'test'}, optional\n Name of the split to use. This overrides the arguments `meta_train`,\n `meta_val` and `meta_test` if all three are set to `False`.\n\n transform : callable, optional\n A function/transform that takes a numpy array or a pytorch array\n (depending when the transforms is applied), and returns a transformed\n version.\n\n target_transform : callable, optional\n A function/transform that takes a target, and returns a transformed\n version.\n\n dataset_transform : callable, optional\n A function/transform that takes a dataset (ie. a task), and returns a\n transformed version of it. E.g. 
`torchmeta_local.transforms.ClassSplitter()`.\n\n class_augmentations : list of callable, optional\n A list of functions that augment the dataset with new classes. These\n classes are transformations of existing classes.\n\n download : bool (default: `False`)\n If `True`, downloads the original files and processes the dataset in the\n root directory (under the `one_hundred_plants_margin' folder). If the dataset\n is already available, this does not download/process the dataset again.\n\n process_features : bool (default: `False`)\n If `True`, normalizes each feature f with (f-lower) / (upper - lower) where upper\n and lower are the min and max values of feature f of the meta-train dataset.\n\n References\n -----\n [1] Charles Mallah, James Cope, James Orwell.\n Plant Leaf Classification Using Probabilistic Integration of Shape, Texture and Margin Features.\n Signal Processing, Pattern Recognition and Applications, in press.\n\n [2] J. Cope, P. Remagnino, S. Barman, and P. Wilkin.\n Plant texture classification using gabor co-occurrences.\n Advances in Visual Computing, pages 699-677, 2010.\n\n [3] T. Beghin, J. Cope, P. Remagnino, and S. Barman.\n Shape and texture based plant leaf classification.\n In: Advanced Concepts for Intelligent Vision Systems, pages 345-353. Springer, 2010.\n\n \"\"\"\n dataset = PlantsMarginClassDataset(root,\n meta_train=meta_train,\n meta_val=meta_val,\n meta_test=meta_test,\n meta_split=meta_split,\n transform=transform,\n class_augmentations=class_augmentations,\n download=download,\n normalize=process_features)\n super(PlantsMargin, self).__init__(dataset,\n num_classes_per_task,\n target_transform=target_transform,\n dataset_transform=dataset_transform)"
},
{
"identifier": "Bach",
"path": "torchmeta_local/datasets/bach.py",
"snippet": "class Bach(CombinationMetaDataset):\n \"\"\"The Bach dataset \"\"\"\n def __init__(self, root, num_classes_per_task=None, meta_train=False, meta_val=False, meta_test=False,\n meta_split=None, transform=None, target_transform=None, dataset_transform=None,\n class_augmentations=None, download=False, process_features=True, min_num_samples_per_class=1):\n \"\"\"\n Bach Choral Harmony dataset [1], [2]\n open-ml-id: 4552\n https://archive.ics.uci.edu/ml/datasets/Bach+Choral+Harmony\n\n Abstract: The data set is composed of 60 chorales (5665 events) by\n J.S. Bach (1675-1750). Each event of each chorale is labelled using 1\n among 101 chord labels and described through 14 features.\n\n Data Set Information:\n\n Pitch classes information has been extracted from MIDI sources downloaded\n from (JSB Chorales)[http://www.jsbchorales.net/]. Meter information has\n been computed through the Meter program which is part of the Melisma\n music analyser (Melisma)[http://www.link.cs.cmu.edu/music-analysis/].\n Chord labels have been manually annotated by a human expert.\n\n Attribute Information:\n\n 1. Choral ID: corresponding to the file names from (Bach Central)[http://www.bachcentral.com/].\n 2. Event number: index (starting from 1) of the event inside the chorale.\n 3-14. Pitch classes: YES/NO depending on whether a given pitch is present.\n Pitch classes/attribute correspondence is as follows:\n C -> 3\n C#/Db -> 4\n D -> 5\n ...\n B -> 14\n 15. Bass: Pitch class of the bass note\n 16. Meter: integers from 1 to 5. Lower numbers denote less accented events,\n higher numbers denote more accented events.\n 17. Chord label: Chord resonating during the given event.\n\n Notes\n ----------\n\n The features V1 and V2 are dropped during the processing. V1 is the Choral ID. V2 is\n the event number of the event inside the chorale.\n\n Parameters\n ----------\n root : string\n Root directory where the dataset folder `bach` exists.\n\n num_classes_per_task : int\n Number of classes per tasks. This corresponds to \"N\" in \"N-way\"\n classification.\n\n meta_train : bool (default: `False`)\n Use the meta-train split of the dataset. If set to `True`, then the\n arguments `meta_val` and `meta_test` must be set to `False`. Exactly one\n of these three arguments must be set to `True`.\n\n meta_val : bool (default: `False`)\n Use the meta-validation split of the dataset. If set to `True`, then the\n arguments `meta_train` and `meta_test` must be set to `False`. Exactly\n one of these three arguments must be set to `True`.\n\n meta_test : bool (default: `False`)\n Use the meta-test split of the dataset. If set to `True`, then the\n arguments `meta_train` and `meta_val` must be set to `False`. Exactly\n one of these three arguments must be set to `True`.\n\n meta_split : string in {'train', 'val', 'test'}, optional\n Name of the split to use. This overrides the arguments `meta_train`,\n `meta_val` and `meta_test` if all three are set to `False`.\n\n transform : callable, optional\n A function/transform that takes a numpy array or a pytorch array\n (depending when the transforms is applied), and returns a transformed\n version.\n\n target_transform : callable, optional\n A function/transform that takes a target, and returns a transformed\n version.\n\n dataset_transform : callable, optional\n A function/transform that takes a dataset (ie. a task), and returns a\n transformed version of it. E.g. 
`torchmeta_local.transforms.ClassSplitter()`.\n\n class_augmentations : list of callable, optional\n A list of functions that augment the dataset with new classes. These\n classes are transformations of existing classes.\n\n download : bool (default: `False`)\n If `True`, downloads the original files and processes the dataset in the\n root directory (under the `bach' folder). If the dataset\n is already available, this does not download/process the dataset again.\n\n process_features : bool (default: `True`)\n If `True`, normalizes the numeric feature f according to (f-lower) / (upper - lower) where upper\n and lower are the min and max values of feature f of the meta-train dataset.\n And also one-hot encodes the categorical features.\n\n min_num_samples_per_class : int (default: 1)\n Minimal number of samples per class that need to be present for the class to be used.\n\n References\n -----\n\n [1] D. P. Radicioni and R. Esposito. Advances in Music Information Retrieval,\n chapter BREVE: an HMPerceptron-Based Chord Recognition System.\n Studies in Computational Intelligence,\n Zbigniew W. Ras and Alicja Wieczorkowska (Editors), Springer, 2010.\n\n [2] Esposito, R. and Radicioni, D. P., CarpeDiem: Optimizing the Viterbi\n Algorithm and Applications to Supervised Sequential Learning, Journal\n of Machine Learning Research, 10(Aug):1851-1880, 2009.\n \"\"\"\n dataset = BachClassDataset(root,\n meta_train=meta_train,\n meta_val=meta_val,\n meta_test=meta_test,\n meta_split=meta_split,\n transform=transform,\n class_augmentations=class_augmentations,\n download=download,\n process_features=process_features,\n min_num_samples_per_class=min_num_samples_per_class)\n super(Bach, self).__init__(dataset,\n num_classes_per_task,\n target_transform=target_transform,\n dataset_transform=dataset_transform)"
},
{
"identifier": "Categorical",
"path": "torchmeta_local/transforms/categorical.py",
"snippet": "class Categorical(TargetTransform):\n \"\"\"Target transform to return labels in `[0, num_classes)`.\n\n Parameters\n ----------\n num_classes : int, optional\n Number of classes. If `None`, then the number of classes is inferred\n from the number of individual labels encountered.\n\n Examples\n --------\n >>> dataset = Omniglot('data', num_classes_per_task=5, meta_train=True)\n >>> task = dataset.sample_task()\n >>> task[0]\n (<PIL.Image.Image image mode=L size=105x105 at 0x11EC797F0>,\n ('images_evaluation/Glagolitic/character12', None))\n\n >>> dataset = Omniglot('data', num_classes_per_task=5, meta_train=True,\n ... target_transform=Categorical(5))\n >>> task = dataset.sample_task()\n >>> task[0]\n (<PIL.Image.Image image mode=L size=105x105 at 0x11ED3F668>, 2)\n \"\"\"\n def __init__(self, num_classes=None):\n super(Categorical, self).__init__()\n self.num_classes = num_classes\n self._classes = None\n self._labels = None\n\n def reset(self):\n self._classes = None\n self._labels = None\n\n @property\n def classes(self):\n if self._classes is None:\n self._classes = defaultdict(None)\n if self.num_classes is None:\n default_factory = lambda: len(self._classes)\n else:\n default_factory = lambda: self.labels[len(self._classes)]\n self._classes.default_factory = default_factory\n if (self.num_classes is not None) and (len(self._classes) > self.num_classes):\n raise ValueError('The number of individual labels ({0}) is greater '\n 'than the number of classes defined by `num_classes` '\n '({1}).'.format(len(self._classes), self.num_classes))\n return self._classes\n\n @property\n def labels(self):\n if (self._labels is None) and (self.num_classes is not None):\n # TODO: Replace torch.randperm with seed-friendly counterpart\n self._labels = torch.randperm(self.num_classes).tolist()\n return self._labels\n\n def __call__(self, target):\n return self.classes[target]\n\n def __repr__(self):\n return '{0}({1})'.format(self.__class__.__name__, self.num_classes or '')"
},
{
"identifier": "ClassSplitter",
"path": "torchmeta_local/transforms/splitters.py",
"snippet": "def ClassSplitter(task=None, *args, **kwargs):\n return apply_wrapper(ClassSplitter_(*args, **kwargs), task)"
},
{
"identifier": "NumpyToTorch",
"path": "torchmeta_local/transforms/tabular_transforms.py",
"snippet": "class NumpyToTorch:\n \"\"\"Convert a numpy.ndarray to a pytorch.tensor.\"\"\"\n\n def __call__(self, numpy_array: np.ndarray) -> torch.tensor:\n \"\"\"\n Parameters\n ----------\n numpy_array : np.ndarray\n the numpy array\n\n Returns\n -------\n torch.tensor\n converted torch array with the same values as the numpy array\n \"\"\"\n return torch.from_numpy(numpy_array).contiguous()\n\n def __repr__(self):\n return self.__class__.__name__ + '()'"
}
] | import warnings
from torchmeta_local.datasets import Letter, PlantsTexture, PlantsShape, PlantsMargin, Bach
from torchmeta_local.transforms import Categorical, ClassSplitter
from torchmeta_local.transforms.tabular_transforms import NumpyToTorch | 8,523 |
__all__ = [
'letter',
'plants_texture',
'plants_shape',
'plants_margin',
'bach'
]
def helper_with_default_tabular(klass, folder, shots, ways, shuffle=True,
test_shots=None, seed=None, defaults=None, **kwargs):
"""
Parameters
----------
klass : CombinationMetaDataset
the class corresponding to the meta-dataset, e.g., Covertype
folder : string
Root directory where the dataset folder exists, e.g., `covertype_task_id_2118`.
shots : int
Number of (training) examples per class in each task. This corresponds
to `k` in `k-shot` classification.
ways : int
Number of classes per task. This corresponds to `N` in `N-way`
classification.
shuffle : bool (default: `True`)
Shuffle the examples when creating the tasks.
test_shots : int, optional
Number of test examples per class in each task. If `None`, then the
number of test examples is equal to the number of training examples per
class.
seed : int, optional
Random seed to be used in the meta-dataset.
kwargs
        Additional keyword arguments passed to the `klass` meta-dataset class.
Returns
-------
klass
The meta-dataset with ClassSplitter applied, e.g., Covertype.
"""
if defaults is None:
defaults = {}
if 'num_classes_per_task' in kwargs:
warnings.warn('Both arguments `ways` and `num_classes_per_task` were '
'set in the helper function for the number of classes per task. '
'Ignoring the argument `ways`.', stacklevel=2)
ways = kwargs['num_classes_per_task']
if 'transform' not in kwargs:
kwargs['transform'] = defaults.get('transform', NumpyToTorch())
if 'target_transform' not in kwargs:
kwargs['target_transform'] = defaults.get('target_transform',
Categorical(ways))
if 'class_augmentations' not in kwargs:
kwargs['class_augmentations'] = defaults.get('class_augmentations', None)
if test_shots is None:
test_shots = shots
dataset = klass(folder,
num_classes_per_task=ways,
**kwargs)
dataset = ClassSplitter(dataset,
shuffle=shuffle,
num_train_per_class=shots,
num_test_per_class=test_shots)
dataset.seed(seed)
return dataset
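# Illustrative sketch (not part of the original module): how the helper above is typically
# driven. The wrapper functions below do exactly this per dataset; the "data" folder and the
# task interface (a dict with "train"/"test" splits once ClassSplitter is applied) are
# assumptions based on the usual torchmeta conventions.
def _example_helper_usage():
    # 5-way, 1-shot Letter tasks with 15 test examples per class.
    meta_dataset = helper_with_default_tabular(Letter, "data", shots=1, ways=5,
                                               test_shots=15, seed=42,
                                               meta_train=True, download=True)
    task = meta_dataset.sample_task()
    return task["train"], task["test"]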
def letter(folder: str, shots: int, ways: int, shuffle: bool=True,
test_shots: int=None, seed: int=None, **kwargs) -> Letter:
"""
Wrapper that creates a meta-dataset for the Letter tabular dataset.
Notes
--------
Letter has 26 classes in total with default splits train/val/test : 15/5/6.
See also
--------
`datasets.Letter` : CombinationMetaDataset for the Letter dataset.
"""
return helper_with_default_tabular(Letter, folder, shots, ways, shuffle=shuffle,
test_shots=test_shots, seed=seed, defaults=None, **kwargs)
def plants_texture(folder: str, shots: int, ways: int, shuffle: bool=True,
test_shots: int=None, seed: int=None, **kwargs) -> PlantsTexture:
"""
Wrapper that creates a meta-dataset for the PlantsTexture tabular dataset.
Notes
--------
PlantsTexture has 100 classes in total with default splits train/val/test : 70/15/15.
See also
--------
`datasets.PlantsTexture` : CombinationMetaDataset for the PlantsTexture dataset.
"""
return helper_with_default_tabular(PlantsTexture, folder, shots, ways, shuffle=shuffle,
test_shots=test_shots, seed=seed, defaults=None, **kwargs)
def plants_shape(folder: str, shots: int, ways: int, shuffle: bool=True,
|
__all__ = [
'letter',
'plants_texture',
'plants_shape',
'plants_margin',
'bach'
]
def helper_with_default_tabular(klass, folder, shots, ways, shuffle=True,
test_shots=None, seed=None, defaults=None, **kwargs):
"""
Parameters
----------
klass : CombinationMetaDataset
the class corresponding to the meta-dataset, e.g., Covertype
folder : string
Root directory where the dataset folder exists, e.g., `covertype_task_id_2118`.
shots : int
Number of (training) examples per class in each task. This corresponds
to `k` in `k-shot` classification.
ways : int
Number of classes per task. This corresponds to `N` in `N-way`
classification.
shuffle : bool (default: `True`)
Shuffle the examples when creating the tasks.
test_shots : int, optional
Number of test examples per class in each task. If `None`, then the
number of test examples is equal to the number of training examples per
class.
seed : int, optional
Random seed to be used in the meta-dataset.
kwargs
        Additional keyword arguments passed to the `klass` meta-dataset class.
Returns
-------
klass
The meta-dataset with ClassSplitter applied, e.g., Covertype.
"""
if defaults is None:
defaults = {}
if 'num_classes_per_task' in kwargs:
warnings.warn('Both arguments `ways` and `num_classes_per_task` were '
'set in the helper function for the number of classes per task. '
'Ignoring the argument `ways`.', stacklevel=2)
ways = kwargs['num_classes_per_task']
if 'transform' not in kwargs:
kwargs['transform'] = defaults.get('transform', NumpyToTorch())
if 'target_transform' not in kwargs:
kwargs['target_transform'] = defaults.get('target_transform',
Categorical(ways))
if 'class_augmentations' not in kwargs:
kwargs['class_augmentations'] = defaults.get('class_augmentations', None)
if test_shots is None:
test_shots = shots
dataset = klass(folder,
num_classes_per_task=ways,
**kwargs)
dataset = ClassSplitter(dataset,
shuffle=shuffle,
num_train_per_class=shots,
num_test_per_class=test_shots)
dataset.seed(seed)
return dataset
def letter(folder: str, shots: int, ways: int, shuffle: bool=True,
test_shots: int=None, seed: int=None, **kwargs) -> Letter:
"""
Wrapper that creates a meta-dataset for the Letter tabular dataset.
Notes
--------
Letter has 26 classes in total with default splits train/val/test : 15/5/6.
See also
--------
`datasets.Letter` : CombinationMetaDataset for the Letter dataset.
"""
return helper_with_default_tabular(Letter, folder, shots, ways, shuffle=shuffle,
test_shots=test_shots, seed=seed, defaults=None, **kwargs)
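# Illustrative sketch (not in the original file): consuming the `letter` wrapper with a
# batched meta-dataloader. The `torchmeta_local.utils.data.BatchMetaDataLoader` import path
# is assumed from the fork's naming (upstream torchmeta exposes it under
# `torchmeta.utils.data`); adjust to this repository's actual layout.
def _example_letter_episodes(folder="data"):
    from torchmeta_local.utils.data import BatchMetaDataLoader  # assumed import path
    dataset = letter(folder, shots=5, ways=5, test_shots=15, meta_train=True, download=True)
    dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=0)
    for batch in dataloader:
        train_inputs, train_targets = batch["train"]  # support set
        test_inputs, test_targets = batch["test"]     # query set
        return train_inputs.shape, test_inputs.shape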
def plants_texture(folder: str, shots: int, ways: int, shuffle: bool=True,
test_shots: int=None, seed: int=None, **kwargs) -> PlantsTexture:
"""
Wrapper that creates a meta-dataset for the PlantsTexture tabular dataset.
Notes
--------
PlantsTexture has 100 classes in total with default splits train/val/test : 70/15/15.
See also
--------
`datasets.PlantsTexture` : CombinationMetaDataset for the PlantsTexture dataset.
"""
return helper_with_default_tabular(PlantsTexture, folder, shots, ways, shuffle=shuffle,
test_shots=test_shots, seed=seed, defaults=None, **kwargs)
def plants_shape(folder: str, shots: int, ways: int, shuffle: bool=True, | test_shots: int=None, seed: int=None, **kwargs) -> PlantsShape: | 2 | 2023-11-30 20:07:46+00:00 | 12k |
tosiyuki/LLaVA-JP | train_llava.py | [
{
"identifier": "conversation",
"path": "llava/conversation.py",
"snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n PLAIN = auto()\n TWO = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):"
},
{
"identifier": "LlavaGpt2ForCausalLM",
"path": "llava/model/llava_gpt2.py",
"snippet": "class LlavaGpt2ForCausalLM(GPT2LMHeadModel, LlavaMetaForCausalLM):\n config_class = LlavaConfig\n base_model = \"gpt2\"\n\n def __init__(self, config):\n super(LlavaGpt2ForCausalLM, self).__init__(config)\n self.model = LlavaGpt2Model(config)\n #self.model = LlavaMetaModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_model(self):\n return self.model\n\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n images: Optional[torch.FloatTensor] = None,\n return_dict: Optional[bool] = None,\n **kwargs\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n\n if inputs_embeds is None:\n (\n input_ids,\n position_ids,\n attention_mask,\n past_key_values,\n inputs_embeds,\n labels\n ) = self.prepare_inputs_labels_for_multimodal(\n input_ids,\n position_ids,\n attention_mask,\n past_key_values,\n labels,\n images\n )\n\n return super().forward(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n labels=labels,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict\n )\n\n def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):\n images = kwargs.pop(\"images\", None)\n _inputs = super().prepare_inputs_for_generation(\n input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs\n )\n if images is not None:\n _inputs['images'] = images\n return _inputs"
},
{
"identifier": "LlavaGptNeoxForCausalLM",
"path": "llava/model/llava_gpt_neox.py",
"snippet": "class LlavaGptNeoxForCausalLM(PreTrainedModel, LlavaMetaForCausalLM):\n config_class = LlavaConfig\n base_model = \"gpt_neox\"\n\n def __init__(self, config):\n super(LlavaGptNeoxForCausalLM, self).__init__(config)\n self.model = LlavaGptNeoxModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_model(self):\n return self.model\n\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n images: Optional[torch.FloatTensor] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n\n if inputs_embeds is None:\n (\n input_ids,\n position_ids,\n attention_mask,\n past_key_values,\n inputs_embeds,\n labels\n ) = self.prepare_inputs_labels_for_multimodal(\n input_ids,\n position_ids,\n attention_mask,\n past_key_values,\n labels,\n images\n )\n print(inputs_embeds.size())\n\n return super().forward(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n labels=labels,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict\n )\n\n def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):\n images = kwargs.pop(\"images\", None)\n _inputs = super().prepare_inputs_for_generation(\n input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs\n )\n if images is not None:\n _inputs['images'] = images\n return _inputs"
},
{
"identifier": "LlavaLlamaForCausalLM",
"path": "llava/model/llava_llama.py",
"snippet": "class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):\n config_class = LlavaConfig\n base_model = \"llama\"\n\n def __init__(self, config):\n super(LlavaLlamaForCausalLM, self).__init__(config)\n self.model = LlavaLlamaModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_model(self):\n return self.model\n\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n images: Optional[torch.FloatTensor] = None,\n return_dict: Optional[bool] = None,\n **kwargs\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n\n if inputs_embeds is None:\n (\n input_ids,\n position_ids,\n attention_mask,\n past_key_values,\n inputs_embeds,\n labels\n ) = self.prepare_inputs_labels_for_multimodal(\n input_ids,\n position_ids,\n attention_mask,\n past_key_values,\n labels,\n images\n )\n\n return super().forward(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n labels=labels,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict\n )\n\n def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):\n images = kwargs.pop(\"images\", None)\n _inputs = super().prepare_inputs_for_generation(\n input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs\n )\n if images is not None:\n _inputs['images'] = images\n return _inputs"
},
{
"identifier": "LazySupervisedDataset",
"path": "llava/train/dataset.py",
"snippet": "class LazySupervisedDataset(Dataset):\n \"\"\"Dataset for supervised fine-tuning.\"\"\"\n\n def __init__(self, data_path: str,\n tokenizer: transformers.PreTrainedTokenizer,\n data_args: DataArguments):\n super(LazySupervisedDataset, self).__init__()\n \n list_data_dict = json.load(open(data_path, \"r\"))\n\n print(\"Formatting inputs...Skip in lazy mode\")\n self.tokenizer = tokenizer\n self.list_data_dict = list_data_dict\n self.data_args = data_args\n\n def __len__(self):\n return len(self.list_data_dict)\n\n @property\n def lengths(self):\n length_list = []\n for sample in self.list_data_dict:\n img_tokens = 128 if 'image' in sample else 0\n length_list.append(sum(len(conv['value'].split()) for conv in sample['conversations']) + img_tokens)\n return length_list\n\n @property\n def modality_lengths(self):\n length_list = []\n for sample in self.list_data_dict:\n cur_len = sum(len(conv['value'].split()) for conv in sample['conversations'])\n cur_len = cur_len if 'images' in sample else -cur_len\n length_list.append(cur_len)\n return length_list\n\n def __getitem__(self, i) -> Dict[str, torch.Tensor]:\n sources = self.list_data_dict[i]\n if isinstance(i, int):\n sources = [sources]\n assert len(sources) == 1, \"Don't know why it is wrapped to a list\" # FIXME\n if 'image' in sources[0]:\n image_file = self.list_data_dict[i]['image']\n image_folder = self.data_args.image_folder\n processor = self.data_args.image_processor\n image = Image.open(os.path.join(image_folder, image_file)).convert('RGB')\n if self.data_args.image_aspect_ratio == 'pad':\n def expand2square(pil_img, background_color):\n width, height = pil_img.size\n if width == height:\n return pil_img\n elif width > height:\n result = Image.new(pil_img.mode, (width, width), background_color)\n result.paste(pil_img, (0, (width - height) // 2))\n return result\n else:\n result = Image.new(pil_img.mode, (height, height), background_color)\n result.paste(pil_img, ((height - width) // 2, 0))\n return result\n image = expand2square(image, tuple(int(x*255) for x in processor.image_mean))\n image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]\n else:\n image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]\n sources = preprocess_multimodal(\n copy.deepcopy([e[\"conversations\"] for e in sources]),\n self.data_args\n )\n else:\n sources = copy.deepcopy([e[\"conversations\"] for e in sources])\n data_dict = preprocess(\n sources,\n self.tokenizer,\n has_image=('image' in self.list_data_dict[i]))\n if isinstance(i, int):\n data_dict = dict(input_ids=data_dict[\"input_ids\"][0],\n labels=data_dict[\"labels\"][0])\n\n # image exist in the data\n if 'image' in self.list_data_dict[i]:\n data_dict['images'] = image\n elif self.data_args.is_multimodal:\n # image does not exist in the data, but the model is multimodal\n crop_size = self.data_args.image_processor.crop_size\n data_dict['images'] = torch.zeros(3, crop_size['height'], crop_size['width'])\n return data_dict"
},
{
"identifier": "DataCollatorForSupervisedDataset",
"path": "llava/train/dataset.py",
"snippet": "class DataCollatorForSupervisedDataset(object):\n \"\"\"Collate examples for supervised fine-tuning.\"\"\"\n\n tokenizer: transformers.PreTrainedTokenizer\n\n def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:\n input_ids, labels = tuple([instance[key] for instance in instances]\n for key in (\"input_ids\", \"labels\"))\n input_ids = torch.nn.utils.rnn.pad_sequence(\n input_ids,\n batch_first=True,\n padding_value=self.tokenizer.pad_token_id)\n labels = torch.nn.utils.rnn.pad_sequence(labels,\n batch_first=True,\n padding_value=IGNORE_INDEX)\n input_ids = input_ids[:, :self.tokenizer.model_max_length]\n labels = labels[:, :self.tokenizer.model_max_length]\n batch = dict(\n input_ids=input_ids,\n labels=labels,\n attention_mask=input_ids.ne(self.tokenizer.pad_token_id),\n )\n\n if 'images' in instances[0]:\n images = [instance['images'] for instance in instances]\n if all(x is not None and x.shape == images[0].shape for x in images):\n batch['images'] = torch.stack(images)\n else:\n batch['images'] = images\n\n return batch"
},
{
"identifier": "ModelArguments",
"path": "llava/train/arguments_dataclass.py",
"snippet": "class ModelArguments:\n base_model: Optional[str] = field(default=\"gpt2\",\n metadata={\"help\": \"gpt2 or gpt_neox or llama\"})\n model_name_or_path: Optional[str] = field(default=\"rinna/japanese-gpt2-xsmall\")\n version: Optional[str] = field(default=\"plain\")\n freeze_backbone: bool = field(default=False) # LLMをFreezeするか\n tune_mm_mlp_adapter: bool = field(default=False) # 事前学習のときはmm_mlp_adapterだけ保存する.\n vision_tower: Optional[str] = field(default=\"openai/clip-vit-large-patch14-336\")\n mm_vision_select_layer: Optional[int] = field(default=-2) # default to the last two layer\n pretrain_mm_mlp_adapter: Optional[str] = field(default=None) # fine-tuningのときには設定\n mm_projector_type: Optional[str] = field(default='mlp2x_gelu') # 2層の線形層\n mm_vision_select_feature: Optional[str] = field(default=\"patch\")"
},
{
"identifier": "DataArguments",
"path": "llava/train/arguments_dataclass.py",
"snippet": "class DataArguments:\n data_path: str = field(default=\"\",\n metadata={\"help\": \"Path to the training data.\"})\n lazy_preprocess: bool = False\n is_multimodal: bool = False\n image_folder: Optional[str] = field(default=\"/home/toshi/work/llava_jp/input/LLaVA-CC3M-Pretrain-595K/images\",\n metadata={\"help\": \"Path to image data.\"})\n image_aspect_ratio: str = 'square'"
},
{
"identifier": "TrainingArguments",
"path": "llava/train/arguments_dataclass.py",
"snippet": "class TrainingArguments(transformers.TrainingArguments):\n cache_dir: Optional[str] = field(default=None)\n optim: str = field(default=\"adamw_torch\")\n model_max_length: int = field(\n default=1024,\n metadata={\n \"help\":\n \"Maximum sequence length. Sequences will be right padded (and possibly truncated).\"\n },\n )\n double_quant: bool = field(\n default=True,\n metadata={\"help\": \"Compress the quantization statistics through double quantization.\"}\n )\n quant_type: str = field(\n default=\"nf4\",\n metadata={\"help\": \"Quantization data type to use. Should be one of `fp4` or `nf4`.\"}\n )\n bits: int = field(\n default=16,\n metadata={\"help\": \"How many bits to use.\"}\n )\n lora_enable: bool = False\n lora_r: int = 64\n lora_alpha: int = 16\n lora_dropout: float = 0.05\n lora_weight_path: str = \"\"\n lora_bias: str = \"none\"\n mm_projector_lr: Optional[float] = None\n group_by_modality_length: bool = field(default=False) # dataset sampler option\n\n fp16: bool = field(default=False)\n bf16: bool = field(default=False)\n output_dir: str = field(default=\"./output_llava/checkpoints/llava-v1.5-japanese-gpt2-xsmall\")\n num_train_epochs: int = field(default=1)\n per_device_train_batch_size: int = field(default=32)\n per_device_eval_batch_size: int = field(default=4)\n gradient_accumulation_steps: int = field(default=1)\n evaluation_strategy: str = field(default=\"no\")\n save_strategy: str = field(default=\"steps\")\n save_steps: int = field(default=24000)\n save_total_limit: int = field(default=1)\n learning_rate: float = field(default=1e-3)\n weight_decay: float = field(default=0.)\n warmup_ratio: float = field(default=0.03)\n logging_steps: int = field(default=1)\n model_max_length: int = field(default=1024)\n gradient_checkpointing: bool = field(default=True)\n dataloader_num_workers: int = field(default=16)\n lr_scheduler_type: str = field(default=\"cosine\")\n seed: int = field(default=42)"
},
{
"identifier": "LLaVATrainer",
"path": "llava/train/llava_trainer.py",
"snippet": "class LLaVATrainer(Trainer):\n\n def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:\n if self.train_dataset is None or not has_length(self.train_dataset):\n return None\n\n if self.args.group_by_modality_length:\n lengths = self.train_dataset.modality_lengths\n return LengthGroupedSampler(\n self.args.train_batch_size,\n world_size=self.args.world_size * self.args.gradient_accumulation_steps,\n lengths=lengths,\n group_by_modality=True,\n )\n else:\n return super()._get_train_sampler()\n\n def create_optimizer(self):\n \"\"\"\n Setup the optimizer.\n\n We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the\n Trainer's init through `optimizers`, or subclass and override this method in a subclass.\n \"\"\"\n opt_model = self.model\n\n if self.optimizer is None:\n decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)\n decay_parameters = [name for name in decay_parameters if \"bias\" not in name]\n if self.args.mm_projector_lr is not None:\n projector_parameters = [name for name, _ in opt_model.named_parameters() if \"mm_projector\" in name]\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n in decay_parameters and n not in projector_parameters and p.requires_grad)\n ],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n not in projector_parameters and p.requires_grad)\n ],\n \"weight_decay\": 0.0,\n },\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n in decay_parameters and n in projector_parameters and p.requires_grad)\n ],\n \"weight_decay\": self.args.weight_decay,\n \"lr\": self.args.mm_projector_lr,\n },\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n in projector_parameters and p.requires_grad)\n ],\n \"weight_decay\": 0.0,\n \"lr\": self.args.mm_projector_lr,\n },\n ]\n else:\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)\n ],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [\n p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n\n optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)\n\n self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)\n if optimizer_cls.__name__ == \"Adam8bit\":\n import bitsandbytes\n\n manager = bitsandbytes.optim.GlobalOptimManager.get_instance()\n\n skipped = 0\n for module in opt_model.modules():\n if isinstance(module, nn.Embedding):\n skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())\n logger.info(f\"skipped {module}: {skipped/2**20}M params\")\n manager.register_module_override(module, \"weight\", {\"optim_bits\": 32})\n logger.debug(f\"bitsandbytes: will optimize {module} in fp32\")\n logger.info(f\"skipped: {skipped/2**20}M params\")\n\n return self.optimizer\n\n def _save_checkpoint(self, model, trial, metrics=None):\n if getattr(self.args, 'tune_mm_mlp_adapter', False):\n from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR\n checkpoint_folder = f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\"\n\n run_dir = self._get_output_dir(trial=trial)\n output_dir = os.path.join(run_dir, checkpoint_folder)\n\n # Only save Adapter\n 
#keys_to_match = ['mm_projector', 'vision_resampler']\n keys_to_match = ['mm_projector']\n weight_to_save = get_mm_adapter_state(self.model.named_parameters(), keys_to_match)\n #weight_to_save = self.model.named_parameters().detach().cpu().clone()\n\n if self.args.local_rank == 0 or self.args.local_rank == -1:\n self.model.config.save_pretrained(output_dir)\n torch.save(weight_to_save, os.path.join(output_dir, f'mm_projector.bin'))\n else:\n super(LLaVATrainer, self)._save_checkpoint(model, trial, metrics)\n\n def _save(self, output_dir: Optional[str] = None, state_dict=None):\n if getattr(self.args, 'tune_mm_mlp_adapter', False):\n pass\n else:\n super(LLaVATrainer, self)._save(output_dir, state_dict)"
}
] | import os
import pathlib
import torch
import transformers
from typing import Dict
from llava import conversation as conversation_lib
from llava.model.llava_gpt2 import LlavaGpt2ForCausalLM
from llava.model.llava_gpt_neox import LlavaGptNeoxForCausalLM
from llava.model.llava_llama import LlavaLlamaForCausalLM
from llava.train.dataset import LazySupervisedDataset, DataCollatorForSupervisedDataset
from llava.train.arguments_dataclass import ModelArguments, DataArguments, TrainingArguments
from llava.train.llava_trainer import LLaVATrainer
from transformers import BitsAndBytesConfig
from peft import prepare_model_for_kbit_training
from peft import LoraConfig, get_peft_model
from peft.tuners.lora import LoraLayer | 7,573 | names = name.split('.')
lora_module_names.add(names[0] if len(names) == 1 else names[-1])
if 'lm_head' in lora_module_names: # needed for 16-bit
lora_module_names.remove('lm_head')
return list(lora_module_names)
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer,
data_args) -> Dict:
"""Make dataset and collator for supervised fine-tuning."""
train_dataset = LazySupervisedDataset(tokenizer=tokenizer,
data_path=data_args.data_path,
data_args=data_args)
data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
return dict(train_dataset=train_dataset,
eval_dataset=None,
data_collator=data_collator)
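# Illustrative sketch (not part of the original file): the dict returned above is meant to be
# unpacked straight into the trainer, mirroring the upstream LLaVA train.py wiring; the exact
# call site in this repository may differ.
def _example_build_trainer(model, tokenizer, training_args, data_args):
    data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
    return LLaVATrainer(model=model,
                        tokenizer=tokenizer,
                        args=training_args,
                        **data_module)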
def train():
global local_rank
parser = transformers.HfArgumentParser(
(ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
local_rank = training_args.local_rank
compute_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))
bnb_model_from_pretrained_args = {}
if training_args.bits in [4, 8]:
bnb_model_from_pretrained_args.update(dict(
device_map="auto",
load_in_4bit=training_args.bits == 4,
load_in_8bit=training_args.bits == 8,
quantization_config=BitsAndBytesConfig(
load_in_4bit=training_args.bits == 4,
load_in_8bit=training_args.bits == 8,
llm_int8_skip_modules=["mm_projector"],
llm_int8_threshold=6.0,
llm_int8_has_fp16_weight=False,
bnb_4bit_compute_dtype=compute_dtype,
bnb_4bit_use_double_quant=training_args.double_quant,
bnb_4bit_quant_type=training_args.quant_type # {'fp4', 'nf4'}
)
))
if model_args.base_model == "gpt2":
model = LlavaGpt2ForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
**bnb_model_from_pretrained_args
)
elif model_args.base_model == "gpt_neox":
model = LlavaGptNeoxForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
**bnb_model_from_pretrained_args
)
elif model_args.base_model == "llama":
model = LlavaLlamaForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
**bnb_model_from_pretrained_args
)
else:
print(f"{model_args.base_model} is not found")
exit(-1)
model.config.use_cache = False
if model_args.freeze_backbone:
model.model.requires_grad_(False)
if training_args.bits in [4, 8]:
model.config.torch_dtype=(torch.float32 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing)
if training_args.gradient_checkpointing:
        # Apparently used when tuning the Adapter weights: lets inputs require grads so gradient checkpointing works
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
if training_args.lora_enable:
print("target_modules")
if model_args.base_model == "gpt2":
target_modules = ["c_attn"]
elif model_args.base_model == "gpt_neox":
target_modules = ["query_key_value"]
elif model_args.base_model == "llama":
target_modules = find_all_linear_names(model)
else:
print(f"{model_args.base_model} is not found")
exit(-1)
lora_config = LoraConfig(
r=training_args.lora_r,
lora_alpha=training_args.lora_alpha,
target_modules=target_modules,
lora_dropout=training_args.lora_dropout,
bias=training_args.lora_bias,
task_type="CAUSAL_LM",
)
if training_args.bits == 16:
if training_args.bf16:
model.to(torch.bfloat16)
if training_args.fp16:
model.to(torch.float16)
rank0_print("Adding LoRA adapters...")
model = get_peft_model(model, lora_config)
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right",
use_fast=True,
)
tokenizer.pad_token = tokenizer.unk_token
|
def rank0_print(*args):
if local_rank == 0:
print(*args)
# Borrowed from peft.utils.get_peft_model_state_dict
def get_peft_state(named_params, bias):
if bias == "none":
to_return = {k: t for k, t in named_params if "lora_" in k}
elif bias == "all":
to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
elif bias == "lora_only":
to_return = {}
maybe_lora_bias = {}
lora_bias_names = set()
for k, t in named_params:
if "lora_" in k:
to_return[k] = t
bias_name = k.split("lora_")[0] + "bias"
lora_bias_names.add(bias_name)
elif "bias" in k:
maybe_lora_bias[k] = t
        for k, t in maybe_lora_bias.items():
if bias_name in lora_bias_names:
to_return[bias_name] = t
else:
raise NotImplementedError
to_return = {k: v.detach().cpu().clone() for k, v in to_return.items()}
return to_return
def get_peft_state_non_lora(named_params, require_grad_only=True):
to_return = {k: t for k, t in named_params if "lora_" not in k}
if require_grad_only:
to_return = {k: t for k, t in to_return.items() if t.requires_grad}
to_return = {k: v.detach().cpu().clone() for k, v in to_return.items()}
return to_return
def get_mm_adapter_state(named_params, keys_to_match):
to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)}
to_return = {k: v.detach().cpu().clone() for k, v in to_return.items()}
return to_return
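# Illustrative sketch (not part of the original file): how the three state-dict helpers above
# are typically combined when saving a LoRA run. This mirrors the upstream LLaVA pattern; the
# "non_lora_trainables.bin" file name is an assumption.
def _example_save_lora_checkpoint(model, training_args):
    state_dict = get_peft_state(model.named_parameters(), training_args.lora_bias)
    non_lora_state_dict = get_peft_state_non_lora(model.named_parameters())
    if training_args.local_rank in (0, -1):
        model.config.save_pretrained(training_args.output_dir)
        model.save_pretrained(training_args.output_dir, state_dict=state_dict)
        torch.save(non_lora_state_dict,
                   os.path.join(training_args.output_dir, "non_lora_trainables.bin"))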
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer,
output_dir: str):
"""Collects the state dict and dump to disk."""
if getattr(trainer.args, "tune_mm_mlp_adapter", False):
# Only save Adapter
keys_to_match = ['mm_projector']
if getattr(trainer.args, "use_im_start_end", False):
keys_to_match.extend(['embed_tokens', 'embed_in'])
weight_to_save = get_mm_adapter_state(trainer.model.named_parameters(), keys_to_match)
trainer.model.config.save_pretrained(output_dir)
current_folder = output_dir.split('/')[-1]
parent_folder = os.path.dirname(output_dir)
if trainer.args.local_rank == 0 or trainer.args.local_rank == -1:
if current_folder.startswith('checkpoint-'):
mm_projector_folder = os.path.join(parent_folder, "mm_projector")
os.makedirs(mm_projector_folder, exist_ok=True)
torch.save(weight_to_save, os.path.join(mm_projector_folder, f'{current_folder}.bin'))
else:
torch.save(weight_to_save, os.path.join(output_dir, f'mm_projector.bin'))
return
state_dict = trainer.model.state_dict()
if trainer.args.should_save:
cpu_state_dict = {
key: value.cpu()
for key, value in state_dict.items()
}
del state_dict
trainer._save(output_dir, state_dict=cpu_state_dict) # noqa
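# Illustrative sketch (not part of the original file): this saver is normally invoked once
# training has finished, roughly as upstream LLaVA does.
def _example_finish_training(trainer, training_args):
    trainer.train()
    trainer.save_state()
    safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)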
def find_all_linear_names(model):
cls = torch.nn.Linear
lora_module_names = set()
multimodal_keywords = ['mm_projector', 'vision_tower', 'vision_resampler']
for name, module in model.named_modules():
if any(mm_keyword in name for mm_keyword in multimodal_keywords):
continue
if isinstance(module, cls):
names = name.split('.')
lora_module_names.add(names[0] if len(names) == 1 else names[-1])
if 'lm_head' in lora_module_names: # needed for 16-bit
lora_module_names.remove('lm_head')
return list(lora_module_names)
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer,
data_args) -> Dict:
"""Make dataset and collator for supervised fine-tuning."""
train_dataset = LazySupervisedDataset(tokenizer=tokenizer,
data_path=data_args.data_path,
data_args=data_args)
data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
return dict(train_dataset=train_dataset,
eval_dataset=None,
data_collator=data_collator)
def train():
global local_rank
parser = transformers.HfArgumentParser(
(ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
local_rank = training_args.local_rank
compute_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))
bnb_model_from_pretrained_args = {}
if training_args.bits in [4, 8]:
bnb_model_from_pretrained_args.update(dict(
device_map="auto",
load_in_4bit=training_args.bits == 4,
load_in_8bit=training_args.bits == 8,
quantization_config=BitsAndBytesConfig(
load_in_4bit=training_args.bits == 4,
load_in_8bit=training_args.bits == 8,
llm_int8_skip_modules=["mm_projector"],
llm_int8_threshold=6.0,
llm_int8_has_fp16_weight=False,
bnb_4bit_compute_dtype=compute_dtype,
bnb_4bit_use_double_quant=training_args.double_quant,
bnb_4bit_quant_type=training_args.quant_type # {'fp4', 'nf4'}
)
))
if model_args.base_model == "gpt2":
model = LlavaGpt2ForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
**bnb_model_from_pretrained_args
)
elif model_args.base_model == "gpt_neox":
model = LlavaGptNeoxForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
**bnb_model_from_pretrained_args
)
elif model_args.base_model == "llama":
model = LlavaLlamaForCausalLM.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
**bnb_model_from_pretrained_args
)
else:
print(f"{model_args.base_model} is not found")
exit(-1)
model.config.use_cache = False
if model_args.freeze_backbone:
model.model.requires_grad_(False)
if training_args.bits in [4, 8]:
model.config.torch_dtype=(torch.float32 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing)
if training_args.gradient_checkpointing:
        # Apparently used when tuning the Adapter weights: lets inputs require grads so gradient checkpointing works
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
if training_args.lora_enable:
print("target_modules")
if model_args.base_model == "gpt2":
target_modules = ["c_attn"]
elif model_args.base_model == "gpt_neox":
target_modules = ["query_key_value"]
elif model_args.base_model == "llama":
target_modules = find_all_linear_names(model)
else:
print(f"{model_args.base_model} is not found")
exit(-1)
lora_config = LoraConfig(
r=training_args.lora_r,
lora_alpha=training_args.lora_alpha,
target_modules=target_modules,
lora_dropout=training_args.lora_dropout,
bias=training_args.lora_bias,
task_type="CAUSAL_LM",
)
if training_args.bits == 16:
if training_args.bf16:
model.to(torch.bfloat16)
if training_args.fp16:
model.to(torch.float16)
rank0_print("Adding LoRA adapters...")
model = get_peft_model(model, lora_config)
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right",
use_fast=True,
)
tokenizer.pad_token = tokenizer.unk_token | conversation_lib.default_conversation = conversation_lib.conv_templates[model_args.version] | 4 | 2023-12-01 03:26:16+00:00 | 12k |
jags111/ComfyUI_Jags_Audiotools | SampleDiffusion.py | [
{
"identifier": "load_audio",
"path": "libs/util/util.py",
"snippet": "def load_audio(device, audio_path: str, sample_rate):\n \n if not os.path.exists(audio_path):\n raise RuntimeError(f\"Audio file not found: {audio_path}\")\n\n audio, file_sample_rate = torchaudio.load(audio_path)\n\n if file_sample_rate != sample_rate:\n resample = torchaudio.transforms.Resample(file_sample_rate, sample_rate)\n audio = resample(audio)\n\n return audio.to(device)"
},
{
"identifier": "crop_audio",
"path": "libs/util/util.py",
"snippet": "def crop_audio(source: torch.Tensor, chunk_size: int, crop_offset: int = 0) -> torch.Tensor:\n n_channels, n_samples = source.shape\n \n offset = 0\n if (crop_offset > 0):\n offset = min(crop_offset, n_samples - chunk_size)\n elif (crop_offset == -1):\n offset = torch.randint(0, max(0, n_samples - chunk_size) + 1, []).item()\n \n chunk = source.new_zeros([n_channels, chunk_size])\n chunk [:, :min(n_samples, chunk_size)] = source[:, offset:offset + chunk_size]\n \n return chunk"
},
{
"identifier": "RequestHandler",
"path": "libs/dance_diffusion/api.py",
"snippet": "class RequestType(str, enum.Enum):\nclass Request:\nclass Response:\nclass RequestHandler:\n def __init__(\n self,\n request_type: RequestType,\n model_path: str,\n model_type: ModelType,\n model_chunk_size: int,\n model_sample_rate: int,\n **kwargs\n ):\n def __init__(\n self,\n result: torch.Tensor\n ):\n def __init__(\n self, \n device_accelerator: torch.device, \n device_offload: torch.device = None, \n optimize_memory_use: bool = False,\n use_autocast: bool = True\n ):\n def process_request(\n self,\n request: Request,\n callback: Callable = None\n ) -> Response:\n def load_model(self, model_type, model_path, chunk_size, sample_rate):\n def handle_generation(self, request: Request, callback: Callable) -> Response:\n def handle_variation(self, request: Request, callback: Callable) -> torch.Tensor:\n def handle_interpolation(self, request: Request, callback: Callable) -> torch.Tensor:\n def handle_inpainting(self, request: Request, callback: Callable) -> torch.Tensor:\n def handle_extension(self, request: Request, callback: Callable) -> torch.Tensor:"
},
{
"identifier": "SamplerType",
"path": "libs/diffusion_library/sampler.py",
"snippet": "class SamplerType(str, enum.Enum):\n V_DDPM = 'V_DDPM'\n V_DDIM = 'V_DDIM'\n V_PRK = 'V_PRK'\n V_PIE = 'V_PIE'\n V_PLMS = 'V_PLMS'\n V_PLMS2 = 'V_PLMS2'\n V_IPLMS = 'V_IPLMS'\n \n K_EULER = 'K_EULER'\n K_EULERA = 'K_EULERA'\n K_HEUN = 'K_HEUN'\n K_DPM2 = 'K_DPM2'\n K_DPM2A = 'K_DPM2A'\n K_LMS = 'K_LMS'\n K_DPMF = 'K_DPMF'\n K_DPMA = 'K_DPMA'\n K_DPMPP2SA = 'K_DPMPP2SA'\n K_DPMPP2M = 'K_DPMPP2M'\n K_DPMPPSDE = 'K_DPMPPSDE'\n\n @classmethod\n def is_v_sampler(cls, value):\n return value[0] == 'V'\n\n def sample(self, model_fn, x_t, steps, callback, **sampler_args) -> torch.Tensor:\n if self == SamplerType.V_DDPM:\n if sampler_args.get('is_reverse'):\n return vsampling.reverse_sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n sampler_args.get('extra_args', {}),\n callback\n )\n else:\n return vsampling.sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n sampler_args.get('extra_args', {}),\n callback\n )\n elif self == SamplerType.V_DDIM:\n if sampler_args.get('is_reverse'): # HACK: Technically incorrect since DDIM implies eta > 0.0\n return vsampling.reverse_sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n sampler_args.get('extra_args', {}),\n callback\n )\n else:\n return vsampling.sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('eta', 0.1),\n sampler_args.get('extra_args', {}),\n callback\n )\n elif self == SamplerType.V_PRK:\n return vsampling.prk_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PIE:\n return vsampling.pie_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PLMS:\n return vsampling.plms_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PLMS2:\n return vsampling.plms2_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_IPLMS:\n return vsampling.iplms_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.K_EULER:\n return ksampling.sample_euler(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_EULERA:\n return ksampling.sample_euler_ancestral(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_HEUN:\n return ksampling.sample_heun(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_DPM2:\n return ksampling.sample_dpm_2(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_DPM2A:\n return ksampling.sample_dpm_2_ancestral(\n model_fn,\n x_t,\n steps,\n 
sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_LMS:\n return ksampling.sample_lms(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('order', 4)\n )\n elif self == SamplerType.K_DPMF:# sample_dpm_fast\n return ksampling.sample_dpm_fast(\n model_fn,\n x_t,\n sampler_args.get('sigma_min', 0.001),\n sampler_args.get('sigma_max', 1.0),\n sampler_args.get('n', 3),\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_DPMA:\n return ksampling.sample_dpm_adaptive(\n model_fn,\n x_t,\n sampler_args.get('sigma_min', 0.001),\n sampler_args.get('sigma_max', 1.0),\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('order', 3),\n sampler_args.get('rtol', 0.05),\n sampler_args.get('atol', 0.0078),\n sampler_args.get('h_init', 0.05),\n sampler_args.get('pcoeff', 0.0),\n sampler_args.get('icoeff', 1.0),\n sampler_args.get('dcoeff', 0.0),\n sampler_args.get('accept_safety', 0.81),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None),\n sampler_args.get('return_info', False)\n )\n elif self == SamplerType.K_DPMPP2SA:\n return ksampling.sample_dpmpp_2s_ancestral(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_DPMPP2M:\n return ksampling.sample_dpmpp_2m(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False)\n )\n elif self == SamplerType.K_DPMPPSDE:\n return ksampling.sample_dpmpp_sde(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None),\n sampler_args.get('r', 1/2)\n )\n else:\n raise Exception(f\"No sample implementation for sampler_type '{self}'\")"
},
{
"identifier": "SchedulerType",
"path": "libs/diffusion_library/scheduler.py",
"snippet": "class SchedulerType(str, enum.Enum):\n V_DDPM = 'V_DDPM'\n V_SPLICED_DDPM_COSINE = 'V_SPLICED_DDPM_COSINE'\n V_LOG = 'V_LOG'\n V_CRASH = 'V_CRASH'\n \n K_KARRAS = 'K_KARRAS'\n K_EXPONENTIAL = 'K_EXPONENTIAL'\n K_POLYEXPONENTIAL = 'K_POLYEXPONENTIAL'\n K_VP = 'K_VP'\n \n @classmethod\n def is_v_scheduler(cls, value):\n return value[0] == 'V'\n \n def get_step_list(self, n: int, device: str, **schedule_args):\n #if SchedulerType.is_v_scheduler(self):\n # n -= 1\n\n if self == SchedulerType.V_DDPM:\n return torch.nn.functional.pad(vscheduling.get_ddpm_schedule(torch.linspace(1, 0, n)), [0,1], value=0.0).to(device)\n elif self == SchedulerType.V_SPLICED_DDPM_COSINE:\n return vscheduling.get_spliced_ddpm_cosine_schedule(torch.linspace(1, 0, n + 1)).to(device)\n elif self == SchedulerType.V_LOG:\n return torch.nn.functional.pad(\n vscheduling.get_log_schedule(\n torch.linspace(1, 0, n),\n schedule_args.get('min_log_snr', -10.0),\n schedule_args.get('max_log_snr', 10.0)\n ),\n [0,1],\n value=0.0\n ).to(device)\n elif self == SchedulerType.V_CRASH:\n sigma = torch.sin(torch.linspace(1, 0, n + 1) * math.pi / 2) ** 2\n alpha = (1 - sigma ** 2) ** 0.5\n return vscheduling.alpha_sigma_to_t(alpha, sigma).to(device)\n elif self == SchedulerType.K_KARRAS:\n return kscheduling.get_sigmas_karras(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n schedule_args.get('rho', 7.0),\n device = device\n )\n elif self == SchedulerType.K_EXPONENTIAL:\n return kscheduling.get_sigmas_exponential(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n device = device\n )\n elif self == SchedulerType.K_POLYEXPONENTIAL:\n return kscheduling.get_sigmas_polyexponential(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n schedule_args.get('rho', 1.0),\n device = device\n )\n elif self == SchedulerType.K_VP:\n return kscheduling.get_sigmas_vp(\n n,\n schedule_args.get('beta_d', 1.205),\n schedule_args.get('beta_min', 0.09),\n schedule_args.get('eps_s', 0.001),\n device = device\n )\n else:\n raise Exception(f\"No get_step_list implementation for scheduler_type '{self}'\")"
},
{
"identifier": "DDModelWrapper",
"path": "libs/dance_diffusion/dd/model.py",
"snippet": "class DDModelWrapper(ModelWrapperBase):\n def __init__(self):\n \n super().__init__()\n \n self.module:DanceDiffusionInference = None\n self.model:Callable = None\n \n def load(\n self,\n path:str,\n device_accelerator:torch.device,\n optimize_memory_use:bool=False,\n chunk_size:int=None,\n sample_rate:int=None\n ):\n \n default_model_config = dict(\n version = [0, 0, 1],\n model_info = dict(\n name = 'Dance Diffusion Model',\n description = 'v1.0',\n type = ModelType.DD,\n native_chunk_size = 65536,\n sample_rate = 48000,\n ),\n diffusion_config = dict(\n n_attn_layers = 4\n )\n )\n \n file = torch.load(path, map_location='cpu')\n \n model_config = file.get('model_config')\n if not model_config:\n print(f\"Model file {path} is invalid. Please run the conversion script.\")\n print(f\" - Default model config will be used, which may be inaccurate.\")\n model_config = default_model_config\n \n model_info = model_config.get('model_info')\n diffusion_config = model_config.get('diffusion_config')\n\n self.path = path\n self.chunk_size = model_info.get('native_chunk_size')if not chunk_size else chunk_size\n self.sample_rate = model_info.get('sample_rate')if not sample_rate else sample_rate\n \n self.module = DanceDiffusionInference(\n n_attn_layers=diffusion_config.get('n_attn_layers'),\n sample_size=chunk_size,\n sample_rate=sample_rate,\n latent_dim=0,\n )\n \n self.module.load_state_dict(\n file[\"state_dict\"], \n strict=False\n )\n self.module.eval().requires_grad_(False)\n \n self.model = self.module.diffusion_ema if (optimize_memory_use) else self.module.diffusion_ema.to(device_accelerator)"
},
{
"identifier": "DDInference",
"path": "libs/dance_diffusion/dd/inference.py",
"snippet": "class DDInference(InferenceBase):\n \n def __init__(\n self,\n device_accelerator: torch.device = None,\n device_offload: torch.device = None,\n optimize_memory_use: bool = False,\n use_autocast: bool = True,\n model: ModelWrapperBase = None\n ):\n super().__init__(device_accelerator, device_offload, optimize_memory_use, use_autocast, model)\n \n def generate(\n self,\n callback: Callable = None,\n batch_size: int = None,\n seed: int = None,\n steps: int = None,\n scheduler: SchedulerType = None,\n scheduler_args: dict = None,\n sampler: SamplerType = None,\n sampler_args: dict = None,\n **kwargs\n ):\n self.generator.manual_seed(seed)\n \n step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)#step_list = step_list[:-1] if sampler in [SamplerType.V_PRK, SamplerType.V_PLMS, SamplerType.V_PIE, SamplerType.V_PLMS2, SamplerType.V_IPLMS] else step_list\n \n if SamplerType.is_v_sampler(sampler):\n x_T = torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator)\n model = self.model.model\n else:\n x_T = step_list[0] * torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator)\n model = VDenoiser(self.model.model)\n \n with self.offload_context(self.model.model):\n return sampler.sample(\n model,\n x_T,\n step_list,\n callback,\n **sampler_args\n ).float()\n \n \n def generate_variation(\n self,\n callback: Callable = None,\n batch_size: int = None,\n seed: int = None,\n audio_source: torch.Tensor = None,\n expansion_map: list[int] = None,\n noise_level: float = None,\n steps: int = None,\n scheduler: SchedulerType = None,\n scheduler_args = None,\n sampler: SamplerType = None,\n sampler_args = None,\n **kwargs\n ) -> torch.Tensor:\n self.generator.manual_seed(seed)\n \n audio_source = self.expand(audio_source, expansion_map)\n \n if SamplerType.is_v_sampler(sampler):\n step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)\n step_list = step_list[step_list < noise_level]\n alpha_T, sigma_T = t_to_alpha_sigma(step_list[0])\n x_T = alpha_T * audio_source + sigma_T * torch.randn(audio_source.shape, device=audio_source.device, generator=self.generator)\n model = self.model.model\n else:\n scheduler_args.update(sigma_max = scheduler_args.get('sigma_max', 1.0) * noise_level)\n step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)\n x_T = audio_source + step_list[0] * torch.randn(audio_source.shape, device=audio_source.device, generator=self.generator)\n model = VDenoiser(self.model.model)\n \n with self.offload_context(self.model.model):\n return sampler.sample(\n model,\n x_T,\n step_list,\n callback,\n **sampler_args\n ).float()\n \n \n def generate_interpolation(\n self,\n callback: Callable = None,\n batch_size: int = None,\n # seed: int = None,\n interpolation_positions: list[float] = None,\n audio_source: torch.Tensor = None,\n audio_target: torch.Tensor = None,\n expansion_map: list[int] = None,\n noise_level: float = None,\n steps: int = None,\n scheduler: SchedulerType = None,\n scheduler_args = None,\n sampler: SamplerType = None,\n sampler_args = None,\n **kwargs\n ) -> torch.Tensor:\n \n if SamplerType.is_v_sampler(sampler):\n step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)\n step_list = step_list[step_list < noise_level]\n step_list[-1] += 1e-7 #HACK avoid division by 0 in reverse sampling\n model = self.model.model\n else:\n 
scheduler_args.update(sigma_max = scheduler_args.get('sigma_max', 1.0) * noise_level)\n step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)\n step_list = step_list[:-1] #HACK avoid division by 0 in reverse sampling\n model = VDenoiser(self.model.model)\n \n if self.optimize_memory_use and batch_size < 2:\n x_0_source = audio_source\n x_0_target = audio_target\n \n with self.offload_context(self.model.model):\n x_T_source = sampler.sample(\n model,\n x_0_source,\n step_list.flip(0),\n callback,\n **sampler_args\n )\n \n with self.offload_context(self.model.model):\n x_T_target = sampler.sample(\n model,\n x_0_target,\n step_list.flip(0),\n callback,\n **sampler_args\n )\n \n x_T = torch.cat([x_T_source, x_T_target], dim=0)\n else:\n x_0 = torch.cat([audio_source, audio_target], dim=0)\n \n with self.offload_context(self.model.model):\n x_T = sampler.sample(\n model,\n x_0,\n step_list.flip(0),\n callback,\n **sampler_args\n )\n \n if SamplerType.is_v_sampler(sampler): #HACK reset schedule after hack\n step_list[-1] = 0.0\n else:\n step_list = torch.cat([step_list, step_list.new_zeros([1])])\n \n x_Int = torch.empty([batch_size, 2, self.model.chunk_size], device=self.device_accelerator)\n \n for pos in range(len(interpolation_positions)):\n x_Int[pos] = tensor_slerp_2D(x_T[0], x_T[1], interpolation_positions[pos])\n \n with self.offload_context(self.model.model):\n return sampler.sample(\n model,\n x_Int,\n step_list,\n callback,\n **sampler_args\n ).float()\n \n\n def generate_inpainting(\n self,\n callback: Callable = None,\n batch_size: int = None,\n seed: int = None,\n audio_source: torch.Tensor = None,\n expansion_map: list[int] = None,\n mask: torch.Tensor = None,\n steps: int = None,\n scheduler: SchedulerType = None,\n scheduler_args = None,\n sampler: SamplerType = None,\n sampler_args = None,\n inpainting_args = None,\n **kwargs\n ) -> torch.Tensor:\n \n self.generator.manual_seed(seed)\n \n method = inpainting_args.get('method')\n \n if(method == 'repaint'):\n raise Exception(\"Repaint currently not supported due to changed requirements\")\n \n elif(method == 'posterior_guidance'):\n step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)\n \n if SamplerType.is_v_sampler(sampler):\n raise Exception('V-Sampler currently not supported for posterior guidance. 
Please choose a K-Sampler.')\n else:\n x_T = audio_source + step_list[0] * torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator)\n model = PosteriorSampling(\n VDenoiser(self.model.model),\n x_T,\n audio_source,\n mask,\n inpainting_args.get('posterior_guidance_scale')\n )\n \n with self.offload_context(self.model.model):\n return sampler.sample(\n model,\n x_T,\n step_list,\n callback,\n **sampler_args\n ).float()\n \n \n def generate_extension(\n self,\n callback: Callable = None,\n batch_size: int = None,\n seed: int = None,\n audio_source: torch.Tensor = None,\n expansion_map: list[int] = None,\n steps: int = None,\n scheduler: SchedulerType = None,\n scheduler_args = None,\n sampler: SamplerType = None,\n sampler_args = None,\n inpainting_args = None,\n keep_start: bool = None,\n **kwargs\n ) -> torch.Tensor:\n \n half_chunk_size = self.model.chunk_size // 2\n chunk = torch.cat([audio_source[:, :, -half_chunk_size:], torch.zeros([batch_size, 2, half_chunk_size], device=self.device_accelerator)], dim=2)\n #chunk = audio_source\n \n mask = torch.cat(\n [torch.ones([batch_size, 2, half_chunk_size], dtype=torch.bool, device=self.device_accelerator),\n torch.zeros([batch_size, 2, half_chunk_size], dtype=torch.bool, device=self.device_accelerator)],\n dim=2 \n )\n \n output = self.generate_inpainting(\n callback,\n batch_size,\n seed,\n chunk,\n expansion_map,\n mask,\n steps,\n scheduler,\n scheduler_args,\n sampler,\n sampler_args,\n inpainting_args\n )\n \n if (keep_start):\n return torch.cat(\n [audio_source,\n output[:, :, -half_chunk_size:]],\n dim=2\n )\n else:\n return output[:, :, -half_chunk_size:]"
}
] | import subprocess, sys, os
import torch
import random
import folder_paths
import importlib
import yaml
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import audio_diffusion_pytorch
import diffusion
import folder_paths as comfy_paths
import k_diffusion
import soundfile as sf
import torchaudio
import hashlib
from pathlib import Path
from io import BytesIO
from server import PromptServer
from aiohttp import web
from folder_paths import models_dir, get_filename_list
from comfy.model_management import get_torch_device
from libs.util.util import load_audio, crop_audio
from libs.dance_diffusion.api import RequestHandler, Request, ModelType
from libs.diffusion_library.sampler import SamplerType
from libs.diffusion_library.scheduler import SchedulerType
from libs.dance_diffusion.dd.model import DDModelWrapper
from libs.dance_diffusion.dd.inference import DDInference
from scipy.fft import fft
from pydub import AudioSegment
from itertools import cycle
from pygame import mixer
| 9,223 | "id_string": ("STRING", {"default": 'ComfyUI'}),
"tame": (['Enabled', 'Disabled'],)
},
"optional": {
},
}
RETURN_TYPES = ("STRING", "AUDIO")
RETURN_NAMES = ("path","🎙️audio" )
FUNCTION = "audio_save"
OUTPUT_NODE = True
CATEGORY = "🎙️Jags_Audio"
def audio_save(self, audio, output_path=None, filename_prefix="ComfyUI", filename_delimiter='_', filename_number_padding=4, filename_number_start='false', sample_rate='_', id_string='_', tame='Enabled'):
delimiter = filename_delimiter
number_padding = filename_number_padding if filename_number_padding > 1 else 4
        return (save_audio(audio_out=(0.5 * audio).clamp(-1,1) if(tame == 'Enabled') else audio, output_path=output_path, sample_rate=sample_rate, id_str=id_string), )
class LoadAudio():
@classmethod
def INPUT_TYPES(s):
audio_extensions = ['mp3','wav']
input_dir = folder_paths.get_input_directory()
files = []
for f in os.listdir(input_dir):
if os.path.isfile(os.path.join(input_dir, f)):
file_parts = f.split('.')
if len(file_parts) > 1 and (file_parts[-1] in audio_extensions):
files.append(f)
return {
"required": {
"audio": (sorted(files),),
"sample_rate": ("INT", {"default": 44100, "min": 1, "max": 10000000000, "step": 1}),
},
"optional": {
},
}
RETURN_TYPES = ("AUDIO", "INT" )
RETURN_NAMES = ("🎙️audio","sample_rate")
FUNCTION = "LoadAudio"
OUTPUT_NODE = True
CATEGORY = "🎙️Jags_Audio"
def LoadAudio(self, audio,):
file = folder_paths.get_annotated_filepath(audio)
# TODO: support more formats
if (file.lower().endswith('.mp3')):
audio_file = AudioSegment.from_mp3(file)
else:
audio_file = AudioSegment.from_file(file, format="wav")
audio_data = AudioData(audio_file)
return (audio_data,)
#file_path = f'{comfy_dir}/custom_nodes/SampleDiffusion/audio_input/{file_path}'
@classmethod
def IS_CHANGED(self, audio, **kwargs):
audio_path = folder_paths.get_annotated_filepath(audio)
m = hashlib.sha256()
with open(audio_path, 'rb') as f:
m.update(f.read())
return m.digest().hex()
@classmethod
def VALIDATE_INPUTS(self, audio, **kwargs):
if not folder_paths.exists_annotated_filepath(audio):
return "Invalid audio file: {}".format(audio)
return True
"""
alternates
"""
#--------------------------------------------------------------------------------
class LoadAudioModelDD():
@classmethod
def INPUT_TYPES(cls):
"""
Input Types
"""
global models_folder
models = os.listdir(models_folder)
models = [x for x in models if x.endswith('.ckpt')]
return {
"required": {
""
"model": (models, {}),
"chunk_size": ("INT", {"default": 65536, "min": 32768, "max": 10000000000, "step": 32768}),
"sample_rate": ("INT", {"default": 44100, "min": 1, "max": 10000000000, "step": 1}),
"optimize_memory_use": (['Enabled', 'Disabled'], {"default": 'Enabled'}),
"autocast": (['Enabled', 'Disabled'], {"default": 'Enabled'}),
},
"optional": {
},
}
RETURN_TYPES = ("DD_MODEL", )
RETURN_NAMES = ("audio_model", )
FUNCTION = "DoLoadAudioModelDD"
OUTPUT_NODE = True
CATEGORY = "🎙️Jags_Audio/Audiotools"
def DoLoadAudioModelDD(self, model, chunk_size, sample_rate, optimize_memory_use, autocast):
global models_folder
model = os.path.join(models_folder, model)
device = get_torch_device()
wrapper = DDModelWrapper()
wrapper.load(model, device, optimize_memory_use, chunk_size, sample_rate)
| # Imports
#import v_diffusion_pytorch
def get_comfy_dir():
dirs = __file__.split('\\')
comfy_index = None
for i, dir in enumerate(dirs):
if dir == "ComfyUI":
comfy_index = i
break
if comfy_index is not None:
# Join the list up to the "ComfyUI" folder
return '\\'.join(dirs[:comfy_index+1])
else:
return None
comfy_dir = get_comfy_dir()
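# The helper above splits __file__ on "\\", so it only resolves the install root on
# Windows. A minimal, hypothetical pathlib-based sketch of an OS-independent variant
# follows; the name _get_comfy_dir_portable and the assumption that the root folder
# is literally named "ComfyUI" are illustrative and not part of the original code.
def _get_comfy_dir_portable():
    from pathlib import Path
    for parent in Path(__file__).resolve().parents:
        if parent.name == "ComfyUI":
            return str(parent)
    return None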
# ****************************************************************************
PromptServer.instance.app._client_max_size = 250 * 1024 * 1024 # 250 MB
# Add route for getting audio, duplicates view image but allows audio_input
"""
@PromptServer.instance.routes.get("/ComfyUI_Jags_Audiotools/audio")
async def view_image(request):
if "filename" in request.rel_url.query:
type = request.rel_url.query.get("type", "audio_input")
if type not in ["output", "input", "temp", "audio_input"]:
return web.Response(status=400)
output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), type)
if "subfolder" in request.rel_url.query:
full_output_dir = os.path.join(output_dir, request.rel_url.query["subfolder"])
if os.path.commonpath((os.path.abspath(full_output_dir), output_dir)) != output_dir:
return web.Response(status=403)
output_dir = full_output_dir
filename = request.rel_url.query["filename"]
filename = os.path.basename(filename)
file = os.path.join(output_dir, filename)
if os.path.isfile(file):
return web.FileResponse(file, headers={"Content-Disposition": f"filename=\"{filename}\""})
return web.Response(status=404)
"""
config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.yaml")
if not os.path.exists(config):
with open(config, "w") as f:
yaml.dump({"model_folder": f"{os.path.join(models_dir, 'audio_diffusion')}"}, f)
with open(config, "r") as f:
config = yaml.safe_load(f)
models_folder = config["model_folder"]
# init and sample_diffusion lib load
libs = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "libs")
#if not os.path.exists(os.path.join(comfy_dir, 'custom_nodes/ComfyUI_Jags_Audiotools/libs')):
#os.makedirs(os.path.join(comfy_dir, 'custom_nodes/ComfyUI_Jags_Audiotools/libs'))
#libs = os.path.join(comfy_dir, 'custom_nodes/ComfyUI_Jags_Audiotools/libs')
#if not os.path.exists(os.path.join(comfy_dir, libs)):
# os.system (os.path.join(comfy_dir, libs))
#sys.path.append(os.path.join(comfy_dir, libs ))
# PIL to Tensor
def pil2tensor(image):
return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
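# Hedged usage sketch (not in the original file, assumes Pillow is available):
# pil2tensor maps an 8-bit PIL image to a float32 tensor in [0, 1] with a leading
# batch dimension, i.e. an RGB image of size (4, 4) becomes shape [1, 4, 4, 3].
def _example_pil2tensor():
    from PIL import Image
    img = Image.new("RGB", (4, 4), color=(255, 0, 0))
    return pil2tensor(img).shape  # torch.Size([1, 4, 4, 3])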
# ****************************************************************************
# sound play functionality for audio nodes
# needs further testing
def save_audio(audio_out, output_path: str, sample_rate, id_str:str = None):
out_files = []
if not os.path.exists(output_path):
os.makedirs(output_path)
ix = 1
for sample in audio_out:
while True:
output_file = os.path.join(output_path, f"sample_{id_str}_{ix}.wav" if id_str else f"sample_{ix}.wav")
if not os.path.exists(output_file):
break
ix += 1
open(output_file, "a").close()
output = sample.cpu()
torchaudio.save(output_file, output, sample_rate)
out_files.append(output_file)
ix += 1
return out_files
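# Hedged usage sketch for save_audio (not part of the original node pack): it expects
# a batch of [channels, samples] tensors and writes one numbered .wav per element.
# The tensor shape, sample rate and output directory below are illustrative assumptions.
def _example_save_audio():
    fake_batch = torch.zeros(2, 2, 48000)  # 2 samples, stereo, 1 second at 48 kHz
    return save_audio(fake_batch, os.path.join(comfy_dir, "temp"), 48000, id_str="demo")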
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
try:
    from pygame import mixer
except ModuleNotFoundError:
    # install pygame into the current venv, then retry the import
    subprocess.check_call([sys.executable, "-m", "pip", "install", "pygame"])
    from pygame import mixer
mixer.init()
def PlaySound(path, volume):
mixer.music.load(path)
mixer.music.set_volume(volume)
mixer.music.play()
#testing the audio file for playback
# ****************************************************************************
# * NODES *
# ****************************************************************************
class AudioData:
def __init__(self, audio_file) -> None:
# Extract the sample rate
sample_rate = audio_file.frame_rate
# Get the number of audio channels
num_channels = audio_file.channels
# Extract the audio data as a NumPy array
audio_data = np.array(audio_file.get_array_of_samples())
self.audio_data = audio_data
self.sample_rate = sample_rate
self.num_channels = num_channels
def get_channel_audio_data(self, channel: int):
if channel < 0 or channel >= self.num_channels:
raise IndexError(f"Channel '{channel}' out of range. total channels is '{self.num_channels}'.")
return self.audio_data[channel::self.num_channels]
def get_channel_fft(self, channel: int):
audio_data = self.get_channel_audio_data(channel)
return fft(audio_data)
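# Hedged usage sketch, not in the original file: AudioData keeps pydub's interleaved
# sample buffer, so get_channel_audio_data() strides through it per channel before
# get_channel_fft() transforms that channel. The file path is a placeholder assumption.
def _example_audio_data():
    segment = AudioSegment.from_file("example.wav", format="wav")  # placeholder path
    data = AudioData(segment)
    left = data.get_channel_audio_data(0)  # every num_channels-th sample, starting at 0
    return left.shape, data.get_channel_fft(0).shape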
class AudioFFTData:
def __init__(self, audio_data, sample_rate) -> None:
self.fft = fft(audio_data)
self.length = len(self.fft)
self.frequency_bins = np.fft.fftfreq(self.length, 1 / sample_rate)
def get_max_amplitude(self):
return np.max(np.abs(self.fft))
def get_normalized_fft(self) -> float:
max_amplitude = self.get_max_amplitude()
return np.abs(self.fft) / max_amplitude
def get_indices_for_frequency_bands(self, lower_band_range: int, upper_band_range: int):
return np.where((self.frequency_bins >= lower_band_range) & (self.frequency_bins < upper_band_range))
def __len__(self):
return self.length
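# Hedged sketch, not in the original file: combining get_indices_for_frequency_bands
# with the normalized FFT to estimate the energy inside one band. The 100 Hz test tone
# and the 20-200 Hz band are illustrative assumptions.
def _example_band_energy():
    sample_rate = 44100
    t = np.linspace(0, 1, sample_rate, endpoint=False)
    samples = np.sin(2 * np.pi * 100 * t).astype(np.float32)  # 100 Hz tone, 1 second
    fft_data = AudioFFTData(samples, sample_rate)
    indices = fft_data.get_indices_for_frequency_bands(20, 200)
    return np.sum(fft_data.get_normalized_fft()[indices])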
# ****************************************************************************
class AudioInference():
def __init__(self):
pass
@classmethod
def INPUT_TYPES(cls):
"""
Input Types
"""
return {
"required": {
"audio_model": ("DD_MODEL", ),
"mode": (['Generation', 'Variation'],),
"batch_size": ("INT", {"default": 1, "min": 1, "max": 10000000000, "step": 1}),
"steps": ("INT", {"default": 50, "min": 1, "max": 10000000000, "step": 1}),
"sampler": (SamplerType._member_names_, {"default": "V_IPLMS"}),
"sigma_min": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1280, "step": 0.01}),
"sigma_max": ("FLOAT", {"default": 50, "min": 0.0, "max": 1280, "step": 0.01}),
"rho": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 128.0, "step": 0.01}),
"scheduler": (SchedulerType._member_names_, {"default": "V_CRASH"}),
"noise_level": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}),
"seed": ("INT", {"default": -1}),
},
"optional": {
"input_audio": ("AUDIO", {}),
"input_audio_path": ("STRING", {"default": '', "forceInput": True}),
},
}
RETURN_TYPES = ("LIST", "AUDIO", "INT")
RETURN_NAMES = ("out_paths", "🎙️audio", "sample_rate")
FUNCTION = "do_sample"
CATEGORY = "🎙️Jags_Audio/AudioInference"
def do_sample(self, audio_model, mode, batch_size, steps, sampler, sigma_min, sigma_max, rho, scheduler, input_audio_path='', input_audio=None, noise_level=0.7, seed=-1):
wrapper, inference = audio_model
device_type_accelerator = get_torch_device()
device_accelerator = torch.device(device_type_accelerator)
device_offload = get_torch_device()
crop = lambda audio: crop_audio(audio, wrapper.chunk_size, 0)
        input_tensor = input_audio  # optional AUDIO input; None falls back to the path input
        if input_tensor is None:
input_audio_path = None if input_audio_path == '' else input_audio_path
load_input = lambda source: crop(load_audio(device_accelerator, source, wrapper.sample_rate)) if source is not None else None
audio_source = load_input(input_audio_path)
else:
if len(input_tensor.shape) == 3:
# remove first (batch) dimension
input_tensor = input_tensor[0]
if input_tensor.shape[0] != 2:
channels, sample_length = input_tensor.shape
input_tensor = input_tensor.view(1, sample_length).repeat(2, 1)
input_tensor = input_tensor.to(get_torch_device())
audio_source = crop(input_tensor)
request_handler = RequestHandler(device_accelerator, device_offload, optimize_memory_use=False, use_autocast=True)
seed = seed if(seed!=-1) else torch.randint(0, 4294967294, [1], device=device_type_accelerator).item()
print(f"Using accelerator: {device_type_accelerator}, Seed: {seed}.")
request = Request(
request_type=mode,
model_path=wrapper.path,
model_type=ModelType.DD,
model_chunk_size=wrapper.chunk_size,
model_sample_rate=wrapper.sample_rate,
model_wrapper=wrapper,
model_inference=inference,
seed=seed,
batch_size=batch_size,
audio_source=audio_source,
audio_target=None,
mask=None,
noise_level=noise_level,
interpolation_positions=None,
resamples=None,
keep_start=True,
steps=steps,
sampler_type=SamplerType[sampler],
sampler_args={'use_tqdm': True},
scheduler_type=SchedulerType[scheduler],
scheduler_args={
'sigma_min': sigma_min,
'sigma_max': sigma_max,
'rho': rho,
}
)
response = request_handler.process_request(request)#, lambda **kwargs: print(f"{kwargs['step'] / kwargs['x']}"))
paths = save_audio(response.result, f"{comfy_dir}/temp", wrapper.sample_rate, f"{seed}_{random.randint(0, 100000)}")
return (paths, response.result, wrapper.sample_rate)
class SaveAudio():
def __init__(self):
self.output_dir = comfy_paths.output_directory
self.type = os.path.basename(self.output_dir)
@classmethod
def INPUT_TYPES(cls):
"""
Save Audio files
"""
return {
"required": {
"audio": ("AUDIO", ),
"output_path": ("STRING", {"default": '[time(%Y-%m-%d)]', "multiline": False}),
"filename_prefix": ("STRING", {"default": "ComfyUI"}),
"filename_delimiter": ("STRING", {"default":"_"}),
"filename_number_padding": ("INT", {"default":4, "min":1, "max":9, "step":1}),
"filename_number_start": (["false", "true"],),
"sample_rate": ("INT", {"default": 44100, "min": 1, "max": 10000000000, "step": 1}),
"id_string": ("STRING", {"default": 'ComfyUI'}),
"tame": (['Enabled', 'Disabled'],)
},
"optional": {
},
}
RETURN_TYPES = ("STRING", "AUDIO")
RETURN_NAMES = ("path","🎙️audio" )
FUNCTION = "audio_save"
OUTPUT_NODE = True
CATEGORY = "🎙️Jags_Audio"
def audio_save(self, audio, output_path=None, filename_prefix="ComfyUI", filename_delimiter='_', filename_number_padding=4, filename_number_start='false', sample_rate='_', id_string='_', tame='Enabled'):
delimiter = filename_delimiter
number_padding = filename_number_padding if filename_number_padding > 1 else 4
        return (save_audio(audio_out=(0.5 * audio).clamp(-1,1) if(tame == 'Enabled') else audio, output_path=output_path, sample_rate=sample_rate, id_str=id_string), )
class LoadAudio():
@classmethod
def INPUT_TYPES(s):
audio_extensions = ['mp3','wav']
input_dir = folder_paths.get_input_directory()
files = []
for f in os.listdir(input_dir):
if os.path.isfile(os.path.join(input_dir, f)):
file_parts = f.split('.')
if len(file_parts) > 1 and (file_parts[-1] in audio_extensions):
files.append(f)
return {
"required": {
"audio": (sorted(files),),
"sample_rate": ("INT", {"default": 44100, "min": 1, "max": 10000000000, "step": 1}),
},
"optional": {
},
}
RETURN_TYPES = ("AUDIO", "INT" )
RETURN_NAMES = ("🎙️audio","sample_rate")
FUNCTION = "LoadAudio"
OUTPUT_NODE = True
CATEGORY = "🎙️Jags_Audio"
def LoadAudio(self, audio,):
file = folder_paths.get_annotated_filepath(audio)
# TODO: support more formats
if (file.lower().endswith('.mp3')):
audio_file = AudioSegment.from_mp3(file)
else:
audio_file = AudioSegment.from_file(file, format="wav")
audio_data = AudioData(audio_file)
return (audio_data,)
#file_path = f'{comfy_dir}/custom_nodes/SampleDiffusion/audio_input/{file_path}'
@classmethod
def IS_CHANGED(self, audio, **kwargs):
audio_path = folder_paths.get_annotated_filepath(audio)
m = hashlib.sha256()
with open(audio_path, 'rb') as f:
m.update(f.read())
return m.digest().hex()
@classmethod
def VALIDATE_INPUTS(self, audio, **kwargs):
if not folder_paths.exists_annotated_filepath(audio):
return "Invalid audio file: {}".format(audio)
return True
"""
alternates
"""
#--------------------------------------------------------------------------------
class LoadAudioModelDD():
@classmethod
def INPUT_TYPES(cls):
"""
Input Types
"""
global models_folder
models = os.listdir(models_folder)
models = [x for x in models if x.endswith('.ckpt')]
return {
"required": {
""
"model": (models, {}),
"chunk_size": ("INT", {"default": 65536, "min": 32768, "max": 10000000000, "step": 32768}),
"sample_rate": ("INT", {"default": 44100, "min": 1, "max": 10000000000, "step": 1}),
"optimize_memory_use": (['Enabled', 'Disabled'], {"default": 'Enabled'}),
"autocast": (['Enabled', 'Disabled'], {"default": 'Enabled'}),
},
"optional": {
},
}
RETURN_TYPES = ("DD_MODEL", )
RETURN_NAMES = ("audio_model", )
FUNCTION = "DoLoadAudioModelDD"
OUTPUT_NODE = True
CATEGORY = "🎙️Jags_Audio/Audiotools"
def DoLoadAudioModelDD(self, model, chunk_size, sample_rate, optimize_memory_use, autocast):
global models_folder
model = os.path.join(models_folder, model)
device = get_torch_device()
wrapper = DDModelWrapper()
wrapper.load(model, device, optimize_memory_use, chunk_size, sample_rate)
| inference = DDInference(device, device, optimize_memory_use, autocast, wrapper)
| 6 | 2023-11-28 09:09:59+00:00 | 12k |
rafapablos/w4c23-rainai | train.py | [
{
"identifier": "UNetModule",
"path": "w4c23/models/unet/lightning.py",
"snippet": "class UNetModule(BaseModule):\n def __init__(self, model_params: dict, params: dict):\n super().__init__(model_params, params)\n\n self.input_crop = model_params[\"input_crop\"]\n self.padding = model_params[\"padding\"]\n self.conditioning_lead_time = model_params[\"conditioning_lead_time\"]\n\n self.example_input_array = torch.rand(\n self.bs,\n self.in_channels,\n self.history_length,\n self.input_crop,\n self.input_crop,\n )\n\n self.center_crop = torchvision.transforms.CenterCrop(self.input_crop)\n\n # UNet Model\n if self.conditioning_lead_time:\n extra_channels = 1\n classes = self.num_classes\n else:\n extra_channels = 0\n classes = self.forecast_length * self.num_classes\n self.model = UNet(\n in_channels=extra_channels + self.history_length * self.in_channels,\n classes=classes,\n )\n\n # Combine time and channels\n self.stack_time = lambda x: rearrange(x, \"b c t h w -> b (c t) h w\")\n\n # UNET processes 4D inputs / outputs\n if self.conditioning_lead_time:\n # Stack forecast timesteps as different samples in the batch\n self.stack_time_in_batch = lambda x: rearrange(\n x, \"b t c h w -> (b t) c h w\"\n )\n\n self.unstack_time_from_batch = lambda x: rearrange(\n x,\n \"(b t) c h w -> b c t h w\",\n b=self.bs,\n t=self.forecast_length,\n )\n else:\n self.unstack_time_from_channel = lambda x: rearrange(\n x,\n \"b (c t) h w -> b c t h w\",\n t=self.forecast_length,\n c=self.num_classes,\n )\n\n self.save_hyperparameters()\n\n def add_leadtime_input(self, input):\n \"\"\"\n Add forecast lead time to the input as an additional channel and geenrate sample for every lead time\n Input: B,T_HxC,H,W\n Output: B,T_F,T_HxC+1,H,W\n T_H is history timesteps, T_F is forecast timesteps\n \"\"\"\n b, _, h, w = input.shape\n\n # Repeat sample for every forecast timestep\n input = repeat(input, \"b c h w -> b t c h w\", t=self.forecast_length)\n\n # Create conditioning lead time channel\n clt = torch.arange(self.forecast_length, device=self.device)\n clt = repeat(clt, \"t -> b t c h w\", b=b, c=1, h=h, w=w)\n\n # Add conditioning lead time to input\n return torch.cat([input, clt], dim=2)\n\n def transform_input(self, input):\n # TODO - Change to data loader\n input = self.center_crop(input)\n return input\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n # Pad input to be divisible by 32\n if self.padding:\n x = torchvision.transforms.Pad(\n self.padding, fill=0, padding_mode=\"constant\"\n )(x)\n\n # Combine timestep as different channels\n # B,C,T,H,W -> B,TxC,H,W (t is history)\n x = self.stack_time(x)\n\n if self.conditioning_lead_time:\n # Add conditioning leadtime with an extra channel and sample for every lead time\n # B,T_HxC,H,W -> B,T_F,T_HxC+1,H,W\n x = self.add_leadtime_input(x)\n # Combine forecast timesteps as different samples in batch\n x = self.stack_time_in_batch(x)\n # Run through UNet\n x = self.model(x)\n # Unstack channels to obtain sequences of timesteps\n # BxT,C,H,W -> B,C,T,H,W (t is forecast)\n x = self.unstack_time_from_batch(x)\n else:\n # Obtain output from UNet with channel for each bucket and forecast timestep\n x = self.model(x)\n # Unstack channels to obtain bucket channels for each timestep in different dimensions\n # B,TxC,H,W -> B,C,T,H,W (t is forecast)\n x = self.unstack_time_from_channel(x)\n\n # Take original center region\n if self.padding:\n x = x[:, :, :, self.padding : -self.padding, self.padding : -self.padding]\n\n # Apply activation function (i.e. 
softmax if probabilistic loss)\n if self.activation_fn:\n x = self.activation_fn(x)\n return x"
},
{
"identifier": "SWINModule",
"path": "w4c23/models/swin/lightning.py",
"snippet": "class SWINModule(BaseModule):\n def __init__(self, model_params: dict, params: dict):\n super().__init__(model_params, params)\n\n self.input_crop = model_params[\"input_crop\"]\n self.padding = model_params[\"padding\"]\n self.conditioning_lead_time = model_params[\"conditioning_lead_time\"]\n\n self.example_input_array = torch.rand(\n self.bs,\n self.in_channels,\n self.history_length,\n self.input_crop,\n self.input_crop,\n )\n\n self.center_crop = torchvision.transforms.CenterCrop(self.input_crop)\n\n # SWIN Model\n if self.conditioning_lead_time:\n extra_channels = 1\n classes = self.num_classes\n else:\n extra_channels = 0\n classes = self.forecast_length * self.num_classes\n self.model = SwinTransformer(\n input_size=self.input_crop,\n input_channels=extra_channels + self.history_length * self.in_channels,\n num_classes=classes,\n patch_size=[4, 4],\n embed_dim=128,\n depths=[2, 2, 18, 2],\n num_heads=[4, 8, 16, 32], # [3, 6, 12, 24],\n window_size=[7, 7],\n stochastic_depth_prob=0.0,\n )\n\n # Combine time and channels\n self.stack_time = lambda x: rearrange(x, \"b c t h w -> b (c t) h w\")\n\n # SWIN processes 4D inputs / outputs\n if self.conditioning_lead_time:\n # Stack forecast timesteps as different samples in the batch\n self.stack_time_in_batch = lambda x: rearrange(\n x, \"b t c h w -> (b t) c h w\"\n )\n\n self.unstack_time_from_batch = lambda x: rearrange(\n x,\n \"(b t) c h w -> b c t h w\",\n b=self.bs,\n t=self.forecast_length,\n )\n else:\n self.unstack_time_from_channel = lambda x: rearrange(\n x,\n \"b (c t) h w -> b c t h w\",\n t=self.forecast_length,\n c=self.num_classes,\n )\n\n self.save_hyperparameters()\n\n def add_leadtime_input(self, input):\n \"\"\"\n Add forecast lead time to the input as an additional channel and geenrate sample for every lead time\n Input: B,T_HxC,H,W\n Output: B,T_F,T_HxC+1,H,W\n T_H is history timesteps, T_F is forecast timesteps\n \"\"\"\n b, _, h, w = input.shape\n\n # Repeat sample for every forecast timestep\n input = repeat(input, \"b c h w -> b t c h w\", t=self.forecast_length)\n\n # Create conditioning lead time channel\n clt = torch.arange(self.forecast_length, device=self.device)\n clt = repeat(clt, \"t -> b t c h w\", b=b, c=1, h=h, w=w)\n\n # Add conditioning lead time to input\n return torch.cat([input, clt], dim=2)\n\n def transform_input(self, input):\n # TODO - Change to data loader\n input = self.center_crop(input)\n return input\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n # Pad input to be divisible by 32\n if self.padding:\n x = torchvision.transforms.Pad(\n self.padding, fill=0, padding_mode=\"constant\"\n )(x)\n\n # Combine timestep as different channels\n # B,C,T,H,W -> B,TxC,H,W (t is history)\n x = self.stack_time(x)\n\n if self.conditioning_lead_time:\n # Add conditioning leadtime with an extra channel and sample for every lead time\n # B,T_HxC,H,W -> B,T_F,T_HxC+1,H,W\n x = self.add_leadtime_input(x)\n # Combine forecast timesteps as different samples in batch\n x = self.stack_time_in_batch(x)\n # Run through UNet\n x = self.model(x)\n # Unstack channels to obtain sequences of timesteps\n # BxT,C,H,W -> B,C,T,H,W (t is forecast)\n x = self.unstack_time_from_batch(x)\n else:\n # Obtain output from UNet with channel for each bucket and forecast timestep\n x = self.model(x)\n # Unstack channels to obtain bucket channels for each timestep in different dimensions\n # B,TxC,H,W -> B,C,T,H,W (t is forecast)\n x = self.unstack_time_from_channel(x)\n\n # Take original center region\n if 
self.padding:\n x = x[:, :, :, self.padding : -self.padding, self.padding : -self.padding]\n\n # Apply activation function (i.e. softmax if probabilistic loss)\n if self.activation_fn:\n x = self.activation_fn(x)\n\n # print(x[:, 0, :, :, :].sum())\n return x"
},
{
"identifier": "LogMetrics",
"path": "w4c23/callbacks/log_metrics.py",
"snippet": "class LogMetrics(pl.Callback):\n def __init__(self, num_leadtimes, probabilistic, buckets, logging):\n super().__init__()\n self.num_leadtimes = num_leadtimes\n self.probabilistic = probabilistic\n\n if buckets != \"none\":\n self.buckets = BUCKET_CONSTANTS[buckets]\n else:\n self.buckets = None\n\n self.logging = logging\n self.thresholds = [0.2, 1, 5, 10, 15]\n\n # # Code for checking if a metric can be optimized\n # check_forward_full_state_property(\n # metrics.MeanSquaredError,\n # input_args={\n # \"prediction\": torch.Tensor([0.5, 2.5]),\n # \"label\": torch.Tensor([1.0, 2.0]),\n # \"mask\": torch.zeros([2], dtype=bool),\n # },\n # )\n\n def _threshold_str(self, threshold):\n \"\"\"Remove .0 and change . by -\"\"\"\n return f\"{threshold:g}\".replace(\".\", \"-\")\n\n def setup(self, trainer, pl_module, stage):\n # Setup scalar metrics\n scalar_metrics = {}\n scalar_metrics[\"mse\"] = metrics.MeanSquaredError()\n scalar_metrics[\"mae\"] = metrics.MeanAverageError()\n\n for threshold in self.thresholds:\n csi = metrics.CriticalSuccessIndex(threshold=threshold)\n scalar_metrics[f\"csi_{self._threshold_str(threshold)}\"] = csi\n scalar_metrics[\"avg_csi\"] = metrics.AverageCriticalSuccessIndex(\n thresholds=self.thresholds\n )\n\n if self.probabilistic:\n scalar_metrics[\"crps\"] = metrics.ContinuousRankedProbabilityScore(\n self.buckets\n )\n\n # Create metric collections and put metrics on module to automatically place on correct device\n val_scalar_metrics = torchmetrics.MetricCollection(scalar_metrics)\n pl_module.val_metrics = val_scalar_metrics.clone(prefix=\"val/\")\n\n # Lead time metrics\n lead_time_metrics = {}\n lead_time_metrics[f\"mse\"] = metrics.MeanSquaredError(\n num_leadtimes=self.num_leadtimes\n )\n for threshold in self.thresholds:\n csi = metrics.CriticalSuccessIndex(\n threshold=threshold, num_leadtimes=self.num_leadtimes\n )\n lead_time_metrics[f\"csi_{self._threshold_str(threshold)}\"] = csi\n lead_time_metrics[\"avg_csi\"] = metrics.AverageCriticalSuccessIndex(\n thresholds=self.thresholds, num_leadtimes=self.num_leadtimes\n )\n pl_module.lead_time_metrics = torchmetrics.MetricCollection(lead_time_metrics)\n\n def on_validation_batch_end(\n self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx=0\n ):\n \"\"\"Called after each validation batch with scalar and lead time metrics\"\"\"\n _, label, metadata = batch\n pl_module.val_metrics(outputs, label, metadata[\"target\"][\"mask\"])\n pl_module.lead_time_metrics(outputs, label, metadata[\"target\"][\"mask\"])\n\n def on_validation_epoch_end(self, trainer, pl_module):\n # Log validation scalar metrics\n pl_module.log_dict(\n pl_module.val_metrics, on_step=False, on_epoch=True, sync_dist=True\n )\n\n # Compute and log lead time metrics\n lead_time_metrics = pl_module.lead_time_metrics.compute()\n lead_time_metrics_dict = {}\n wandb_data = []\n for metric_name, arr in lead_time_metrics.items():\n # Add to logging dictionary\n for leadtime, value in enumerate(arr):\n lead_time_metrics_dict[f\"val_time/{metric_name}_{leadtime+1}\"] = value\n # Save to file (tensorboard)\n if self.logging == \"tensorboard\":\n file_path = os.path.join(\n pl_module.logger.log_dir, f\"val_lead_time_{metric_name}.pt\"\n )\n torch.save(arr.cpu(), file_path)\n # Generate table for wandb\n elif self.logging == \"wandb\":\n columns = [\"metric\"] + [f\"t_{i+1}\" for i in range(len(arr))]\n wandb_data.append([metric_name] + arr.tolist())\n\n # Save table in wandb\n if self.logging == \"wandb\":\n 
pl_module.logger.log_table(\n key=\"leadtimes\", columns=columns, data=wandb_data\n )\n\n # Save lead time metrics over time\n pl_module.log_dict(\n lead_time_metrics_dict, on_step=False, on_epoch=True, sync_dist=True\n )\n pl_module.lead_time_metrics.reset()"
},
{
"identifier": "ImageLogger",
"path": "w4c23/callbacks/log_images.py",
"snippet": "class ImageLogger(pl.Callback):\n def __init__(self, logging):\n super().__init__()\n self.logging = logging\n self.step = 0\n\n def on_validation_batch_end(\n self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx=0\n ):\n if batch_idx != 0:\n return\n\n summary_writer = pl_module.logger.experiment\n\n _, label, metadata = batch\n batch_mask = metadata[\"target\"][\"mask\"].cpu()\n batch_labels = label.cpu()\n batch_preds = outputs.intensity.cpu()\n batch_size = batch_preds.size(0)\n\n for i in range(batch_size):\n figure = self.plotSampleComparison(batch_labels, batch_preds, batch_mask, i)\n if self.logging == \"tensorboard\":\n summary_writer.add_figure(f\"val_examples/{i}\", figure, self.step)\n else:\n summary_writer.log({f\"val_examples/{i}\": figure})\n\n self.step += 1\n\n def plotSampleComparison(self, target, prediction, mask, sample_index):\n # Mask out values\n prediction[mask] = np.nan\n target[mask] = np.nan\n\n forecast_length = target.shape[2]\n fig, axes = plt.subplots(nrows=2, ncols=forecast_length, figsize=(20, 3))\n\n # Add color bar\n fig.subplots_adjust(right=0.8)\n cbar_ax = fig.add_axes([0.82, 0.17, 0.02, 0.65])\n\n # Plot labels\n for t in range(forecast_length):\n target_sample = target[sample_index, 0, t]\n plot_precip_field(\n target_sample, colorbar=t == 0, ax=axes[0][t], cax=cbar_ax\n )\n\n # Plot predictions\n for t in range(forecast_length):\n pred_sample = prediction[sample_index, 0, t]\n plot_precip_field(pred_sample, colorbar=False, ax=axes[1][t])\n return fig"
},
{
"identifier": "get_cuda_memory_usage",
"path": "w4c23/utils/data_utils.py",
"snippet": "def get_cuda_memory_usage(gpus):\n \"\"\"Get the GPU memory usage\n\n Args:\n gpus (list): list of GPUs\n \"\"\"\n for gpu in gpus:\n r = torch.cuda.memory_reserved(gpu)\n a = torch.cuda.memory_allocated(gpu)\n f = r - a # free inside reserved\n print(\"GPU\", gpu, \"CUDA memory reserved:\", r, \"allocated:\", a, \"free:\", f)"
},
{
"identifier": "tensor_to_submission_file",
"path": "w4c23/utils/data_utils.py",
"snippet": "def tensor_to_submission_file(predictions, predict_params):\n \"\"\"saves prediction tesnor to submission .h5 file\n\n Args:\n predictions (numpy array): data cube of predictions\n predict_params (dict): dictionary of parameters for prediction\n \"\"\"\n\n path = os.path.join(\n predict_params[\"submission_out_dir\"], str(predict_params[\"year_to_predict\"])\n )\n if not os.path.exists(path):\n os.makedirs(path)\n\n submission_file_name = predict_params[\"region_to_predict\"] + \".pred.h5\"\n submission_path = os.path.join(path, submission_file_name)\n h5f = h5py.File(submission_path, \"w\")\n h5f.create_dataset(\"submission\", data=predictions.squeeze())\n h5f.close()"
},
{
"identifier": "load_config",
"path": "w4c23/utils/config.py",
"snippet": "def load_config(config_path):\n \"\"\"Load confgiuration file\n\n Args:\n config_path (String): path to configuration file\n\n Returns:\n dict: configuration file\n \"\"\"\n with open(config_path) as file:\n config = yaml.load(file, Loader)\n return config"
},
{
"identifier": "RainData",
"path": "w4c23/utils/w4c_dataloader.py",
"snippet": "class RainData(Dataset):\n def __init__(\n self,\n data_split,\n project_root=\"\",\n data_root=\"\",\n input_product=\"REFL-BT\",\n compute_seq=True,\n output_product=\"RATE\",\n sat_bands=[],\n preprocess_OPERA=None,\n size_target_center=None,\n full_opera_context=None,\n preprocess_HRIT=None,\n path_to_sample_ids=\"\",\n len_seq_in=4,\n len_seq_predict=32,\n regions=[\"boxi_0015\"],\n regions_def={},\n generate_samples=False,\n latlon_path=\"\",\n altitudes_path=\"\",\n splits_path=None,\n swap_time_ch=False,\n years=None,\n from_raw=True, # TODO - Check and update ZARR if needed\n static_data=False,\n max_samples=None,\n stats_path=None,\n **kwargs,\n ):\n start = timer()\n # Data Dimensions\n self.len_seq_in = len_seq_in\n self.len_seq_predict = len_seq_predict\n self.channel_dim = 1 # where to concat channels in structure\n\n # type of data & processing variables\n self.sat_bands = sat_bands\n self.regions = regions\n self.input_product = input_product\n self.output_product = output_product\n self.preprocess_target = preprocess_OPERA\n self.size_target_center = size_target_center\n self.full_opera_context = full_opera_context\n self.preprocess_input = preprocess_HRIT\n self.path_to_sample_ids = path_to_sample_ids\n self.regions_def = regions_def\n self.generate_samples = generate_samples\n self.path_to_sample_ids = path_to_sample_ids\n self.swap_time_ch = swap_time_ch\n self.years = years\n self.static_data = static_data\n\n # data splits to load (training/validation/test)\n self.root = project_root\n self.data_root = data_root\n self.data_split = data_split\n\n self.sample_method = importance_sampling\n\n self.zarr_file = None\n zarr_path = (\n f\"../../../../media/data/weather4cast2023/{self.data_split}_data_sub.zarr\"\n )\n if not from_raw and os.path.exists(zarr_path):\n self.zarr_file = zarr.open_group(zarr_path, \"r\")\n\n else:\n self.splits_df = load_timestamps(splits_path)\n # prepare all elements to load - sample idx will use the object 'self.idx'\n self.idxs = load_sample_ids(\n self.data_split,\n self.splits_df,\n self.len_seq_in,\n self.len_seq_predict,\n self.regions,\n self.generate_samples,\n self.years,\n self.path_to_sample_ids,\n )\n\n # LOAD DATASET\n self.in_ds = load_dataset(\n self.data_root, self.data_split, self.regions, years, self.input_product\n )\n if self.data_split not in [\"test\", \"heldout\"]:\n self.out_ds = load_dataset(\n self.data_root,\n self.data_split,\n self.regions,\n years,\n self.output_product,\n )\n else:\n self.out_ds = []\n\n # Load static data if any\n if self.static_data:\n self.static_ds = load_static_dataset(\n self.data_root,\n self.regions,\n )\n\n # Sample samples based on importance sampling\n if max_samples[self.data_split] is not None:\n self.idxs = self.sample_method(\n self.idxs,\n max_samples[self.data_split],\n stats_path[self.data_split],\n self.out_ds,\n self.preprocess_target,\n )\n\n def __len__(self):\n \"\"\"total number of samples (sequences of in:4-out:1 in our case) to train\"\"\"\n # print(len(self.idxs), \"-------------------\", self.data_split)\n if self.zarr_file:\n return len(self.zarr_file[self.output_product])\n return len(self.idxs)\n\n def load_in(self, in_seq, seq_r, metadata, loaded_input=False):\n in0 = time.time()\n input_data, in_masks = get_sequence(\n in_seq,\n self.data_root,\n self.data_split,\n seq_r,\n self.input_product,\n self.sat_bands,\n self.preprocess_input,\n self.swap_time_ch,\n self.in_ds,\n )\n\n if VERBOSE:\n print(np.shape(input_data), time.time() - in0, \"in sequence 
time\")\n return input_data, metadata\n\n def load_out(self, out_seq, seq_r, metadata):\n t1 = time.time()\n # GROUND TRUTH (OUTPUT)\n if self.data_split not in [\"test\", \"heldout\"]:\n output_data, out_masks = get_sequence(\n out_seq,\n self.data_root,\n self.data_split,\n seq_r,\n self.output_product,\n [],\n self.preprocess_target,\n self.swap_time_ch,\n self.out_ds,\n )\n\n # collapse time to channels\n metadata[\"target\"][\"mask\"] = out_masks\n else: # Just return [] if its test/heldout data\n output_data = np.array([])\n if VERBOSE:\n print(time.time() - t1, \"out sequence\")\n return output_data, metadata\n\n def load_in_out(self, in_seq, out_seq=None, seq_r=None):\n metadata = {\n \"input\": {\"mask\": [], \"timestamps\": in_seq},\n \"target\": {\"mask\": [], \"timestamps\": out_seq},\n }\n\n t0 = time.time()\n input_data, metadata = self.load_in(in_seq, seq_r, metadata)\n output_data, metadata = self.load_out(out_seq, seq_r, metadata)\n\n # Add static data as metadata\n if self.static_data:\n metadata[\"input\"][\"topo\"] = self.static_ds[seq_r][\"HRIT\"][\"topo\"]\n metadata[\"input\"][\"lat-long\"] = self.static_ds[seq_r][\"HRIT\"][\"lat-long\"]\n metadata[\"target\"][\"topo\"] = self.static_ds[seq_r][\"OPERA\"][\"topo\"]\n metadata[\"target\"][\"lat-long\"] = self.static_ds[seq_r][\"OPERA\"][\"lat-long\"]\n\n if VERBOSE:\n print(time.time() - t0, \"seconds\")\n return input_data, output_data, metadata\n\n def __getitem__(self, idx):\n if self.zarr_file:\n radar = torch.tensor(self.zarr_file[self.output_product][idx])\n sat = torch.tensor(self.zarr_file[self.input_product][idx])\n mask = torch.tensor(self.zarr_file.MASK[idx])\n return sat, radar, {\"target\": {\"mask\": mask}}\n\n \"\"\"load 1 sequence (1 sample)\"\"\"\n in_seq = self.idxs[idx][0]\n out_seq = self.idxs[idx][1]\n seq_r = self.idxs[idx][2]\n # # print(\"=== DEBUG in_seq: \",in_seq, file=sys.stderr);\n # print(\"=== DEBUG in_seq: \",in_seq);\n return self.load_in_out(in_seq, out_seq, seq_r)"
}
] | import os
import torch
import datetime
import argparse
import boto3
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.utils.data import DataLoader
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from w4c23.models.unet import UNetModule
from w4c23.models.swin import SWINModule
from w4c23.callbacks.log_metrics import LogMetrics
from w4c23.callbacks.log_images import ImageLogger
from w4c23.utils.data_utils import get_cuda_memory_usage, tensor_to_submission_file
from w4c23.utils.config import load_config
from w4c23.utils.w4c_dataloader import RainData | 8,334 | ]
if params["train"]["early_stopping"]:
callback_funcs.append(
EarlyStopping(
monitor="val/loss", patience=params["train"]["patience"], mode="min"
)
)
# Add metrics and image logging
if params["experiment"]["logging"] != "none":
callback_funcs.append(
LogMetrics(
num_leadtimes=params["model"]["forecast_length"],
probabilistic=params["train"]["probabilistic"],
buckets=params["model"]["buckets"],
logging=params["experiment"]["logging"],
)
)
callback_funcs.append(ImageLogger(params["experiment"]["logging"]))
# Training accelerators
accelerator = None
if gpus[0] == -1:
gpus = None
else:
accelerator = "cuda"
print(f"====== process started on the following GPUs: {gpus} ======")
trainer = pl.Trainer(
devices=gpus,
max_epochs=max_epochs,
deterministic=params["model"]["deterministic"],
logger=logger,
callbacks=callback_funcs,
precision=params["experiment"]["precision"],
gradient_clip_val=params["model"]["gradient_clip_val"],
gradient_clip_algorithm=params["model"]["gradient_clip_algorithm"],
accelerator=accelerator,
strategy="ddp_find_unused_parameters_false",
profiler="simple",
log_every_n_steps=5,
accumulate_grad_batches=params["train"]["accumulate_grad_batches"]
if "accumulate_grad_batches" in params["train"].keys()
else 1,
)
return trainer
def do_predict(trainer, model, predict_params, test_data):
scores = trainer.predict(model, dataloaders=test_data)
scores = torch.concat(scores)
tensor_to_submission_file(scores, predict_params)
def do_val(trainer, model, val_data):
scores = trainer.validate(model, dataloaders=val_data)
print(scores[0])
def train(params, gpus, mode, checkpoint_path, model):
"""Main training/evaluation method."""
# Remove extra regions/years in predict mode and disable logging
if mode == "predict":
params["dataset"]["regions"] = [params["predict"]["region_to_predict"]]
params["dataset"]["years"] = [params["predict"]["year_to_predict"]]
params["experiment"]["logging"] = "none"
# ------------
# Model & data
# ------------
get_cuda_memory_usage(gpus)
data = DataModule(params["dataset"], params["train"], mode)
model = load_model(model, params, checkpoint_path)
# ------------
# Trainer
# ------------
trainer = get_trainer(gpus, params)
# ------------
# Train & final validation
# ------------
if mode == "train":
print("------------------")
print("--- TRAIN MODE ---")
print("------------------")
trainer.fit(model, data)
# ------------
# Validation
# ------------
if mode == "val":
print("---------------------")
print("--- VALIDATE MODE ---")
print("---------------------")
do_val(trainer, model, data.val_dataloader())
# ------------
# Prediction
# ------------
if mode == "predict" or mode == "heldout":
print("--------------------")
print("--- PREDICT MODE ---")
print("--------------------")
print(
"REGIONS: ",
params["dataset"]["regions"],
params["predict"]["region_to_predict"],
)
if params["predict"]["region_to_predict"] not in params["dataset"]["regions"]:
print(
'EXITING... "regions" and "regions to predict" must indicate the same region name in your config file.'
)
else:
do_predict(trainer, model, params["predict"], data.test_dataloader())
def update_params_based_on_args(options):
config_p = os.path.join("configurations", options.config_path)
| # Weather4cast 2023 Starter Kit
#
# This Starter Kit builds on and extends the Weather4cast 2022 Starter Kit,
# the original license for which is included below.
#
# In line with the provisions of this license, all changes and additional
# code are also released under the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Weather4cast 2022 Starter Kit
#
# Copyright (C) 2022
# Institute of Advanced Research in Artificial Intelligence (IARAI)
# This file is part of the Weather4cast 2022 Starter Kit.
#
# The Weather4cast 2022 Starter Kit is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# The Weather4cast 2022 Starter Kit is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Contributors: Aleksandra Gruca, Pedro Herruzo, David Kreil, Stephen Moran
pl.seed_everything(42, workers=True)
class DataModule(pl.LightningDataModule):
"""Class to handle training/validation/predict/heldout splits."""
def __init__(self, params, training_params, mode):
super().__init__()
self.params = params
self.training_params = training_params
if mode in ["train"]:
print("Loading TRAINING/VALIDATION dataset")
self.train_ds = RainData("training", **self.params)
self.val_ds = RainData("validation", **self.params)
print(f"Training dataset size: {len(self.train_ds)}")
if mode in ["val"]:
print("Loading VALIDATION dataset")
self.val_ds = RainData("validation", **self.params)
if mode in ["predict"]:
print("Loading PREDICTION/TEST dataset")
self.test_ds = RainData("test", **self.params)
if mode in ["heldout"]:
print("Loading HELD-OUT dataset")
self.test_ds = RainData("heldout", **self.params)
def __load_dataloader(self, dataset, shuffle=True, pin=True):
dl = DataLoader(
dataset,
batch_size=self.training_params["batch_size"],
num_workers=self.training_params["n_workers"],
shuffle=shuffle,
pin_memory=pin,
)
return dl
def train_dataloader(self):
return self.__load_dataloader(self.train_ds, shuffle=True, pin=True)
def val_dataloader(self):
return self.__load_dataloader(self.val_ds, shuffle=False, pin=True)
def test_dataloader(self):
return self.__load_dataloader(self.test_ds, shuffle=False, pin=True)
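# Hedged sketch, not part of the original script: exercising DataModule directly,
# outside the Lightning Trainer. The params keys mirror how train() builds the module
# below; concrete values normally come from the YAML files under configurations/.
def _example_datamodule(params):
    data = DataModule(params["dataset"], params["train"], mode="train")
    input_seq, target_seq, metadata = next(iter(data.train_dataloader()))
    return input_seq.shape, target_seq.shape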
def load_model(Model, params, checkpoint_path=""):
"""Load a model from a checkpoint or from scratch if checkpoint_path=''"""
p = {**params["experiment"], **params["dataset"], **params["train"]}
if checkpoint_path == "":
print("-> Modelling from scratch! (no checkpoint loaded)")
model = Model(params["model"], p)
else:
print(f"-> Loading model checkpoint: {checkpoint_path}")
model = Model.load_from_checkpoint(
checkpoint_path, model_params=params["model"], params=p
)
return model
def get_trainer(gpus, params):
date_time = datetime.datetime.now().strftime("%m%d-%H:%M")
version = params["experiment"]["name"]
version = version + "_" + date_time
max_epochs = params["train"]["max_epochs"]
print("Training for", max_epochs, "epochs")
# Set logger for wandb or tensorboard
if params["experiment"]["logging"] == "wandb":
# Get wandb key (this is only required for wandb logging in aws)
if params["experiment"]["aws"]:
client = boto3.client("ssm", region_name="eu-central-1")
try:
os.environ["WANDB_API_KEY"] = client.get_parameter(
Name="salami-training-w4c23-wandb-api-key", WithDecryption=True
)["Parameter"]["Value"]
print("WandB should be running in online mode")
except Exception as e: # pylint: disable=bare-except
print(e)
print("WandB could not get an API key and is running in offline mode")
os.environ["WANDB_MODE"] = "offline"
logger = pl_loggers.WandbLogger(
project="w4c23",
name=params["experiment"]["sub_folder"] + "_" + version,
log_model="all",
save_dir=params["experiment"]["experiment_folder"],
)
elif params["experiment"]["logging"] == "tensorboard":
logger = pl_loggers.TensorBoardLogger(
save_dir=params["experiment"]["experiment_folder"],
name=params["experiment"]["sub_folder"],
version=version,
log_graph=True,
default_hp_metric=False,
)
else:
logger = False
# Callbacks
# Model saving and early stopping
callback_funcs = [
ModelCheckpoint(
monitor="val/loss",
save_top_k=2,
save_last=True,
filename="epoch={epoch}-step={step}-val_loss={val/loss:.6f}",
auto_insert_metric_name=False,
),
]
if params["train"]["early_stopping"]:
callback_funcs.append(
EarlyStopping(
monitor="val/loss", patience=params["train"]["patience"], mode="min"
)
)
# Add metrics and image logging
if params["experiment"]["logging"] != "none":
callback_funcs.append(
LogMetrics(
num_leadtimes=params["model"]["forecast_length"],
probabilistic=params["train"]["probabilistic"],
buckets=params["model"]["buckets"],
logging=params["experiment"]["logging"],
)
)
callback_funcs.append(ImageLogger(params["experiment"]["logging"]))
# Training accelerators
accelerator = None
if gpus[0] == -1:
gpus = None
else:
accelerator = "cuda"
print(f"====== process started on the following GPUs: {gpus} ======")
trainer = pl.Trainer(
devices=gpus,
max_epochs=max_epochs,
deterministic=params["model"]["deterministic"],
logger=logger,
callbacks=callback_funcs,
precision=params["experiment"]["precision"],
gradient_clip_val=params["model"]["gradient_clip_val"],
gradient_clip_algorithm=params["model"]["gradient_clip_algorithm"],
accelerator=accelerator,
strategy="ddp_find_unused_parameters_false",
profiler="simple",
log_every_n_steps=5,
accumulate_grad_batches=params["train"]["accumulate_grad_batches"]
if "accumulate_grad_batches" in params["train"].keys()
else 1,
)
return trainer
def do_predict(trainer, model, predict_params, test_data):
scores = trainer.predict(model, dataloaders=test_data)
scores = torch.concat(scores)
tensor_to_submission_file(scores, predict_params)
def do_val(trainer, model, val_data):
scores = trainer.validate(model, dataloaders=val_data)
print(scores[0])
def train(params, gpus, mode, checkpoint_path, model):
"""Main training/evaluation method."""
# Remove extra regions/years in predict mode and disable logging
if mode == "predict":
params["dataset"]["regions"] = [params["predict"]["region_to_predict"]]
params["dataset"]["years"] = [params["predict"]["year_to_predict"]]
params["experiment"]["logging"] = "none"
# ------------
# Model & data
# ------------
get_cuda_memory_usage(gpus)
data = DataModule(params["dataset"], params["train"], mode)
model = load_model(model, params, checkpoint_path)
# ------------
# Trainer
# ------------
trainer = get_trainer(gpus, params)
# ------------
# Train & final validation
# ------------
if mode == "train":
print("------------------")
print("--- TRAIN MODE ---")
print("------------------")
trainer.fit(model, data)
# ------------
# Validation
# ------------
if mode == "val":
print("---------------------")
print("--- VALIDATE MODE ---")
print("---------------------")
do_val(trainer, model, data.val_dataloader())
# ------------
# Prediction
# ------------
if mode == "predict" or mode == "heldout":
print("--------------------")
print("--- PREDICT MODE ---")
print("--------------------")
print(
"REGIONS: ",
params["dataset"]["regions"],
params["predict"]["region_to_predict"],
)
if params["predict"]["region_to_predict"] not in params["dataset"]["regions"]:
print(
'EXITING... "regions" and "regions to predict" must indicate the same region name in your config file.'
)
else:
do_predict(trainer, model, params["predict"], data.test_dataloader())
def update_params_based_on_args(options):
config_p = os.path.join("configurations", options.config_path) | params = load_config(config_p) | 6 | 2023-11-29 11:22:50+00:00 | 12k |
Matrixeigs/UncertaintyManagementInteroperablePowerTransportationSystems | TestCasesMicrogrids/UnitCommitmentHybridACDC.py | [
{
"identifier": "PBIC_A2D",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "PBIC_A2D = QUG + 1"
},
{
"identifier": "PBIC_D2A",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "PBIC_D2A = PBIC_A2D + 1"
},
{
"identifier": "PESS_CH0",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "PESS_CH0 = QBIC + 1"
},
{
"identifier": "PESS_DC0",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "PESS_DC0 = PESS_CH0 + NESS"
},
{
"identifier": "PG0",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "PG0 = 0"
},
{
"identifier": "PPV0",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "PPV0 = EESS0 + 1"
},
{
"identifier": "PUG",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "PUG = QG0 + NG"
},
{
"identifier": "NX_MG",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "NX_MG = PDC + 1"
},
{
"identifier": "QBIC",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "QBIC = PBIC_D2A + 1"
},
{
"identifier": "QG0",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "QG0 = PG0 + NG"
},
{
"identifier": "QUG",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "QUG = PUG + 1"
},
{
"identifier": "NESS",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "NESS = 1"
},
{
"identifier": "NRES",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "NRES = 1"
},
{
"identifier": "EESS0",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "EESS0 = PESS_DC0 + 1"
},
{
"identifier": "PAC",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "PAC = PPV0 + 1 # The AC load sheding"
},
{
"identifier": "PDC",
"path": "TestCasesMicrogrids/idx_format_hybrid_AC_DC.py",
"snippet": "PDC = PAC + 1 # The DC load sheding"
},
{
"identifier": "BendersDecomposition",
"path": "StochasticOptimization/ccg_benders_decomposition.py",
"snippet": "class BendersDecomposition():\n def __init__(self):\n self.name = \"Benders decomposition using C&CG method\"\n\n def main(self, c=None, A=None, b=None, Aeq=None, beq=None, lb=None, ub=None, vtype=None, ps=None, qs=None, Ws=None,\n hs=None, Ts=None):\n \"\"\"\n The standard input format for Benders decomposition problem\n :param c: Cost parameter for the first stage optimization\n :param A: Inequality constraint matrix for the first stage optimization\n :param b: Inequality constraint parameters for the first stage optimization\n :param Aeq: Equality constraint matrix for the first stage optimization\n :param beq: Equality constraint parameters for the first stage optimization\n :param vtype: The type for the first stage optimization problems\n :param ps: Probability for the second stage optimization problem under scenario s\n :param qs: Cost parameters for the second stage optimization problem, a list of arrays\n :param Ws: Equality constraint parameters for the second stage optimization, a list of arrays\n :param hs: Equality constraint parameters for the second stage optimization\n :param Ts: Equality constraint matrix between the first stage and the second stage optimization\n :return: The obtained solution for the first stage optimization\n \"\"\"\n # 1) Try to solve the first stage optimization problem\n model_first_stage = {\"c\": c,\n \"Aeq\": Aeq,\n \"beq\": beq,\n \"A\": A,\n \"b\": b,\n \"lb\": lb,\n \"ub\": ub,\n \"vtypes\": vtype}\n\n sol_first_stage = BendersDecomposition.master_problem(self, model_first_stage)\n\n if sol_first_stage[\"status\"] == 0:\n print(\"The master problem is infeasible!\")\n return\n else:\n print(\"The master problem is feasible, the process continutes!\")\n\n self.N = len(ps) # The number of second stage decision variables\n\n self.nx_second_stage = Ws[0].shape[1]\n self.nx_first_stage = lb.shape[0]\n M = 10 ** 12\n model_second_stage = [0] * self.N\n\n for i in range(self.N):\n model_second_stage[i] = {\"c\": qs[i],\n \"A\": Ws[i],\n \"hs\": hs[i],\n \"Ts\": Ts[i],\n \"lb\": None,\n \"ub\": None}\n # 2) Reformulate the first stage optimization problem\n # 2.1) Estimate the boundary of the first stage optimization problem.\n # 2.2) Add additional variables to the first stage optimization problem\n # Using the multiple cuts version\n model_master = deepcopy(model_first_stage)\n model_master[\"c\"] = vstack([model_first_stage[\"c\"], ps])\n if model_master[\"Aeq\"] is not None:\n model_master[\"Aeq\"] = hstack([model_first_stage[\"Aeq\"], zeros((model_first_stage[\"Aeq\"].shape[0], self.N))])\n if model_master[\"A\"] is not None:\n model_master[\"A\"] = hstack([model_first_stage[\"A\"], zeros((model_first_stage[\"A\"].shape[0], self.N))])\n\n if model_master[\"lb\"] is not None:\n model_master[\"lb\"] = vstack([model_first_stage[\"lb\"], -ones((self.N, 1)) * M])\n else:\n model_master[\"lb\"] = -ones((self.N + self.nx_first_stage, 1)) * M\n\n if model_master[\"ub\"] is not None:\n model_master[\"ub\"] = vstack([model_first_stage[\"ub\"], ones((self.N, 1)) * M])\n else:\n model_master[\"ub\"] = ones((self.N + self.nx_first_stage, 1)) * M\n\n if model_master[\"vtypes\"] is not None:\n model_master[\"vtypes\"] = model_first_stage[\"vtypes\"] + [\"c\"] * self.N\n else:\n model_master[\"vtypes\"] = [\"c\"] * (self.nx_first_stage + self.N)\n\n # 3) Reformulate the second stage optimization problem\n # 3.1) Formulate the dual problem for each problem under dual problems\n # The dual problem is solved\n x_first_stage = 
array(sol_first_stage[\"x\"][0:self.nx_first_stage]).reshape(self.nx_first_stage, 1)\n model_second_stage = self.sub_problems_update(model_second_stage, x_first_stage)\n n_processors = os.cpu_count()\n with Pool(n_processors) as p:\n sol_second_stage = list(p.map(sub_problem_dual, model_second_stage))\n A_cuts = zeros((self.N, self.nx_first_stage + self.N))\n b_cuts = zeros((self.N, 1))\n for i in range(self.N):\n # Solve the dual problem\n A_cuts[i, 0:self.nx_first_stage] = -transpose(\n transpose(model_second_stage[i][\"Ts\"]).dot(sol_second_stage[i][\"x\"]))\n b_cuts[i, 0] = -transpose(sol_second_stage[i][\"x\"]).dot(model_second_stage[i][\"hs\"])\n if sol_second_stage[i][\"status\"] == 1: # if the primal problem is feasible, add feasible cuts\n A_cuts[i, self.nx_first_stage + i] = -1\n\n Upper = [inf]\n Lower = sol_first_stage[\"objvalue\"]\n Gap = [self.gap_calculaiton(Upper[0], Lower)]\n eps = 10 ** -6\n iter_max = 10000\n iter = 0\n # 4) Begin the iteration\n index = arange(0, self.N, 1)\n while iter < iter_max:\n # Update the master problem\n if model_master[\"A\"] is not None:\n model_master[\"A\"] = vstack([model_master[\"A\"], A_cuts])\n else:\n model_master[\"A\"] = A_cuts\n if model_master[\"b\"] is not None:\n model_master[\"b\"] = vstack([model_master[\"b\"], b_cuts])\n else:\n model_master[\"b\"] = b_cuts\n ## add primal cuts to the master problem\n nx = model_master[\"c\"].shape[0]\n model_master[\"c\"] = vstack([model_master[\"c\"], zeros((self.nx_second_stage, 1))])\n if model_second_stage[index[0]][\"lb\"] is not None:\n model_master[\"lb\"] = vstack([model_master[\"lb\"], model_second_stage[index[0]][\"lb\"]])\n else:\n model_master[\"lb\"] = vstack([model_master[\"lb\"], -M*ones((self.nx_second_stage, 1))])\n if model_second_stage[index[0]][\"ub\"] is not None:\n model_master[\"ub\"] = vstack([model_master[\"ub\"], model_second_stage[index[0]][\"ub\"]])\n else:\n model_master[\"ub\"] = vstack([model_master[\"ub\"], M*ones((self.nx_second_stage, 1))])\n model_master[\"vtypes\"] += [\"c\"] * self.nx_second_stage\n # add the inequality constraints\n nx_temp = model_master[\"c\"].shape[0]\n\n nineq = model_second_stage[index[0]][\"A\"].shape[0]\n\n A_temp = zeros((nineq, nx_temp))\n A_temp[:, 0:self.nx_first_stage] = -model_second_stage[index[0]][\"Ts\"]\n A_temp[:, nx:] = -model_second_stage[index[0]][\"A\"]\n\n if model_master[\"A\"] is not None:\n model_master[\"A\"] = hstack(\n [model_master[\"A\"], zeros((model_master[\"A\"].shape[0], self.nx_second_stage))])\n model_master[\"A\"] = vstack([model_master[\"A\"], A_temp])\n model_master[\"b\"] = vstack([model_master[\"b\"], -model_second_stage[index[0]][\"hs\"]])\n else:\n model_master[\"A\"] = A_temp\n model_master[\"b\"] = -model_second_stage[index[0]][\"hs\"]\n # add the inequality constraints\n # model_master[\"A\"] = hstack([model_master[\"A\"], zeros((model_master[\"A\"].shape[0], self.nx_second_stage))])\n primal_cuts = zeros((1, nx_temp))\n primal_cuts[0, self.nx_first_stage + index[0]] = -1\n primal_cuts[0, nx:] = model_second_stage[index[0]][\"c\"].reshape(self.nx_second_stage)\n model_master[\"A\"] = vstack([model_master[\"A\"], primal_cuts])\n model_master[\"b\"] = vstack([model_master[\"b\"], zeros((1, 1))])\n try:\n index = delete(index, 0)\n except:\n print(\"All scenarios have been added to the master problem!\")\n pass\n\n # solve the master problem\n sol_first_stage = self.master_problem(model_master)\n Lower = sol_first_stage[\"objvalue\"]\n\n # update the second stage solution\n x_first_stage = 
array(sol_first_stage[\"x\"][0:self.nx_first_stage]).reshape(self.nx_first_stage, 1)\n model_second_stage = self.sub_problems_update(model_second_stage, x_first_stage)\n\n objvalue_second_stage = zeros((self.N, 1))\n\n A_cuts = zeros((self.N, nx_temp))\n b_cuts = zeros((self.N, 1))\n\n with Pool(n_processors) as p:\n sol_second_stage = list(p.map(sub_problem_dual, model_second_stage))\n\n\n for i in range(self.N):\n # Solve the dual problem\n A_cuts[i, 0:self.nx_first_stage] = -transpose(\n transpose(model_second_stage[i][\"Ts\"]).dot(sol_second_stage[i][\"x\"]))\n b_cuts[i, 0] = -transpose(sol_second_stage[i][\"x\"]).dot(model_second_stage[i][\"hs\"])\n\n if sol_second_stage[i][\"status\"] == 1: # if the primal problem is feasible, add feasible cuts\n A_cuts[i, self.nx_first_stage + i] = -1\n objvalue_second_stage[i, 0] = sol_second_stage[i][\"objvalue\"]\n else:\n objvalue_second_stage[i, 0] = inf\n\n Upper = transpose(x_first_stage).dot(model_first_stage[\"c\"]) + transpose(objvalue_second_stage).dot(ps)\n print(\"The upper bound is {0}\".format(Upper[0][0]))\n\n Gap.append(BendersDecomposition.gap_calculaiton(self, Upper[0], Lower))\n print(\"The gap is {0}\".format(Gap[-1][0]))\n print(\"The lower bound is {0}\".format(Lower))\n iter += 1\n\n if Gap[-1][0] < eps:\n break\n\n with Pool(n_processors) as p:\n sol_second_stage = list(p.map(sub_problem, model_second_stage))\n\n # x_first_stage = sol_first_stage[\"x\"][0:self.nx_first_stage]\n #\n # x_second_stage = zeros((self.N, self.nx_second_stage))\n x_second_stage = [0] * self.N\n\n for i in range(self.N):\n x_second_stage[i] = array(sol_second_stage[i][\"x\"])\n\n sol = {\"objvalue\": Upper,\n \"x_first_stage\": x_first_stage,\n \"x_second_stage\": x_second_stage, }\n\n # plt.plot(Gap)\n # plt.show()\n return sol\n\n def master_problem(self, model):\n \"\"\"\n Solve the master problem\n :param model:\n :return:\n \"\"\"\n (x, objvalue, status) = lp(model[\"c\"], Aeq=model[\"Aeq\"], beq=model[\"beq\"], A=model[\"A\"], b=model[\"b\"],\n xmin=model[\"lb\"], xmax=model[\"ub\"], vtypes=model[\"vtypes\"])\n\n sol = {\"x\": x,\n \"objvalue\": objvalue,\n \"status\": status}\n\n return sol\n\n def sub_problems_update(self, model, x):\n \"\"\"\n\n :param model: The second stage models\n :param hs: The equality constraints under each stage\n :param Ts: The coupling constraints between the first stage and second stage constraints\n :return: hs-Ts*x\n \"\"\"\n for i in range(self.N):\n model[i][\"b\"] = model[i][\"hs\"] - model[i][\"Ts\"].dot(x)\n\n return model\n\n def gap_calculaiton(self, upper, lower):\n\n if lower != 0:\n gap = abs((upper - lower) / lower * 100)\n else:\n gap = inf\n\n if gap == inf:\n gap = [inf]\n\n return gap"
}
] | from TestCasesMicrogrids.idx_format_hybrid_AC_DC import PBIC_A2D, PBIC_D2A, PESS_CH0, PESS_DC0, PG0, PPV0, PUG, \
NX_MG, QBIC, QG0, QUG, NESS, NRES, EESS0, PAC, PDC
from numpy import zeros, ones, concatenate, eye, tile, array
from scipy.sparse import vstack, lil_matrix
from Solvers.mixed_integer_solvers_cplex import mixed_integer_linear_programming as milp
from StochasticOptimization.ccg_benders_decomposition import BendersDecomposition
from random import random
from TestCasesMicrogrids.cases_unit_commitment import micro_grid | 7,294 |
c_ig[i] = mg["DG"][i]["FUEL"] * mg["DG"][i]["PMIN"] * mg["DG"][i]["FUEL_PRICE"]
cg[i] = mg["DG"][i]["FUEL"] * mg["DG"][i]["FUEL_PRICE"]
cfuel[i] = mg["DG"][i]["FUEL_PRICE"]
# Formulate the boundaries
lb = concatenate([tile(concatenate([alpha_l, beta_l, Ig_l, pg_l, rg_l, fuel_l]), T)])
ub = concatenate([tile(concatenate([alpha_u, beta_u, Ig_u, pg_u, rg_u, fuel_u]), T)])
# Objective value
c = concatenate([tile(concatenate([c_alpha, c_beta, c_ig, cg, cr, cfuel]), T)])
# Variable types
vtypes = (["b"] * ng * 3 + ["c"] * ng * 3) * T
## Constraint sets
# 1) Pg+Rg<=PguIg
A = lil_matrix((ng * T, nv_first_stage))
b = zeros(ng * T)
for t in range(T):
for j in range(ng):
A[t * ng + j, t * _nv_first_stage + ng * self.PG + j] = 1
A[t * ng + j, t * _nv_first_stage + ng * self.RG + j] = 1
A[t * ng + j, t * _nv_first_stage + ng * self.IG + j] = -pg_u[j]
# 2) Pg-Rg>=IgPgl
A_temp = lil_matrix((ng * T, nv_first_stage))
b_temp = zeros(ng * T)
for t in range(T):
for j in range(ng):
A_temp[t * ng + j, t * _nv_first_stage + ng * self.PG + j] = -1
A_temp[t * ng + j, t * _nv_first_stage + ng * self.RG + j] = 1
A_temp[t * ng + j, t * _nv_first_stage + ng * self.IG + j] = pg_l[j]
A = vstack([A, A_temp])
b = concatenate([b, b_temp])
# 3) Start-up and shut-down constraints of DGs
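        # Minimum up/down-time logic: once unit i has been started up (shut down),
        # it must remain on (off) for at least MIN_UP[i] (MIN_DOWN[i]) periods.
        # 3.1) and 3.2) below encode this by accumulating the start-up/shut-down
        # indicators over the preceding MIN_UP/MIN_DOWN periods and comparing them
        # with the on/off status Ig at period t.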
UP_LIMIT = zeros(ng).astype(int)
DOWN_LIMIT = zeros(ng).astype(int)
for i in range(ng):
UP_LIMIT[i] = T - MIN_UP[i]
DOWN_LIMIT[i] = T - MIN_DOWN[i]
# 3.1) Up limit
A_temp = lil_matrix((sum(UP_LIMIT), nv_first_stage))
b_temp = zeros(sum(UP_LIMIT))
for i in range(ng):
for t in range(MIN_UP[i], T):
for k in range(t - MIN_UP[i], t):
A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], k * _nv_first_stage + ng * self.ALPHA + i] = 1
A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], t * _nv_first_stage + ng * self.IG + i] = -1
A = vstack([A, A_temp])
b = concatenate([b, b_temp])
        # 3.2) Down limit
A_temp = lil_matrix((sum(DOWN_LIMIT), nv_first_stage))
b_temp = ones(sum(DOWN_LIMIT))
for i in range(ng):
for t in range(MIN_DOWN[i], T):
for k in range(t - MIN_DOWN[i], t):
A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], k * _nv_first_stage + ng * self.BETA + i] = 1
A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], t * _nv_first_stage + ng * self.IG + i] = 1
A = vstack([A, A_temp])
b = concatenate([b, b_temp])
# 4) Status transformation of each unit
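        # For every unit i and period t the equality below enforces the logic
        # constraint alpha(i,t) - beta(i,t) = Ig(i,t) - Ig(i,t-1), with Ig(i,-1)
        # taken as the initial status Ig0[i], coupling the start-up/shut-down
        # indicators to the on/off status.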
Aeq = lil_matrix((T * ng, nv_first_stage))
beq = zeros(T * ng)
for i in range(ng):
for t in range(T):
Aeq[i * T + t, t * _nv_first_stage + ng * self.ALPHA + i] = 1
Aeq[i * T + t, t * _nv_first_stage + ng * self.BETA + i] = -1
Aeq[i * T + t, t * _nv_first_stage + ng * self.IG + i] = -1
if t != 0:
Aeq[i * T + t, (t - 1) * _nv_first_stage + ng * self.IG + i] = 1
else:
beq[i * T + t] = -Ig0[i]
model_first_stage = {"c": c.reshape((nv_first_stage, 1)),
"lb": lb.reshape((nv_first_stage, 1)),
"ub": ub.reshape((nv_first_stage, 1)),
"vtypes": vtypes,
"A": A.tolil(),
"b": b.reshape((len(b), 1)),
"Aeq": Aeq.tolil(),
"beq": beq.reshape((len(beq), 1)), }
# (sol_first_stage, obj_first_stage, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"],
# beq=model_first_stage["beq"],
# A=model_first_stage["A"], b=model_first_stage["b"],
# vtypes=model_first_stage["vtypes"],
# xmax=model_first_stage["ub"], xmin=model_first_stage["lb"])
# sol = self.first_stage_solution_validation(sol_first_stage)
return model_first_stage
def second_stage_problem_formulation(self, mg, u):
"""
Second-stage problem formulation for hybrid AC/DC MGs
        :param mg: microgrid parameter dictionary (DG, UG, PV and load data)
        :param u: realization of the uncertain AC/DC load and PV profiles for one scenario
        :return: model of the second-stage problem, i.e., real-time scheduling of the hybrid AC/DC microgrid
"""
T = self.T
ng = self.ng
ness = self.ness
nres = self.nres
nv_uncertainty = self.nv_uncertainty
nv_first_stage = self.nv_first_stage
self._nv_second_stage = NX_MG
## 1) boundary information and objective function
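        # Per-period layout of the NX_MG second-stage variables (following the
        # indices imported from idx_format_hybrid_AC_DC): DG active/reactive power
        # (PG0.., QG0..), utility-grid exchange (PUG, QUG), bidirectional converter
        # flows (PBIC_A2D, PBIC_D2A, QBIC), ESS charging/discharging power and energy
        # (PESS_CH0, PESS_DC0, EESS0), PV output (PPV0) and AC/DC load shedding (PAC, PDC).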
_nv_second_stage = NX_MG
nv_second_stage = NX_MG * T
self.nv_second_stage = nv_second_stage
lb = zeros(nv_second_stage)
ub = zeros(nv_second_stage)
c = zeros(nv_second_stage)
q = zeros(nv_second_stage)
vtypes = ["c"] * nv_second_stage
for t in range(T):
## 1.1) lower boundary
for i in range(ng):
lb[t * NX_MG + PG0 + i] = 0
lb[t * NX_MG + QG0 + i] = mg["DG"][i]["QMIN"]
lb[t * NX_MG + PUG] = 0
lb[t * NX_MG + QUG] = mg["UG"]["QMIN"]
lb[t * NX_MG + PBIC_D2A] = 0
| """
Unit commitment problem for hybrid AC/DC MGs
@author: Zhao Tianyang
@e-mail: [email protected]
Supporting documents:
Resilient Energy Management for Hybrid AC/DC MGs
"""
class UnitCommitment():
"""
Unit commitment problem for hybrid AC/DC MGs for stochastic optimization
"""
def __init__(self, mg):
self.T = len(mg["PD"]["AC"])
self.ng = len(mg["DG"])
self.NX = NX_MG
self.ness = NESS
self.nres = NRES
self.nv_uncertainty = self.T * 2 + self.T * NRES
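        # The formulation below is a two-stage stochastic program: the first stage
        # fixes the unit-commitment decisions of the DGs for every period (start-up
        # ALPHA, shut-down BETA, status IG, output PG, reserve RG and fuel refilling
        # FUEL), while the second stage dispatches the hybrid AC/DC microgrid for
        # each sampled scenario; both stages are merged into one MILP.
        # nv_uncertainty stacks, per scenario, the uncertain inputs used in
        # stochastic_optimization: T AC-load values, T DC-load values and one
        # T-length PV profile per renewable unit, i.e.
        # u = [PD_AC(1..T), PD_DC(1..T), PV_1(1..T), ..., PV_NRES(1..T)].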
def stochastic_optimization(self, mg, ns=4):
model_first_stage = self.first_stage_problem_formulation(mg=mg)
# Formulate the second stage scenarios
us = zeros((ns, self.nv_uncertainty))
ws = ones(ns) / ns
for i in range(ns):
us[i, 0: 2 * self.T] = concatenate([mg["PD"]["AC"], mg["PD"]["DC"]])
for j in range(self.nres):
us[i, (2 + j) * self.T:(3 + j) * self.T] = concatenate([mg["PV"][j]["PROFILE"]])
u_mean = us[0, :]
u_delta = us[0, :] * 0.1
for i in range(ns):
for j in range(self.nv_uncertainty):
us[i, j] += (2 * random() - 1) * u_delta[j]
model_second_stage = [0] * ns
for i in range(ns):
model_second_stage[i] = self.second_stage_problem_formulation(mg=mg, u=us[i, :])
# # Merge the first-stage and second_stage problems
lb = model_first_stage["lb"]
ub = model_first_stage["ub"]
vtypes = model_first_stage["vtypes"]
c = model_first_stage["c"]
if model_first_stage["Aeq"] is not None:
neq = model_first_stage["Aeq"].shape[0]
else:
neq = 0
if model_first_stage["A"] is not None:
nineq = model_first_stage["A"].shape[0]
else:
nineq = 0
nv_first_stage = self.nv_first_stage
nv_second_stage = self.nv_second_stage
nv_index = zeros(ns + 1).astype(int)
neq_index = zeros(ns + 1).astype(int)
nineq_index = zeros(ns + 1).astype(int)
neq_index[0] = neq
nineq_index[0] = nineq
nv_index[0] = nv_first_stage
beq = model_first_stage["beq"]
b = model_first_stage["b"]
for i in range(ns):
if model_second_stage[i]["Geq"] is not None:
neq_index[i + 1] = neq_index[i] + model_second_stage[i]["Geq"].shape[0]
else:
neq_index[i + 1] = neq_index[i]
if model_second_stage[i]["G"] is not None:
nineq_index[i + 1] = nineq_index[i] + model_second_stage[i]["G"].shape[0]
else:
nineq_index[i + 1] = nineq_index[i]
nv_index[i + 1] = nv_index[i] + nv_second_stage
c = concatenate([c, ws[i] * model_second_stage[i]["c"]])
lb = concatenate([lb, model_second_stage[i]["lb"]])
ub = concatenate([ub, model_second_stage[i]["ub"]])
vtypes += model_second_stage[i]["vtypes"]
if model_second_stage[i]["Meq"] is not None:
beq = concatenate([beq, model_second_stage[i]["heq"] - model_second_stage[i]["Meq"] * us[i, :].reshape(
(self.nv_uncertainty, 1))])
if model_second_stage[i]["M"] is not None:
b = concatenate([b, model_second_stage[i]["h"] - model_second_stage[i]["M"] * us[i, :].reshape(
(self.nv_uncertainty, 1))])
Aeq_full = lil_matrix((neq_index[-1], nv_index[-1]))
Aeq_full[0:neq_index[0], 0:nv_index[0]] = model_first_stage["Aeq"]
for i in range(ns):
# For the first stage
Aeq_full[neq_index[i]:neq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["Eeq"]
# For the second stage
Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Geq"]
A_full = lil_matrix((nineq_index[-1], nv_index[-1]))
A_full[0:int(nineq_index[0]), 0:int(nv_index[0])] = model_first_stage["A"]
for i in range(ns):
A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["E"]
A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["G"]
# 3) Obtain the results for first-stage and second stage optimization problems
# 3.1) Obtain the integrated solution
(sol, obj, success) = milp(c, Aeq=Aeq_full.tolil(), beq=beq[:, 0], A=A_full.tolil(), b=b[:,0],
xmin=lb[:,0], xmax=ub[:,0], vtypes=vtypes)
        # # 4) Verify the first-stage and second-stage optimization problem
# # 4.1) First-stage solution
sol_first_stage = sol[0:self.nv_second_stage]
sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage)
# 4.2) Second-stage solution
sol_second_stage = {}
for i in range(ns):
sol_second_stage[i] = sol[int(nv_index[i]):int(nv_index[i + 1])]
sol_second_stage_checked = {}
for i in range(ns):
sol_second_stage_checked[i] = self.second_stage_solution_validation(sol_second_stage[i])
return sol_first_stage, sol_second_stage_checked
def first_stage_problem_formulation(self, mg):
ng = self.ng
self.ALPHA = 0
self.BETA = 1
self.IG = 2
self.PG = 3
self.RG = 4
self.FUEL = 5
T = self.T
_nv_first_stage = ng * 6
nv_first_stage = _nv_first_stage * T
self._nv_first_stage = _nv_first_stage
self.nv_first_stage = nv_first_stage
# Obtain the initial status, start-up and shut down of generators
Ig0 = zeros(ng)
MIN_DOWN = zeros(ng)
MIN_UP = zeros(ng)
for i in range(ng):
Ig0[i] = mg["DG"][i]["I0"]
MIN_DOWN[i] = mg["DG"][i]["MU"]
MIN_UP[i] = mg["DG"][i]["MD"]
Ig0 = Ig0.astype(int)
MIN_DOWN = MIN_DOWN.astype(int)
MIN_UP = MIN_UP.astype(int)
        # The decision variables include the start-up, shut-down, generator output, reserve capacity and fuel refilling plan
alpha_l = zeros(ng)
beta_l = zeros(ng)
Ig_l = zeros(ng)
pg_l = zeros(ng) # Boundary for DGs within distribution networks
rg_l = zeros(ng)
fuel_l = zeros(ng)
alpha_u = ones(ng)
beta_u = ones(ng)
Ig_u = ones(ng)
pg_u = zeros(ng)
rg_u = zeros(ng)
fuel_u = zeros(ng)
c_alpha = zeros(ng)
c_beta = zeros(ng)
c_ig = zeros(ng)
cg = zeros(ng)
cr = zeros(ng)
cfuel = zeros(ng)
for i in range(ng):
pg_u[i] = mg["DG"][i]["PMAX"]
rg_u[i] = mg["DG"][i]["PMAX"]
fuel_u[i] = mg["DG"][i]["TANK"] - mg["DG"][i]["TANK0"]
c_ig[i] = mg["DG"][i]["FUEL"] * mg["DG"][i]["PMIN"] * mg["DG"][i]["FUEL_PRICE"]
cg[i] = mg["DG"][i]["FUEL"] * mg["DG"][i]["FUEL_PRICE"]
cfuel[i] = mg["DG"][i]["FUEL_PRICE"]
# Formulate the boundaries
lb = concatenate([tile(concatenate([alpha_l, beta_l, Ig_l, pg_l, rg_l, fuel_l]), T)])
ub = concatenate([tile(concatenate([alpha_u, beta_u, Ig_u, pg_u, rg_u, fuel_u]), T)])
# Objective value
c = concatenate([tile(concatenate([c_alpha, c_beta, c_ig, cg, cr, cfuel]), T)])
# Variable types
vtypes = (["b"] * ng * 3 + ["c"] * ng * 3) * T
## Constraint sets
# 1) Pg+Rg<=PguIg
A = lil_matrix((ng * T, nv_first_stage))
b = zeros(ng * T)
for t in range(T):
for j in range(ng):
A[t * ng + j, t * _nv_first_stage + ng * self.PG + j] = 1
A[t * ng + j, t * _nv_first_stage + ng * self.RG + j] = 1
A[t * ng + j, t * _nv_first_stage + ng * self.IG + j] = -pg_u[j]
# 2) Pg-Rg>=IgPgl
A_temp = lil_matrix((ng * T, nv_first_stage))
b_temp = zeros(ng * T)
for t in range(T):
for j in range(ng):
A_temp[t * ng + j, t * _nv_first_stage + ng * self.PG + j] = -1
A_temp[t * ng + j, t * _nv_first_stage + ng * self.RG + j] = 1
A_temp[t * ng + j, t * _nv_first_stage + ng * self.IG + j] = pg_l[j]
A = vstack([A, A_temp])
b = concatenate([b, b_temp])
# 3) Start-up and shut-down constraints of DGs
UP_LIMIT = zeros(ng).astype(int)
DOWN_LIMIT = zeros(ng).astype(int)
for i in range(ng):
UP_LIMIT[i] = T - MIN_UP[i]
DOWN_LIMIT[i] = T - MIN_DOWN[i]
# 3.1) Up limit
A_temp = lil_matrix((sum(UP_LIMIT), nv_first_stage))
b_temp = zeros(sum(UP_LIMIT))
for i in range(ng):
for t in range(MIN_UP[i], T):
for k in range(t - MIN_UP[i], t):
A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], k * _nv_first_stage + ng * self.ALPHA + i] = 1
A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], t * _nv_first_stage + ng * self.IG + i] = -1
A = vstack([A, A_temp])
b = concatenate([b, b_temp])
        # 3.2) Down limit
A_temp = lil_matrix((sum(DOWN_LIMIT), nv_first_stage))
b_temp = ones(sum(DOWN_LIMIT))
for i in range(ng):
for t in range(MIN_DOWN[i], T):
for k in range(t - MIN_DOWN[i], t):
A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], k * _nv_first_stage + ng * self.BETA + i] = 1
A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], t * _nv_first_stage + ng * self.IG + i] = 1
A = vstack([A, A_temp])
b = concatenate([b, b_temp])
# 4) Status transformation of each unit
Aeq = lil_matrix((T * ng, nv_first_stage))
beq = zeros(T * ng)
for i in range(ng):
for t in range(T):
Aeq[i * T + t, t * _nv_first_stage + ng * self.ALPHA + i] = 1
Aeq[i * T + t, t * _nv_first_stage + ng * self.BETA + i] = -1
Aeq[i * T + t, t * _nv_first_stage + ng * self.IG + i] = -1
if t != 0:
Aeq[i * T + t, (t - 1) * _nv_first_stage + ng * self.IG + i] = 1
else:
beq[i * T + t] = -Ig0[i]
model_first_stage = {"c": c.reshape((nv_first_stage, 1)),
"lb": lb.reshape((nv_first_stage, 1)),
"ub": ub.reshape((nv_first_stage, 1)),
"vtypes": vtypes,
"A": A.tolil(),
"b": b.reshape((len(b), 1)),
"Aeq": Aeq.tolil(),
"beq": beq.reshape((len(beq), 1)), }
# (sol_first_stage, obj_first_stage, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"],
# beq=model_first_stage["beq"],
# A=model_first_stage["A"], b=model_first_stage["b"],
# vtypes=model_first_stage["vtypes"],
# xmax=model_first_stage["ub"], xmin=model_first_stage["lb"])
# sol = self.first_stage_solution_validation(sol_first_stage)
return model_first_stage
def second_stage_problem_formulation(self, mg, u):
"""
Second-stage problem formulation for hybrid AC/DC MGs
        :param mg: microgrid parameter dictionary (DG, UG, PV and load data)
        :param u: realization of the uncertain AC/DC load and PV profiles for one scenario
        :return: model of the second-stage problem, i.e., real-time scheduling of the hybrid AC/DC microgrid
"""
T = self.T
ng = self.ng
ness = self.ness
nres = self.nres
nv_uncertainty = self.nv_uncertainty
nv_first_stage = self.nv_first_stage
self._nv_second_stage = NX_MG
## 1) boundary information and objective function
_nv_second_stage = NX_MG
nv_second_stage = NX_MG * T
self.nv_second_stage = nv_second_stage
lb = zeros(nv_second_stage)
ub = zeros(nv_second_stage)
c = zeros(nv_second_stage)
q = zeros(nv_second_stage)
vtypes = ["c"] * nv_second_stage
for t in range(T):
## 1.1) lower boundary
for i in range(ng):
lb[t * NX_MG + PG0 + i] = 0
lb[t * NX_MG + QG0 + i] = mg["DG"][i]["QMIN"]
lb[t * NX_MG + PUG] = 0
lb[t * NX_MG + QUG] = mg["UG"]["QMIN"]
lb[t * NX_MG + PBIC_D2A] = 0 | lb[t * NX_MG + PBIC_A2D] = 0 | 0 | 2023-11-27 15:57:53+00:00 | 12k |
DongGeun-Yoon/DGDM | runners/BBDMRunner.py | [
{
"identifier": "Registers",
"path": "Register.py",
"snippet": "class Registers:\n def __init__(self):\n raise RuntimeError(\"Registries is not intended to be instantiated\")\n\n datasets = Register('datasets')\n runners = Register('runners')"
},
{
"identifier": "BrownianBridgeModel",
"path": "model/BrownianBridgeModel.py",
"snippet": "class BrownianBridgeModel(nn.Module):\n def __init__(self, config):\n super().__init__()\n model_config = config.model\n self.model_config = model_config\n # data parameters\n self.in_frames = config.data.dataset_config.in_frames\n self.out_frames = config.data.dataset_config.out_frames\n \n # model hyperparameters\n model_params = model_config.BB.params\n self.mt_type = model_params.mt_type\n self.min_timesteps = model_params.min_timesteps\n self.num_timesteps = model_params.num_timesteps\n self.max_var = model_params.max_var if model_params.__contains__(\"max_var\") else 1\n self.eta = model_params.eta if model_params.__contains__(\"eta\") else 1\n self.skip_sample = model_params.skip_sample\n self.sample_type = model_params.sample_type\n self.sample_step = model_params.sample_step\n self.truncate_step = model_params.truncate_step\n self.steps = None\n self.register_schedule()\n\n # loss and objective\n self.loss_type = model_params.loss_type\n self.objective = model_params.objective\n\n # UNet\n self.channels = model_params.UNetParams.channels\n self.condition_key = model_params.UNetParams.condition_key\n self.denoise_fn = Unet3D(**vars(model_params.UNetParams))\n\n if self.condition_key == \"predictor\":\n model_config.CondParams.predictor.shape_in = (config.data.dataset_config.in_frames, self.channels)\n model_config.CondParams.predictor.out_frames = config.data.dataset_config.out_frames\n self.cond_stage_model = Determinisitic(**vars(model_config.CondParams.predictor))\n # use pretrained model \n if model_config.CondParams.pretrained:\n ckt = torch.load(model_config.CondParams.pretrained)\n self.cond_stage_model.load_state_dict(ckt)\n \n if not model_config.CondParams.train:\n for p in self.cond_stage_model.parameters():\n p.requires_grad = False\n \n def register_schedule(self):\n T = self.num_timesteps\n self.per_frame = True if self.mt_type == \"frame\" else False\n \n if self.mt_type == \"linear\":\n m_min, m_max = 0.001, 0.999\n m_t = np.linspace(m_min, m_max, T)\n elif self.mt_type == \"sin\":\n m_t = np.arange(T) / T\n m_t[0] = 0.0005\n m_t = 0.5 * np.sin(np.pi * (m_t - 0.5)) + 0.5\n elif self.mt_type == \"frame\":\n m_min, m_max = 0.001, 0.999\n min_step = self.min_timesteps\n max_step = self.num_timesteps\n num_frame = self.out_frames\n self.frame_steps = np.linspace(min_step, max_step, num_frame)\n m_t = np.zeros((T, num_frame))\n m_t = m_t.reshape(T, num_frame)\n for i in range(num_frame): \n step = int(self.frame_steps[i])\n m = np.linspace(m_min, m_max, int(step))\n m_t[-step:, i] = m \n else:\n raise NotImplementedError\n m_tminus = np.append(0, m_t[:-1])\n\n variance_t = 2. 
* (m_t - m_t ** 2) * self.max_var\n variance_tminus = np.append(0., variance_t[:-1])\n\n to_torch = partial(torch.tensor, dtype=torch.float32)\n self.register_buffer('m_t', to_torch(m_t))\n self.register_buffer('m_tminus', to_torch(m_tminus))\n self.register_buffer('variance_t', to_torch(variance_t))\n \n if self.skip_sample:\n midsteps = torch.arange(self.num_timesteps - 1, 1,\n step=-((self.num_timesteps - 1) // (self.sample_step - 2)), dtype=torch.long)\n self.steps = torch.cat((midsteps, torch.Tensor([1, 0]).long()), dim=0)\n else:\n self.steps = torch.arange(self.num_timesteps-1, -1, -1)\n\n def apply(self, weight_init):\n self.denoise_fn.apply(weight_init)\n return self\n\n def get_parameters(self):\n return self.denoise_fn.parameters()\n\n def forward(self, x, y, context=None): \n pred, context = self.cond_stage_model(y)\n b, f, c, h, w, device = *x.shape, x.device\n x = rearrange(x, 'b t c h w -> b c t h w')\n y = rearrange(y, 'b t c h w -> b c t h w')\n t = torch.randint(0, self.num_timesteps, (b,), device=device).long()\n return self.p_losses(x, y, context, t, pred=pred)\n\n def p_losses(self, x0, y, context, t, noise=None, pred=None):\n b, f, c, h, w = x0.shape\n noise = default(noise, lambda: torch.randn_like(x0))\n\n x_t, objective = self.q_sample(x0, torch.tile(y[:,:,-1:], [1,1,self.out_frames,1,1]), t, noise)\n objective_recon = self.denoise_fn(x_t, timesteps=t, context=context)\n\n pred = rearrange(pred, 'b t c h w -> b c t h w')\n if self.loss_type == 'l1':\n recloss = (objective - objective_recon).abs().mean()\n pred_loss = (x0 - pred).abs().mean()\n elif self.loss_type == 'l2':\n recloss = F.mse_loss(objective, objective_recon)\n pred_loss = F.mse_loss(pred, x0)\n else:\n raise NotImplementedError()\n\n x0_recon = self.predict_x0_from_objective(x_t, y, t, objective_recon)\n log_dict = {\"loss\": recloss, \"x0_recon\": x0_recon}\n return recloss, log_dict, pred_loss\n\n def q_sample(self, x0, y, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x0))\n m_t = extract(self.m_t, t, x0.shape, self.per_frame)\n var_t = extract(self.variance_t, t, x0.shape, self.per_frame)\n sigma_t = torch.sqrt(var_t)\n\n objective = m_t * (y - x0) + sigma_t * noise\n return (\n (1. 
- m_t) * x0 + m_t * y + sigma_t * noise,\n objective\n )\n\n def predict_x0_from_objective(self, x_t, y, t, objective_recon):\n x0_recon = x_t - objective_recon\n return x0_recon\n\n @torch.no_grad()\n def q_sample_loop(self, x0, y):\n imgs = [x0]\n for i in tqdm(range(self.num_timesteps), desc='q sampling loop', total=self.num_timesteps):\n t = torch.full((y.shape[0],), i, device=x0.device, dtype=torch.long)\n img, _ = self.q_sample(x0, y, t)\n imgs.append(img)\n return imgs\n\n @torch.no_grad()\n def p_sample(self, x_t, y, context, i, clip_denoised=False, mix_up=None):\n b, *_, device = *x_t.shape, x_t.device\n if self.steps[i] == 0:\n t = torch.full((x_t.shape[0],), self.steps[i], device=x_t.device, dtype=torch.long)\n objective_recon = self.denoise_fn(x_t, timesteps=t, context=context)\n x0_recon = self.predict_x0_from_objective(x_t, y, t, objective_recon=objective_recon)\n if clip_denoised:\n x0_recon.clamp_(-1., 1.)\n return x0_recon, x0_recon\n else:\n t = torch.full((x_t.shape[0],), self.steps[i], device=x_t.device, dtype=torch.long)\n n_t = torch.full((x_t.shape[0],), self.steps[i+1], device=x_t.device, dtype=torch.long)\n\n m_t = extract(self.m_t, t, x_t.shape, self.per_frame)\n m_nt = extract(self.m_t, n_t, x_t.shape, self.per_frame)\n var_t = extract(self.variance_t, t, x_t.shape, self.per_frame)\n var_nt = extract(self.variance_t, n_t, x_t.shape, self.per_frame)\n sigma2_t = (var_t - var_nt * (1. - m_t) ** 2 / (1. - m_nt) ** 2) * var_nt / var_t\n sigma_t = torch.sqrt(sigma2_t) * self.eta\n noise = torch.randn_like(x_t)\n\n if i == self.truncate_step:\n x_t = (1. - m_t) * mix_up + m_t * y + sigma_t * noise\n \n objective_recon = self.denoise_fn(x_t, timesteps=t, context=context)\n x0_recon = self.predict_x0_from_objective(x_t, y, t, objective_recon=objective_recon)\n if clip_denoised:\n x0_recon.clamp_(-1., 1.)\n\n x_tminus_mean = (1. - m_nt) * x0_recon + m_nt * y + torch.sqrt((var_nt - sigma2_t) / var_t) * \\\n (x_t - (1. - m_t) * x0_recon - m_t * y)\n x_t = torch.where(m_nt == 0, x_t, x_tminus_mean + sigma_t * noise)\n return x_t, x0_recon\n\n @torch.no_grad()\n def p_sample_loop(self, y, context=None, clip_denoised=True, sample_mid_step=False, mix_up=None):\n img = torch.tile(y[:,:,-1:], [1,1,self.out_frames,1,1])\n y = img\n \n if sample_mid_step:\n imgs, one_step_imgs = [y], []\n for i in tqdm(range(len(self.steps)), desc=f'sampling loop time step', total=len(self.steps)):\n img, x0_recon = self.p_sample(x_t=imgs[-1], y=y, context=context, i=i, clip_denoised=clip_denoised)\n imgs.append(img)\n one_step_imgs.append(x0_recon)\n return imgs, one_step_imgs\n else:\n for i in tqdm(range(self.truncate_step, len(self.steps)), desc=f'sampling loop time step', total=len(self.steps) - self.truncate_step):\n img, _ = self.p_sample(x_t=img, y=y, context=context, i=i, clip_denoised=clip_denoised, mix_up=mix_up)\n return img\n\n @torch.no_grad()\n def sample(self, y, context=None, clip_denoised=True, sample_mid_step=False):\n pred, context = self.cond_stage_model(y)\n pred = rearrange(pred, 'b t c h w -> b c t h w')\n y = rearrange(y, 'b t c h w -> b c t h w')\n tmpt = self.p_sample_loop(y, context, clip_denoised, sample_mid_step, mix_up=pred)\n tmpt = rearrange(tmpt, 'b c t h w -> (b t) c h w')\n pred = rearrange(pred, 'b c t h w -> (b t) c h w')\n return tmpt, pred"
},
{
"identifier": "BaseRunner",
"path": "runners/BaseRunner.py",
"snippet": "class BaseRunner(ABC):\n def __init__(self, config):\n\n # Accelerator\n self.accelerator = Accelerator(\n split_batches=True,\n #mixed_precision='fp16'\n ) \n\n self.net = None # Neural Network\n self.optimizer = None # optimizer\n self.scheduler = None # scheduler\n self.config = config # config from configuration file\n\n # set training params\n self.global_epoch = 0 # global epoch\n if config.args.sample_at_start:\n self.global_step = -1 # global step\n else:\n self.global_step = 0\n\n self.GAN_buffer = {} # GAN buffer for Generative Adversarial Network\n self.topk_checkpoints = {} # Top K checkpoints\n\n # set log and save destination\n self.config.result = argparse.Namespace()\n self.config.result.image_path, \\\n self.config.result.ckpt_path, \\\n self.config.result.log_path, \\\n self.config.result.sample_path, \\\n self.config.result.sample_to_eval_path = make_save_dirs(self.config.args,\n prefix=self.config.data.dataset_name,\n suffix=self.config.model.model_name)\n\n self.save_config() # save configuration file\n self.writer = SummaryWriter(self.config.result.log_path) # initialize SummaryWriter\n\n # dataloader\n train_set, valid_set, test_set = get_dataset(self.config.data, True)\n self.train_loader = DataLoader(train_set, \n batch_size=self.config.data.train.batch_size,\n num_workers=4,\n drop_last=True,\n )\n self.val_loader = DataLoader(valid_set, \n batch_size=self.config.data.val.batch_size,\n num_workers=4,\n drop_last=True,\n )\n self.test_loader = DataLoader(test_set, \n batch_size=self.config.data.test.batch_size,\n num_workers=4,\n drop_last=False,\n )\n # initialize model\n self.net, self.optimizer, self.scheduler = self.initialize_model_optimizer_scheduler(self.config)\n self.print_model_summary(self.net)\n\n # Accelerator\n self.net, self.optimizer, self.scheduler = self.accelerator.prepare(self.net, self.optimizer, self.scheduler)\n self.train_loader, self.val_loader = self.accelerator.prepare(self.train_loader, self.val_loader)\n\n # initialize EMA\n self.use_ema = self.config.model.EMA.use_ema\n if self.use_ema:\n self.ema = EMA(self.config.model.EMA.ema_decay)\n self.update_ema_interval = self.config.model.EMA.update_ema_interval\n self.start_ema_step = self.config.model.EMA.start_ema_step\n self.ema.register(self.net)\n\n # load model from checkpoint\n self.load_model_from_checkpoint()\n\n # save configuration file\n def save_config(self):\n save_path = os.path.join(self.config.result.ckpt_path, 'config.yaml')\n save_config = self.config\n with open(save_path, 'w') as f:\n yaml.dump(save_config, f)\n\n def initialize_model_optimizer_scheduler(self, config, is_test=False):\n \"\"\"\n get model, optimizer, scheduler\n :param args: args\n :param config: config\n :param is_test: is_test\n :return: net: Neural Network, nn.Module;\n optimizer: a list of optimizers;\n scheduler: a list of schedulers or None;\n \"\"\"\n net = self.initialize_model(config)\n optimizer, scheduler = None, None\n if not is_test:\n optimizer, scheduler = self.initialize_optimizer_scheduler(net, config)\n return net, optimizer, scheduler\n\n # load model, EMA, optimizer, scheduler from checkpoint\n def load_model_from_checkpoint(self):\n device = self.accelerator.device\n model_states = None\n if self.config.model.__contains__('model_load_path') and self.config.model.model_load_path is not None:\n print(f\"load model {self.config.model.model_name} from {self.config.model.model_load_path}\")\n model_states = torch.load(self.config.model.model_load_path, map_location=device)\n\n 
self.global_epoch = model_states['epoch']\n self.global_step = model_states['step']\n\n # load model\n try:\n self.net.module.load_state_dict(model_states['model'])\n except:\n self.net.load_state_dict(model_states['model'])\n\n # load ema\n if self.use_ema:\n self.ema.shadow = remove_prefix_state_dict(model_states['ema'])\n self.ema.reset_device(self.net)\n\n # load optimizer and scheduler\n if self.config.args.train:\n if self.config.model.__contains__('optim_sche_load_path') and self.config.model.optim_sche_load_path is not None:\n optimizer_scheduler_states = torch.load(self.config.model.optim_sche_load_path, map_location=device)\n for i in range(len(self.optimizer)):\n self.optimizer[i].load_state_dict(optimizer_scheduler_states['optimizer'][i])\n\n if self.scheduler is not None:\n for i in range(len(self.optimizer)):\n self.scheduler[i].load_state_dict(optimizer_scheduler_states['scheduler'][i])\n return model_states\n\n def get_checkpoint_states(self, stage='epoch_end'):\n optimizer_state = []\n for i in range(len(self.optimizer)):\n optimizer_state.append(self.optimizer[i].state_dict())\n\n scheduler_state = []\n for i in range(len(self.scheduler)):\n scheduler_state.append(self.scheduler[i].state_dict())\n\n optimizer_scheduler_states = {\n 'optimizer': optimizer_state,\n 'scheduler': scheduler_state\n }\n\n model_states = {'step': self.global_step}\n try:\n model_states['model'] = self.net.module.state_dict()\n except:\n model_states['model'] = self.net.state_dict()\n\n if stage == 'exception':\n model_states['epoch'] = self.global_epoch\n else:\n model_states['epoch'] = self.global_epoch + 1\n\n if self.use_ema:\n model_states['ema'] = self.ema.shadow\n return model_states, optimizer_scheduler_states\n\n # EMA part\n def step_ema(self):\n with_decay = False if self.global_step < self.start_ema_step else True\n self.ema.update(self.net, with_decay=with_decay)\n\n def apply_ema(self):\n if self.use_ema:\n self.ema.apply_shadow(self.net)\n\n def restore_ema(self):\n if self.use_ema:\n self.ema.restore(self.net)\n\n # Evaluation and sample part\n @torch.no_grad()\n def validation_epoch(self, val_loader, epoch):\n self.apply_ema()\n self.net.eval()\n\n pbar = tqdm(val_loader, total=len(val_loader), smoothing=0.01)\n step = 0\n loss_sum = 0.\n dloss_sum = 0.\n for val_batch in pbar:\n loss = self.loss_fn(net=self.net,\n batch=val_batch,\n epoch=epoch,\n step=step,\n opt_idx=0,\n stage='val',\n write=False)\n loss_sum += loss\n if len(self.optimizer) > 1:\n loss = self.loss_fn(net=self.net,\n batch=val_batch,\n epoch=epoch,\n step=step,\n opt_idx=1,\n stage='val',\n write=False)\n dloss_sum += loss\n step += 1\n average_loss = loss_sum / step\n self.writer.add_scalar(f'val_epoch/loss', average_loss, epoch)\n if len(self.optimizer) > 1:\n average_dloss = dloss_sum / step\n self.writer.add_scalar(f'val_dloss_epoch/loss', average_dloss, epoch)\n self.restore_ema()\n return average_loss\n\n @torch.no_grad()\n def sample_step(self, train_batch, val_batch):\n self.apply_ema()\n self.net.eval()\n sample_path = make_dir(os.path.join(self.config.result.image_path, str(self.global_step)))\n try:\n self.sample(self.net.module, train_batch, sample_path, stage='train')\n self.sample(self.net.module, val_batch, sample_path, stage='val')\n except:\n self.sample(self.net, train_batch, sample_path, stage='train')\n self.sample(self.net, val_batch, sample_path, stage='val')\n self.restore_ema()\n\n # abstract methods\n @abstractmethod\n def print_model_summary(self, net):\n pass\n\n @abstractmethod\n 
def initialize_model(self, config):\n \"\"\"\n initialize model\n :param config: config\n :return: nn.Module\n \"\"\"\n pass\n\n @abstractmethod\n def initialize_optimizer_scheduler(self, net, config):\n \"\"\"\n initialize optimizer and scheduler\n :param net: nn.Module\n :param config: config\n :return: a list of optimizers; a list of schedulers\n \"\"\"\n pass\n\n @abstractmethod\n def loss_fn(self, net, batch, epoch, step, opt_idx=0, stage='train', write=True):\n \"\"\"\n loss function\n :param net: nn.Module\n :param batch: batch\n :param epoch: global epoch\n :param step: global step\n :param opt_idx: optimizer index, default is 0; set it to 1 for GAN discriminator\n :param stage: train, val, [test]\n :param write: write loss information to SummaryWriter\n :return: a scalar of loss\n \"\"\"\n pass\n\n @abstractmethod\n def sample(self, net, batch, sample_path, stage='train'):\n \"\"\"\n sample a single batch\n :param net: nn.Module\n :param batch: batch\n :param sample_path: path to save samples\n :param stage: train, val, test\n :return:\n \"\"\"\n pass\n\n @abstractmethod\n def sample_to_eval(self, net, test_loader, sample_path):\n \"\"\"\n sample among the test dataset to calculate evaluation metrics\n :param net: nn.Module\n :param test_loader: test dataloader\n :param sample_path: path to save samples\n :return:\n \"\"\"\n pass\n\n def on_save_checkpoint(self, net, train_loader, val_loader, epoch, step):\n \"\"\"\n additional operations whilst saving checkpoint\n :param net: nn.Module\n :param train_loader: train data loader\n :param val_loader: val data loader\n :param epoch: epoch\n :param step: step\n :return:\n \"\"\"\n pass\n\n def train(self):\n print(self.__class__.__name__)\n\n epoch_length = len(self.train_loader)\n start_epoch = self.global_epoch\n print(f\"start training {self.config.model.model_name} on {self.config.data.dataset_name}, {len(self.train_loader)} iters per epoch\")\n try:\n accumulate_grad_batches = self.config.training.accumulate_grad_batches\n for epoch in range(start_epoch, self.config.training.n_epochs):\n\n pbar = tqdm(self.train_loader, total=len(self.train_loader), smoothing=0.01)\n self.global_epoch = epoch\n start_time = time.time()\n for train_batch in pbar:\n self.global_step += 1\n self.net.train()\n losses = []\n for i in range(len(self.optimizer)):\n loss = self.loss_fn(net=self.net,\n batch=train_batch,\n epoch=epoch,\n step=self.global_step,\n opt_idx=i,\n stage='train')\n self.accelerator.backward(loss)\n if self.global_step % accumulate_grad_batches == 0:\n self.optimizer[i].step()\n self.optimizer[i].zero_grad()\n \n self.writer.add_scalar(f'loss/LR', self.optimizer[0].param_groups[0]['lr'], self.global_step)\n if self.scheduler is not None:\n self.scheduler[i].step(loss)\n \n losses.append(loss.detach().mean())\n self.accelerator.wait_for_everyone()\n if self.use_ema and self.global_step % (self.update_ema_interval*accumulate_grad_batches) == 0:\n self.step_ema()\n if len(self.optimizer) > 1:\n pbar.set_description(\n (\n f'Epoch: [{epoch + 1} / {self.config.training.n_epochs}] '\n f'iter: {self.global_step} loss-1: {losses[0]:.4f} loss-2: {losses[1]:.4f}'\n )\n )\n else:\n pbar.set_description(\n (\n f'Epoch: [{epoch + 1} / {self.config.training.n_epochs}] '\n f'iter: {self.global_step} loss: {losses[0]:.4f}'\n )\n )\n\n with torch.no_grad():\n if self.global_step % int(self.config.training.sample_interval * epoch_length) == 0:\n if self.accelerator.is_main_process:\n val_batch = next(iter(self.val_loader))\n 
self.sample_step(val_batch=val_batch, train_batch=train_batch)\n torch.cuda.empty_cache()\n self.accelerator.wait_for_everyone()\n\n end_time = time.time()\n elapsed_rounded = int(round((end_time-start_time)))\n print(\"training time: \" + str(datetime.timedelta(seconds=elapsed_rounded)))\n\n # validation\n # if (epoch + 1) % self.config.training.validation_interval == 0 or (epoch + 1) == self.config.training.n_epochs:\n if (epoch) % self.config.training.validation_interval == 0 or (epoch + 1) == self.config.training.n_epochs:\n with torch.no_grad():\n print(\"validating epoch...\")\n average_loss = self.validation_epoch(self.val_loader, epoch)\n torch.cuda.empty_cache()\n print(\"validating epoch success\")\n\n # save checkpoint\n if (epoch + 1) % self.config.training.save_interval == 0 or (epoch + 1) == self.config.training.n_epochs:\n if self.accelerator.is_main_process:\n with torch.no_grad():\n print(\"saving latest checkpoint...\")\n self.on_save_checkpoint(self.net, self.train_loader, self.val_loader, epoch, self.global_step)\n model_states, optimizer_scheduler_states = self.get_checkpoint_states(stage='epoch_end')\n\n # save latest checkpoint\n temp = 0\n while temp < epoch + 1:\n if False:\n remove_file(os.path.join(self.config.result.ckpt_path, f'latest_model_{temp}.pth'))\n remove_file(os.path.join(self.config.result.ckpt_path, f'latest_optim_sche_{temp}.pth'))\n temp += 1\n torch.save(model_states, os.path.join(self.config.result.ckpt_path, f'latest_model_{epoch + 1}.pth'))\n torch.save(optimizer_scheduler_states, os.path.join(self.config.result.ckpt_path, f'latest_optim_sche_{epoch + 1}.pth'))\n torch.save(model_states, os.path.join(self.config.result.ckpt_path, f'last_model.pth'))\n torch.save(optimizer_scheduler_states, os.path.join(self.config.result.ckpt_path, f'last_optim_sche.pth'))\n\n # save top_k checkpoints\n model_ckpt_name = os.path.join(self.config.result.ckpt_path, f'top_model_epoch={epoch + 1}.pth')\n optim_sche_ckpt_name = os.path.join(self.config.result.ckpt_path, f'top_optim_sche_epoch={epoch + 1}.pth')\n\n if self.config.args.save_top:\n top_key = 'top'\n if top_key not in self.topk_checkpoints:\n self.topk_checkpoints[top_key] = {\"loss\": average_loss,\n 'model_ckpt_name': model_ckpt_name,\n 'optim_sche_ckpt_name': optim_sche_ckpt_name}\n\n print(f\"saving top checkpoint: average_loss={average_loss} epoch={epoch + 1}\")\n torch.save(model_states, model_ckpt_name, _use_new_zipfile_serialization=False)\n torch.save(optimizer_scheduler_states, optim_sche_ckpt_name, _use_new_zipfile_serialization=False)\n else:\n if average_loss < self.topk_checkpoints[top_key][\"loss\"]:\n print(\"remove \" + self.topk_checkpoints[top_key][\"model_ckpt_name\"])\n remove_file(self.topk_checkpoints[top_key]['model_ckpt_name'])\n remove_file(self.topk_checkpoints[top_key]['optim_sche_ckpt_name'])\n\n print(f\"saving top checkpoint: average_loss={average_loss} epoch={epoch + 1}\")\n\n self.topk_checkpoints[top_key] = {\"loss\": average_loss,\n 'model_ckpt_name': model_ckpt_name,\n 'optim_sche_ckpt_name': optim_sche_ckpt_name}\n\n torch.save(model_states, model_ckpt_name) #_use_new_zipfile_serialization=False)\n torch.save(optimizer_scheduler_states, optim_sche_ckpt_name) #_use_new_zipfile_serialization=False)\n\n except BaseException as e:\n if self.accelerator.is_main_process:\n print(\"exception save model start....\")\n print(self.__class__.__name__)\n model_states, optimizer_scheduler_states = self.get_checkpoint_states(stage='exception')\n torch.save(model_states,\n 
os.path.join(self.config.result.ckpt_path, f'last_model.pth'),\n _use_new_zipfile_serialization=False)\n torch.save(optimizer_scheduler_states,\n os.path.join(self.config.result.ckpt_path, f'last_optim_sche.pth'),\n _use_new_zipfile_serialization=False)\n\n print(\"exception save model success!\")\n\n print('str(Exception):\\t', str(Exception))\n print('str(e):\\t\\t', str(e))\n print('repr(e):\\t', repr(e))\n print('traceback.print_exc():')\n traceback.print_exc()\n print('traceback.format_exc():\\n%s' % traceback.format_exc())\n\n @torch.no_grad()\n def test(self):\n self.test_loader = self.accelerator.prepare(self.test_loader)\n if self.use_ema:\n self.apply_ema()\n\n self.net.eval()\n if self.config.args.sample_to_eval:\n sample_path = self.config.result.sample_to_eval_path\n self.sample_to_eval(self.net, self.test_loader, sample_path)\n else:\n test_iter = iter(self.test_loader)\n for i in tqdm(range(1), initial=0, dynamic_ncols=True, smoothing=0.01):\n test_batch = next(test_iter)\n sample_path = os.path.join(self.config.result.sample_path, str(i))\n self.sample(self.net, test_batch, sample_path, stage='test')"
},
{
"identifier": "weights_init",
"path": "runners/utils.py",
"snippet": "def weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Attention') != -1:\n pass\n elif classname.find('Conv2d') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('Linear') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('Parameter') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)"
},
{
"identifier": "get_dataset",
"path": "runners/utils.py",
"snippet": "def get_dataset(data_config, test=False):\n train_dataset = Registers.datasets[data_config.dataset_type](data_config.dataset_config, stage='train')\n val_dataset = Registers.datasets[data_config.dataset_type](data_config.dataset_config, stage='val')\n if test:\n return train_dataset, val_dataset, val_dataset\n return train_dataset, val_dataset"
},
{
"identifier": "make_dir",
"path": "runners/utils.py",
"snippet": "def make_dir(dir):\n os.makedirs(dir, exist_ok=True)\n return dir"
},
{
"identifier": "get_image_grid",
"path": "runners/utils.py",
"snippet": "@torch.no_grad()\ndef get_image_grid(batch, grid_size=4, to_normal=True):\n batch = batch.detach().clone()\n image_grid = make_grid(batch, nrow=grid_size)\n if to_normal:\n image_grid = image_grid.mul_(0.5).add_(0.5).clamp_(0, 1.)\n image_grid = image_grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n return image_grid"
},
{
"identifier": "save_single_image",
"path": "runners/utils.py",
"snippet": "@torch.no_grad()\ndef save_single_image(image, save_path, file_name, to_normal=True):\n image = image.detach().clone()\n if to_normal:\n image = image.mul_(0.5).add_(0.5).clamp_(0, 1.)\n image = image.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n im = Image.fromarray(image)\n im.save(os.path.join(save_path, file_name))"
},
{
"identifier": "AverageMeter",
"path": "runners/utils.py",
"snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count"
},
{
"identifier": "metric",
"path": "runners/utils.py",
"snippet": "def metric(pred, true, mean, std, return_ssim_psnr=False, clip_range=[0, 1]):\n pred = pred*std + mean\n true = true*std + mean\n mae = MAE(pred, true)\n mse = MSE(pred, true)\n\n if return_ssim_psnr:\n pred = np.maximum(pred, clip_range[0])\n pred = np.minimum(pred, clip_range[1])\n ssim, psnr = 0, 0\n for b in range(pred.shape[0]):\n for f in range(pred.shape[1]):\n ssim += cal_ssim(pred[b, f].swapaxes(0, 2), true[b, f].swapaxes(0, 2), multichannel=True)\n psnr += PSNR(pred[b, f], true[b, f])\n ssim = ssim / (pred.shape[0] * pred.shape[1])\n psnr = psnr / (pred.shape[0] * pred.shape[1])\n return mse, mae, ssim, psnr\n else:\n return mse, mae"
},
{
"identifier": "save_single_video",
"path": "runners/utils.py",
"snippet": "@torch.no_grad()\ndef save_single_video(image, save_path, file_name, grid_size=10, to_normal=True):\n image = image.detach().clone()\n image_grid = get_image_grid(image, grid_size=grid_size, to_normal=to_normal)\n im = Image.fromarray(image_grid)\n im.save(os.path.join(save_path, file_name))"
}
] | import os
import lpips
import torch.optim.lr_scheduler
import torch.nn.functional as F
import time
import imageio
import pdb
from torch.utils.data import DataLoader
from PIL import Image
from Register import Registers
from model.BrownianBridgeModel import BrownianBridgeModel
from runners.BaseRunner import BaseRunner
from runners.utils import weights_init, get_dataset, make_dir, get_image_grid, save_single_image
from runners.utils import AverageMeter, metric, save_single_video
from runners.utils import *
from tqdm.autonotebook import tqdm
from torchsummary import summary | 9,270 |
@Registers.runners.register_with_name('BBDMRunner')
class BBDMRunner(BaseRunner):
def __init__(self, config):
super().__init__(config)
def initialize_model(self, config):
if config.model.model_type == "BBDM":
bbdmnet = BrownianBridgeModel(config)
else:
raise NotImplementedError
# initialize model
try:
bbdmnet.apply(weights_init)
except:
pass
return bbdmnet
def load_model_from_checkpoint(self):
states = super().load_model_from_checkpoint()
def print_model_summary(self, net):
def get_parameter_number(model):
total_num = sum(p.numel() for p in model.parameters())
trainable_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
return total_num, trainable_num
total_num, trainable_num = get_parameter_number(net)
print("Total Number of parameter: %.2fM" % (total_num / 1e6))
print("Trainable Number of parameter: %.2fM" % (trainable_num / 1e6))
def initialize_optimizer_scheduler(self, net, config):
# diffusion model weight
learning_params = [{'params':net.denoise_fn.parameters(), 'lr':config.model.BB.optimizer.lr}]
# condition model weight
if config.model.CondParams.train or config.model.CondParams.pretrained is None:
learning_params.append({'params':net.cond_stage_model.parameters(), 'lr':config.model.CondParams.lr})
optimizer = torch.optim.Adam(learning_params,
weight_decay=config.model.BB.optimizer.weight_decay,
betas=(config.model.BB.optimizer.beta1, config.model.BB.optimizer.beta2)
)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
mode='min',
verbose=True,
threshold_mode='rel',
**vars(config.model.BB.lr_scheduler)
)
return [optimizer], [scheduler]
@torch.no_grad()
def get_checkpoint_states(self, stage='epoch_end'):
model_states, optimizer_scheduler_states = super().get_checkpoint_states()
return model_states, optimizer_scheduler_states
def loss_fn(self, net, batch, epoch, step, opt_idx=0, stage='train', write=True):
x, x_cond = batch
loss, additional_info, cond = net(x, x_cond)
if write:
self.writer.add_scalar(f'loss/{stage}', loss, step)
self.writer.add_scalar(f'loss/cond', cond, step)
loss = loss + cond
return loss
@torch.no_grad()
def sample(self, net, batch, sample_path, stage='train', write=True):
sample_path = make_dir(os.path.join(sample_path, f'{stage}_sample'))
x, x_cond = batch
# batch_size = x.shape[0] if x.shape[0] < 4 else 4
batch_size = 1
x = x[0:1]
x_cond = x_cond[0:1]
grid_size = max(x.size(1), x_cond.size(1))
# save images
sample = net.sample(x_cond, clip_denoised=self.config.testing.clip_denoised)
sample, prediction = sample[0], sample[1]
channels = ['ir105', 'sw038', 'wv063']
for z, channel in enumerate(channels):
x_conds = x_cond[0,:, z:z+1]
x_split = x[0,:, z:z+1]
sample_split = sample[:, z:z+1]
prediction_split = prediction[:, z:z+1]
|
@Registers.runners.register_with_name('BBDMRunner')
class BBDMRunner(BaseRunner):
def __init__(self, config):
super().__init__(config)
def initialize_model(self, config):
if config.model.model_type == "BBDM":
bbdmnet = BrownianBridgeModel(config)
else:
raise NotImplementedError
# initialize model
try:
bbdmnet.apply(weights_init)
except:
pass
return bbdmnet
def load_model_from_checkpoint(self):
states = super().load_model_from_checkpoint()
def print_model_summary(self, net):
def get_parameter_number(model):
total_num = sum(p.numel() for p in model.parameters())
trainable_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
return total_num, trainable_num
total_num, trainable_num = get_parameter_number(net)
print("Total Number of parameter: %.2fM" % (total_num / 1e6))
print("Trainable Number of parameter: %.2fM" % (trainable_num / 1e6))
def initialize_optimizer_scheduler(self, net, config):
# diffusion model weight
learning_params = [{'params':net.denoise_fn.parameters(), 'lr':config.model.BB.optimizer.lr}]
# condition model weight
if config.model.CondParams.train or config.model.CondParams.pretrained is None:
learning_params.append({'params':net.cond_stage_model.parameters(), 'lr':config.model.CondParams.lr})
optimizer = torch.optim.Adam(learning_params,
weight_decay=config.model.BB.optimizer.weight_decay,
betas=(config.model.BB.optimizer.beta1, config.model.BB.optimizer.beta2)
)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
mode='min',
verbose=True,
threshold_mode='rel',
**vars(config.model.BB.lr_scheduler)
)
return [optimizer], [scheduler]
@torch.no_grad()
def get_checkpoint_states(self, stage='epoch_end'):
model_states, optimizer_scheduler_states = super().get_checkpoint_states()
return model_states, optimizer_scheduler_states
def loss_fn(self, net, batch, epoch, step, opt_idx=0, stage='train', write=True):
x, x_cond = batch
loss, additional_info, cond = net(x, x_cond)
if write:
self.writer.add_scalar(f'loss/{stage}', loss, step)
self.writer.add_scalar(f'loss/cond', cond, step)
loss = loss + cond
return loss
@torch.no_grad()
def sample(self, net, batch, sample_path, stage='train', write=True):
sample_path = make_dir(os.path.join(sample_path, f'{stage}_sample'))
x, x_cond = batch
# batch_size = x.shape[0] if x.shape[0] < 4 else 4
batch_size = 1
x = x[0:1]
x_cond = x_cond[0:1]
grid_size = max(x.size(1), x_cond.size(1))
# save images
sample = net.sample(x_cond, clip_denoised=self.config.testing.clip_denoised)
sample, prediction = sample[0], sample[1]
channels = ['ir105', 'sw038', 'wv063']
for z, channel in enumerate(channels):
x_conds = x_cond[0,:, z:z+1]
x_split = x[0,:, z:z+1]
sample_split = sample[:, z:z+1]
prediction_split = prediction[:, z:z+1]
| save_single_video(x_conds, sample_path, f'{channel}_input.png', grid_size, to_normal=self.config.data.dataset_config.to_normal) | 10 | 2023-11-30 01:05:47+00:00 | 12k |
VinAIResearch/MISCA | main.py | [
{
"identifier": "Trainer",
"path": "trainer.py",
"snippet": "class Trainer(object):\n def __init__(self, args, collate, train_dataset=None, dev_dataset=None, test_dataset=None):\n self.args = args\n self.train_dataset = train_dataset\n self.dev_dataset = dev_dataset\n self.test_dataset = test_dataset\n self.collate_fn = collate\n args.n_chars = len(self.train_dataset.chars)\n if 'bert' in self.args.model_type:\n self.tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n train_dataset.load_bert(self.tokenizer)\n dev_dataset.load_bert(self.tokenizer)\n test_dataset.load_bert(self.tokenizer)\n\n self.intent_label_lst = get_intent_labels(args)\n self.slot_label_lst, self.hiers = get_slots_all(args)\n\n self.pad_token_label_id = args.ignore_index\n self.config_class, self.model_class, _ = MODEL_CLASSES[args.model_type]\n if 'bert' in self.args.model_type:\n self.config = self.config_class.from_pretrained(args.model_name_or_path, finetuning_task=args.task)\n self.model = self.model_class.from_pretrained(\n args.model_name_or_path,\n config=self.config,\n args=args,\n intent_label_lst=self.intent_label_lst,\n slot_label_lst=self.slot_label_lst,\n slot_hier=self.hiers\n )\n else:\n self.model = self.model_class(args, len(self.train_dataset.vocab), self.intent_label_lst, self.slot_label_lst, self.hiers)\n if args.base_model:\n model_state = self.model.state_dict()\n pretrained_state = torch.load(os.path.join(args.base_model, 'model.bin'))\n pretrained_state = { k:v for k,v in pretrained_state.items() if k in model_state and v.size() == model_state[k].size() }\n model_state.update(pretrained_state)\n self.model.load_state_dict(model_state)\n \n self.device = \"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\"\n self.model.to(self.device)\n\n def train(self):\n train_sampler = RandomSampler(self.train_dataset)\n train_dataloader = DataLoader(self.train_dataset, sampler=train_sampler, batch_size=self.args.train_batch_size, collate_fn=self.collate_fn)\n \n writer = SummaryWriter(log_dir=self.args.model_dir)\n if self.args.max_steps > 0:\n t_total = self.args.max_steps\n self.args.num_train_epochs = (\n self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1\n )\n else:\n t_total = len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs\n print(\"check init\")\n results = self.evaluate(\"dev\", -1)\n print(results)\n logfile = open(self.args.model_dir + \"/\" + self.args.logging, 'w')\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=t_total\n )\n\n if self.args.logging_steps < 0:\n self.args.logging_steps = len(train_dataloader)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(self.train_dataset))\n logger.info(\" Num Epochs = %d\", self.args.num_train_epochs)\n logger.info(\" Total train batch size = %d\", self.args.train_batch_size)\n logger.info(\" Gradient Accumulation steps = %d\", 
self.args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n logger.info(\" Logging steps = %d\", self.args.logging_steps)\n\n global_step = 0\n tr_loss = 0.0\n self.model.zero_grad()\n best_sent = 0\n best_slot = 0\n\n train_iterator = trange(int(self.args.num_train_epochs), desc=\"Epoch\")\n early_stopping = EarlyStopping(patience=self.args.early_stopping, verbose=True)\n\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", position=0, leave=True)\n print(\"\\nEpoch\", _)\n\n for step, batch in enumerate(epoch_iterator):\n self.model.train()\n batch = tuple(t.to(self.device) for t in batch[:-1]) + (batch[-1], ) # GPU or CPU\n if 'bert' in self.args.model_type:\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[3],\n \"intent_label_ids\": batch[5],\n \"slot_labels_ids\": batch[6],\n \"token_type_ids\": batch[4],\n \"heads\": batch[2],\n \"seq_lens\": batch[-1].cpu()\n }\n else:\n inputs = {\n \"input_ids\": batch[0],\n \"char_ids\": batch[1],\n \"intent_label_ids\": batch[2],\n \"slot_labels_ids\": batch[3],\n \"seq_lens\": batch[4],\n }\n outputs = self.model(**inputs)\n total_loss, intent_loss, slot_loss, count_loss = outputs[0]\n\n if self.args.gradient_accumulation_steps > 1:\n total_loss = total_loss / self.args.gradient_accumulation_steps\n if _ < self.args.num_train_epochs * self.args.only_intent:\n total_loss = intent_loss + count_loss \n total_loss.backward()\n else:\n total_loss.backward()\n\n tr_loss += total_loss.item()\n if (step + 1) % self.args.gradient_accumulation_steps == 0:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n self.model.zero_grad()\n global_step += 1\n\n if self.args.logging_steps > 0 and global_step % (self.args.logging_steps) == 0:\n print(\"\\nTuning metrics:\", self.args.tuning_metric)\n results = self.evaluate(\"dev\", _)\n # self.evaluate(\"test\")\n writer.add_scalar(\"Loss/validation\", results[\"loss\"], _)\n writer.add_scalar(\"Intent Accuracy/validation\", results[\"intent_acc\"], _)\n writer.add_scalar(\"Intent F1\", results[\"intent_f1\"], _)\n writer.add_scalar(\"Slot F1/validation\", results[\"slot_f1\"], _)\n writer.add_scalar(\"Mean Intent Slot\", results[\"mean_intent_slot\"], _)\n writer.add_scalar(\"Sentence Accuracy/validation\", results[\"semantic_frame_acc\"], _)\n\n if results['semantic_frame_acc'] >= best_sent or results['slot_f1'] >= best_slot:\n best_sent = results['semantic_frame_acc']\n best_slot = results['slot_f1']\n self.save_model()\n results = self.evaluate('test', _)\n logfile.write('\\n\\nEPOCH = ' + str(_) + '\\n')\n for key in sorted(results.keys()):\n to_write = \" {key} = {value}\".format(key=key, value=str(results[key]))\n logfile.write(to_write)\n logfile.write(\"\\n\")\n\n if 0 < self.args.max_steps < global_step:\n epoch_iterator.close()\n break\n\n if 0 < self.args.max_steps < global_step or early_stopping.early_stop:\n train_iterator.close()\n break\n writer.add_scalar(\"Loss/train\", tr_loss / global_step, _)\n logfile.close()\n return global_step, tr_loss / global_step\n\n def write_evaluation_result(self, out_file, results):\n out_file = self.args.model_dir + \"/\" + out_file\n w = open(out_file, \"w\", encoding=\"utf-8\")\n w.write(\"***** Eval results *****\\n\")\n for key in sorted(results.keys()):\n to_write = \" {key} = {value}\".format(key=key, value=str(results[key]))\n w.write(to_write)\n 
w.write(\"\\n\")\n w.close()\n\n def evaluate(self, mode, epoch):\n if mode == \"test\":\n dataset = self.test_dataset\n elif mode == \"dev\":\n dataset = self.dev_dataset\n else:\n raise Exception(\"Only dev and test dataset available\")\n\n eval_sampler = SequentialSampler(dataset)\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=self.args.eval_batch_size, collate_fn=self.collate_fn)\n\n logger.info(\"***** Running evaluation on %s dataset *****\", mode)\n logger.info(\" Num examples = %d\", len(dataset))\n logger.info(\" Batch size = %d\", self.args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n slot_label_map = {i: label for i, label in enumerate(self.slot_label_lst)}\n out_slot_label_list = []\n slot_preds_list = []\n predictions = []\n intent_labels = []\n int_len_gold = []\n int_len_pred = []\n\n results = {}\n self.model.eval()\n\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n batch = tuple(t.to(self.device) for t in batch[:-1]) + (batch[-1], )\n # print(batch)\n with torch.no_grad():\n if 'bert' in self.args.model_type:\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[3],\n \"intent_label_ids\": batch[5],\n \"slot_labels_ids\": batch[6],\n \"token_type_ids\": batch[4],\n \"heads\": batch[2],\n \"seq_lens\": batch[-1].cpu()\n }\n else:\n inputs = {\n \"input_ids\": batch[0],\n \"char_ids\": batch[1],\n \"intent_label_ids\": batch[2],\n \"slot_labels_ids\": batch[3],\n \"seq_lens\": batch[4],\n }\n outputs = self.model(**inputs)\n \n if self.args.num_intent_detection:\n tmp_eval_loss, (intent_logits, slot_logits, intent_dec) = outputs[:2]\n else:\n tmp_eval_loss, (intent_logits, slot_logits) = outputs[:2]\n\n eval_loss += tmp_eval_loss[0].mean().item()\n nb_eval_steps += 1\n\n # Intent prediction\n intent_logits = F.logsigmoid(intent_logits).detach().cpu()\n intent_preds = intent_logits.numpy()\n if self.args.num_intent_detection:\n intent_nums = intent_dec.detach().cpu().numpy()\n out_intent_label_ids = inputs[\"intent_label_ids\"].detach().cpu().numpy()\n intent_labels.extend(out_intent_label_ids.tolist())\n\n # Slot prediction\n \n if self.args.use_crf:\n slot_preds = np.array(self.model.crf.decode(slot_logits))\n else:\n slot_preds = slot_logits.detach().cpu()\n out_slot_labels_ids = inputs[\"slot_labels_ids\"].detach().cpu().numpy()\n\n cur = []\n if self.args.num_intent_detection:\n num_intents = intent_logits.size(1)\n intent_nums = np.argmax(intent_nums, axis=-1)\n gold_nums = np.sum(out_intent_label_ids, axis=-1)\n int_len_gold.extend(gold_nums.tolist())\n int_len_pred.extend(intent_nums.tolist())\n for num, preds in zip(intent_nums, intent_preds):\n idx = preds.argsort()[-num:]\n p = np.zeros(num_intents)\n p[idx] = 1.\n predictions.append(p)\n cur.append(p)\n else:\n predictions.extend(np.rint(intent_preds).tolist())\n\n if not self.args.use_crf:\n slot_preds_arg = np.argmax(slot_preds.numpy(), axis=2)\n else:\n slot_preds_arg = slot_preds\n \n for i in range(out_slot_labels_ids.shape[0]):\n slt = None\n out_slot_label_list.append([])\n slot_preds_list.append([])\n for j in range(out_slot_labels_ids.shape[1]):\n if out_slot_labels_ids[i, j] != self.pad_token_label_id:\n out_slot_label_list[-1].append(slot_label_map[out_slot_labels_ids[i][j]])\n \n predict_label = slot_label_map[slot_preds_arg[i][j]]\n if predict_label[:2] == 'B-':\n slt = predict_label[2:]\n elif predict_label[:2] == 'I-':\n if slt is None:\n predict_label = 'O'\n elif slt != predict_label[2:]:\n predict_label = 'O'\n else:\n slt = None\n 
slot_preds_list[-1].append(predict_label)\n eval_loss = eval_loss / nb_eval_steps\n results['loss'] = eval_loss\n predictions = np.array(predictions)\n intent_labels = np.array(intent_labels)\n total_result = compute_metrics(predictions, intent_labels, slot_preds_list, out_slot_label_list)\n results.update(total_result)\n int_len_gold = np.array(int_len_gold)\n int_len_pred = np.array(int_len_pred)\n results['num_acc'] = (int_len_gold == int_len_pred).mean()\n results['epoch'] = epoch\n logger.info(\"***** Eval results *****\")\n for key in sorted(results.keys()):\n logger.info(\" %s = %s\", key, str(results[key]))\n if mode == \"test\":\n self.write_evaluation_result(\"eval_test_results.txt\", results)\n elif mode == \"dev\":\n self.write_evaluation_result(\"eval_dev_results.txt\", results)\n return results\n\n def save_model(self):\n # Save model checkpoint (Overwrite)\n if not os.path.exists(self.args.model_dir):\n os.makedirs(self.args.model_dir)\n model_to_save = self.model.module if hasattr(self.model, \"module\") else self.model\n torch.save(model_to_save, os.path.join(self.args.model_dir, 'model.bin'))\n\n # Save training arguments together with the trained model\n torch.save(self.args, os.path.join(self.args.model_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", self.args.model_dir)\n\n def load_model(self):\n # Check whether model exists\n if not os.path.exists(self.args.model_dir):\n raise Exception(\"Model doesn't exists! Train first!\")\n\n try:\n self.model.load_state_dict(torch.load(os.path.join(self.args.model_dir, 'model.bin')), strict=False)\n self.model.to(self.device)\n logger.info(\"***** Model Loaded *****\")\n except Exception:\n raise Exception(\"Some model files might be missing...\")"
},
{
"identifier": "init_logger",
"path": "utils.py",
"snippet": "def init_logger():\n logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)"
},
{
"identifier": "load_tokenizer",
"path": "utils.py",
"snippet": "def load_tokenizer(args):\n return MODEL_CLASSES[args.model_type][2].from_pretrained(args.model_name_or_path)"
},
{
"identifier": "read_prediction_text",
"path": "utils.py",
"snippet": "def read_prediction_text(args):\n return [text.strip() for text in open(os.path.join(args.pred_dir, args.pred_input_file), 'r', encoding='utf-8')]"
},
{
"identifier": "set_seed",
"path": "utils.py",
"snippet": "def set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if not args.no_cuda and torch.cuda.is_available():\n torch.cuda.manual_seed_all(args.seed)"
},
{
"identifier": "MODEL_CLASSES",
"path": "utils.py",
"snippet": "MODEL_CLASSES = {\n \"lstm\": (None, JointLSTM, None),\n \"roberta\": (RobertaConfig, JointRoberta, RobertaTokenizer)\n}"
},
{
"identifier": "MODEL_PATH_MAP",
"path": "utils.py",
"snippet": "MODEL_PATH_MAP = {\n \"lstm\": \"\",\n \"roberta\": \"roberta-base\"\n}"
},
{
"identifier": "get_intent_labels",
"path": "utils.py",
"snippet": "def get_intent_labels(args):\n return [label.strip() for label in open(os.path.join(args.data_dir, args.task, args.intent_label_file), 'r', encoding='utf-8')]"
},
{
"identifier": "get_slots_all",
"path": "utils.py",
"snippet": "def get_slots_all(args):\n slot_labels = get_slot_labels(args)\n hier = ()\n if args.task == 'mixatis':\n slot_parents = get_clean_labels(args)\n hier = (slot_parents, )\n slot_type = sorted(set([name[2:] for name in slot_labels if name[:2] == 'B-' or name[:2] == 'I-']))\n hier += (slot_type, )\n return slot_labels, hier"
},
{
"identifier": "TextLoader",
"path": "data_loader.py",
"snippet": "class TextLoader(Dataset):\n\n def __init__(self, args, mode):\n self.args = args\n self.intent_labels = get_intent_labels(args)\n self.slot_labels, self.hiers = get_slots_all(args)\n\n self.vocab = Vocab(min_freq=self.args.min_freq)\n self.chars = Vocab()\n self.examples = self.build(mode)\n def load_bert(self, tokenizer):\n pad_token_label_id = self.args.ignore_index\n self.examples = convert_examples_to_features(self.examples, self.args.max_seq_len, tokenizer,\n pad_token_label_id=pad_token_label_id)\n @classmethod\n def read_file(cls, input_file, quotechar=None):\n \"\"\" Read data file of given path.\n :param file_path: path of data file.\n :return: list of sentence, list of slot and list of intent.\n \"\"\"\n\n texts, slots, intents = [], [], []\n text, slot = [], []\n\n with open(input_file, 'r', encoding=\"utf8\") as fr:\n for line in fr.readlines():\n items = line.strip().split()\n\n if len(items) == 1:\n texts.append(text)\n slots.append(slot)\n if \"/\" not in items[0]:\n intents.append(items)\n else:\n new = items[0].split(\"/\")\n intents.append([new[1]])\n\n # clear buffer lists.\n text, slot = [], []\n\n elif len(items) == 2:\n text.append(items[0].strip())\n slot.append(items[1].strip())\n\n return texts, slots, intents\n\n def _create_examples(self, texts, chars, intents, slots, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for i, (text, char, intent, slot) in enumerate(zip(texts, chars, intents, slots)):\n guid = \"%s-%s\" % (set_type, i)\n # 1. input_text\n words = self.vocab.get_index(text) # Some are spaced twice\n words = [self.vocab.start_index] + words + [self.vocab.end_index]\n # char\n char = self.chars.get_index(char)\n max_char = max([len(x) for x in char])\n for j in range(len(char)):\n char[j] = char[j] + [0] * (max_char - len(char[j]))\n char = [[0] * max_char] + char + [[0] * max_char]\n # 2. intent\n _intent = intent[0].split('#')\n intent_label = [0 for _ in self.intent_labels]\n for _int in _intent:\n idx = self.intent_labels.index(_int) if _int in self.intent_labels else self.intent_labels.index(\"UNK\")\n intent_label[idx] = 1\n # 3. 
slot\n slot_labels = []\n for s in slot:\n slot_labels.append(self.slot_labels.index(s) if s in self.slot_labels else self.slot_labels.index(\"UNK\"))\n slot_labels = [self.slot_labels.index('PAD')] + slot_labels + [self.slot_labels.index('PAD')]\n assert len(words) == len(slot_labels)\n examples.append(InputExample(guid=guid, words=words, chars=char, intent_label=intent_label, slot_labels=slot_labels, text=text))\n return examples\n\n def build(self, mode):\n data_path = os.path.join(self.args.data_dir, self.args.task, mode + '.txt')\n logger.info(\"LOOKING AT {}\".format(data_path))\n texts, slots, intents = self.read_file(data_path)\n\n chars = []\n max_len = 0\n for text in texts:\n chars.append([])\n for word in text:\n chars[-1].append(list(word))\n\n cache = os.path.join(self.args.data_dir, f'vocab_{self.args.task}')\n if os.path.exists(cache):\n self.vocab.load(cache)\n elif mode == 'train':\n self.vocab.add(texts)\n self.vocab.save(cache)\n cache_chars = os.path.join(self.args.data_dir, f'chars_{self.args.task}')\n if os.path.exists(cache_chars):\n self.chars.load(cache_chars)\n elif mode == 'train':\n self.chars.add(chars)\n self.chars.save(cache_chars)\n \n return self._create_examples(texts=texts,\n chars=chars,\n intents=intents,\n slots=slots,\n set_type=mode)\n \n def __getitem__(self, index):\n example = self.examples[index]\n words = torch.tensor(example.words, dtype=torch.long)\n \n intent = torch.tensor(example.intent_label, dtype=torch.float)\n slot = torch.tensor(example.slot_labels, dtype=torch.long)\n chars = torch.tensor(example.chars, dtype=torch.long)\n\n if 'bert' in self.args.model_type:\n attention_mask = torch.tensor(example.attention_mask, dtype=torch.long)\n token_type_ids = torch.tensor(example.token_type_ids, dtype=torch.long)\n heads = torch.tensor(example.heads, dtype=torch.long)\n return (words, chars, heads, attention_mask, token_type_ids, intent, slot)\n else:\n return (words, chars, intent, slot)\n\n def __len__(self):\n return len(self.examples)"
},
{
"identifier": "TextCollate",
"path": "data_loader.py",
"snippet": "class TextCollate():\n def __init__(self, pad_index, num_intents, max_seq_len):\n self.pad_index = pad_index\n self.num_intents = num_intents\n self.max_seq_len = max_seq_len\n\n def __call__(self, batch):\n \n len_list = [len(x[-1]) for x in batch]\n len_char = [x[1].size(1) for x in batch]\n max_len = max(len_list)\n max_char = max(len_char)\n\n seq_lens = []\n\n bert = len(batch[0]) > 4\n \n char_padded = torch.LongTensor(len(batch), max_len, max_char)\n slot_padded = torch.LongTensor(len(batch), max_len)\n intent = torch.FloatTensor(len(batch), self.num_intents)\n char_padded.zero_()\n intent.zero_()\n slot_padded.zero_()\n \n if not bert:\n text_padded = torch.LongTensor(len(batch), max_len)\n text_padded.zero_()\n \n else:\n input_ids = torch.LongTensor(len(batch), self.max_seq_len)\n attention_mask = torch.LongTensor(len(batch), self.max_seq_len)\n token_type_ids = torch.LongTensor(len(batch), self.max_seq_len)\n heads = torch.LongTensor(len(batch), max_len)\n input_ids.zero_()\n attention_mask.zero_()\n token_type_ids.zero_()\n heads.zero_()\n # Get sorted index of len_list.\n sorted_index = np.argsort(len_list)[::-1]\n\n for i, index in enumerate(sorted_index):\n seq_lens.append(len_list[index])\n intent[i] = batch[index][-2]\n slot = batch[index][-1]\n slot_padded[i, :slot.size(0)] = slot\n char = batch[index][1]\n char_padded[i, :char.size(0), :char.size(1)] = char\n\n if not bert:\n text = batch[index][0]\n text_padded[i, :text.size(0)] = text\n else:\n input_ids[i] = batch[index][0]\n attention_mask[i] = batch[index][3]\n token_type_ids[i] = batch[index][4]\n head = batch[index][2]\n heads[i, :head.size(0)] = head\n if not bert:\n return text_padded, char_padded, intent, slot_padded, torch.tensor(seq_lens, dtype=torch.long)\n else:\n return input_ids, char_padded, heads, attention_mask, token_type_ids, intent, slot_padded, torch.tensor(seq_lens, dtype=torch.long)"
}
] | import argparse
from trainer import Trainer
from utils import init_logger, load_tokenizer, read_prediction_text, set_seed, MODEL_CLASSES, MODEL_PATH_MAP, get_intent_labels, get_slots_all
from data_loader import TextLoader, TextCollate | 7,450 | if args.do_train:
trainer.train()
if args.do_eval:
trainer.load_model()
trainer.evaluate('dev', 0)
trainer.evaluate("test", -1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--task", default=None, required=True, type=str, help="The name of the task to train")
parser.add_argument("--model_dir", default=None, required=True, type=str, help="Path to save, load model")
parser.add_argument("--data_dir", default="./data", type=str, help="The input data dir")
parser.add_argument("--intent_label_file", default="intent_label.txt", type=str, help="Intent Label file")
parser.add_argument("--slot_label_file", default="slot_label.txt", type=str, help="Slot Label file")
parser.add_argument("--slot_label_clean", default="slot_clean.txt", type=str, help="Slot Label file")
parser.add_argument("--logging", default="log.txt", type=str, help="Logging file")
# LAAT
parser.add_argument("--n_levels", default=1, type=int, help="Number of attention")
parser.add_argument("--attention_mode", default=None, type=str)
parser.add_argument("--level_projection_size", default=32, type=int)
parser.add_argument("--d_a", default=-1, type=int)
parser.add_argument("--char_embed", default=64, type=int)
parser.add_argument("--char_out", default=64, type=int)
parser.add_argument("--use_charcnn", action="store_false", help="Whether to use CharCNN")
parser.add_argument("--use_charlstm", action="store_false", help="Whether to use CharLSTM")
parser.add_argument("--word_embedding_dim", default=128, type=int)
parser.add_argument("--encoder_hidden_dim", default=128, type=int)
parser.add_argument("--decoder_hidden_dim", default=256, type=int)
parser.add_argument("--attention_hidden_dim", default=256, type=int)
parser.add_argument("--attention_output_dim", default=256, type=int)
# Config training
parser.add_argument("--model_type", default="bert", type=str, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument('--seed', type=int, default=1234, help="random seed for initialization")
parser.add_argument("--train_batch_size", default=32, type=int, help="Batch size for training.")
parser.add_argument("--eval_batch_size", default=64, type=int, help="Batch size for evaluation.")
parser.add_argument("--max_seq_len", default=100, type=int, help="The maximum total input sequence length after tokenization.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs", default=50, type=float, help="Total number of training epochs to perform.")
parser.add_argument("--weight_decay", default=0, type=float, help="Weight decay if we apply some.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--dropout_rate", default=0.1, type=float, help="Dropout for fully-connected layers")
parser.add_argument('--logging_steps', type=int, default=-1, help="Log every X updates steps.")
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the test set.")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument("--tuning_metric", default="mean_intent_slot", type=str, help="Metric to save checkpoint")
parser.add_argument("--only_intent", default=0, type=float, help="The first epochs to optimize intent")
parser.add_argument("--ignore_index", default=0, type=int,
help='Specifies a target value that is ignored and does not contribute to the input gradient')
parser.add_argument(
"--token_level",
type=str,
default="word-level",
help="Tokens are at syllable level or word level (Vietnamese) [word-level, syllable-level]",
)
parser.add_argument('--intent_loss_coef', type=float, default=0.5, help='Coefficient for the intent loss.')
parser.add_argument('--aux_loss_coef', type=float, default=0.5, help='Coefficient for the aux task.')
parser.add_argument('--early_stopping', type=float, default=-1, help='Early stopping strategy')
parser.add_argument("--base_model", default=None, type=str, help="The pretrained model path")
parser.add_argument(
"--num_intent_detection",
action="store_true",
help="Whether to use two-stage intent detection",
)
parser.add_argument(
"--auxiliary_tasks",
action="store_true",
help="Whether to optimize with auxiliary tasks",
)
parser.add_argument(
"--slot_decoder_size", type=int, default=512, help="hidden size of attention output vector"
)
parser.add_argument(
"--intent_slot_attn_size", type=int, default=128, help="hidden size of attention output vector"
)
parser.add_argument(
"--min_freq", type=int, default=1, help="Minimum number of frequency to be considered in the vocab"
)
parser.add_argument(
'--intent_slot_attn_type', choices=['coattention', 'attention_flow'],
)
parser.add_argument(
'--embedding_type', choices=['soft', 'hard'], default='soft',
)
parser.add_argument(
"--label_embedding_size", type=int, default=128, help="hidden size of label embedding vector"
)
# CRF option
parser.add_argument("--use_crf", action="store_true", help="Whether to use CRF")
parser.add_argument("--slot_pad_label", default="PAD", type=str, help="Pad token for slot label pad (to be ignore when calculate loss)")
args = parser.parse_args()
|
def main(args):
init_logger()
set_seed(args)
slot_label_lst, hiers = get_slots_all(args)
collate = TextCollate(0, len(get_intent_labels(args)), args.max_seq_len)
train_dataset = TextLoader(args, 'train')
dev_dataset = TextLoader(args, 'dev')
test_dataset = TextLoader(args, 'test')
trainer = Trainer(args, collate, train_dataset, dev_dataset, test_dataset)
if args.do_train:
trainer.train()
if args.do_eval:
trainer.load_model()
trainer.evaluate('dev', 0)
trainer.evaluate("test", -1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--task", default=None, required=True, type=str, help="The name of the task to train")
parser.add_argument("--model_dir", default=None, required=True, type=str, help="Path to save, load model")
parser.add_argument("--data_dir", default="./data", type=str, help="The input data dir")
parser.add_argument("--intent_label_file", default="intent_label.txt", type=str, help="Intent Label file")
parser.add_argument("--slot_label_file", default="slot_label.txt", type=str, help="Slot Label file")
parser.add_argument("--slot_label_clean", default="slot_clean.txt", type=str, help="Slot Label file")
parser.add_argument("--logging", default="log.txt", type=str, help="Logging file")
# LAAT
parser.add_argument("--n_levels", default=1, type=int, help="Number of attention")
parser.add_argument("--attention_mode", default=None, type=str)
parser.add_argument("--level_projection_size", default=32, type=int)
parser.add_argument("--d_a", default=-1, type=int)
parser.add_argument("--char_embed", default=64, type=int)
parser.add_argument("--char_out", default=64, type=int)
parser.add_argument("--use_charcnn", action="store_false", help="Whether to use CharCNN")
parser.add_argument("--use_charlstm", action="store_false", help="Whether to use CharLSTM")
parser.add_argument("--word_embedding_dim", default=128, type=int)
parser.add_argument("--encoder_hidden_dim", default=128, type=int)
parser.add_argument("--decoder_hidden_dim", default=256, type=int)
parser.add_argument("--attention_hidden_dim", default=256, type=int)
parser.add_argument("--attention_output_dim", default=256, type=int)
# Config training
parser.add_argument("--model_type", default="bert", type=str, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument('--seed', type=int, default=1234, help="random seed for initialization")
parser.add_argument("--train_batch_size", default=32, type=int, help="Batch size for training.")
parser.add_argument("--eval_batch_size", default=64, type=int, help="Batch size for evaluation.")
parser.add_argument("--max_seq_len", default=100, type=int, help="The maximum total input sequence length after tokenization.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs", default=50, type=float, help="Total number of training epochs to perform.")
parser.add_argument("--weight_decay", default=0, type=float, help="Weight decay if we apply some.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--dropout_rate", default=0.1, type=float, help="Dropout for fully-connected layers")
parser.add_argument('--logging_steps', type=int, default=-1, help="Log every X updates steps.")
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the test set.")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument("--tuning_metric", default="mean_intent_slot", type=str, help="Metric to save checkpoint")
parser.add_argument("--only_intent", default=0, type=float, help="The first epochs to optimize intent")
parser.add_argument("--ignore_index", default=0, type=int,
help='Specifies a target value that is ignored and does not contribute to the input gradient')
parser.add_argument(
"--token_level",
type=str,
default="word-level",
help="Tokens are at syllable level or word level (Vietnamese) [word-level, syllable-level]",
)
parser.add_argument('--intent_loss_coef', type=float, default=0.5, help='Coefficient for the intent loss.')
parser.add_argument('--aux_loss_coef', type=float, default=0.5, help='Coefficient for the aux task.')
parser.add_argument('--early_stopping', type=float, default=-1, help='Early stopping strategy')
parser.add_argument("--base_model", default=None, type=str, help="The pretrained model path")
parser.add_argument(
"--num_intent_detection",
action="store_true",
help="Whether to use two-stage intent detection",
)
parser.add_argument(
"--auxiliary_tasks",
action="store_true",
help="Whether to optimize with auxiliary tasks",
)
parser.add_argument(
"--slot_decoder_size", type=int, default=512, help="hidden size of attention output vector"
)
parser.add_argument(
"--intent_slot_attn_size", type=int, default=128, help="hidden size of attention output vector"
)
parser.add_argument(
"--min_freq", type=int, default=1, help="Minimum number of frequency to be considered in the vocab"
)
parser.add_argument(
'--intent_slot_attn_type', choices=['coattention', 'attention_flow'],
)
parser.add_argument(
'--embedding_type', choices=['soft', 'hard'], default='soft',
)
parser.add_argument(
"--label_embedding_size", type=int, default=128, help="hidden size of label embedding vector"
)
# CRF option
parser.add_argument("--use_crf", action="store_true", help="Whether to use CRF")
parser.add_argument("--slot_pad_label", default="PAD", type=str, help="Pad token for slot label pad (to be ignore when calculate loss)")
args = parser.parse_args()
| args.model_name_or_path = MODEL_PATH_MAP[args.model_type] | 6 | 2023-11-29 16:47:05+00:00 | 12k |
ai2cm/ace | fme/fme/core/aggregator/inference/main.py | [
{
"identifier": "MeanAggregator",
"path": "fme/fme/core/aggregator/one_step/reduced.py",
"snippet": "class MeanAggregator:\n \"\"\"\n Aggregator for mean-reduced metrics.\n\n These are metrics such as means which reduce to a single float for each batch,\n and then can be averaged across batches to get a single float for the\n entire dataset. This is important because the aggregator uses the mean to combine\n metrics across batches and processors.\n \"\"\"\n\n def __init__(\n self,\n area_weights: torch.Tensor,\n target_time: int = 1,\n dist: Optional[Distributed] = None,\n ):\n self._area_weights = area_weights\n self._shape_x = None\n self._shape_y = None\n self._n_batches = 0\n self._loss = torch.tensor(0.0, device=get_device())\n self._variable_metrics: Optional[Dict[str, Dict[str, ReducedMetric]]] = None\n self._target_time = target_time\n if dist is None:\n self._dist = Distributed.get_instance()\n else:\n self._dist = dist\n\n def _get_variable_metrics(self, gen_data: Mapping[str, torch.Tensor]):\n if self._variable_metrics is None:\n self._variable_metrics = {\n \"l1\": {},\n \"weighted_rmse\": {},\n \"weighted_bias\": {},\n \"weighted_grad_mag_percent_diff\": {},\n }\n device = get_device()\n for key in gen_data:\n self._variable_metrics[\"l1\"][key] = L1Loss(device=device)\n self._variable_metrics[\"weighted_rmse\"][\n key\n ] = AreaWeightedReducedMetric(\n area_weights=self._area_weights,\n device=device,\n compute_metric=metrics.root_mean_squared_error,\n )\n self._variable_metrics[\"weighted_bias\"][\n key\n ] = AreaWeightedReducedMetric(\n area_weights=self._area_weights,\n device=device,\n compute_metric=metrics.weighted_mean_bias,\n )\n self._variable_metrics[\"weighted_grad_mag_percent_diff\"][\n key\n ] = AreaWeightedReducedMetric(\n area_weights=self._area_weights,\n device=device,\n compute_metric=metrics.gradient_magnitude_percent_diff,\n )\n return self._variable_metrics\n\n @torch.no_grad()\n def record_batch(\n self,\n loss: float,\n target_data: Mapping[str, torch.Tensor],\n gen_data: Mapping[str, torch.Tensor],\n target_data_norm: Mapping[str, torch.Tensor],\n gen_data_norm: Mapping[str, torch.Tensor],\n i_time_start: int = 0,\n ):\n self._loss += loss\n variable_metrics = self._get_variable_metrics(gen_data)\n time_dim = 1\n time_len = gen_data[list(gen_data.keys())[0]].shape[time_dim]\n target_time = self._target_time - i_time_start\n if target_time >= 0 and time_len > target_time:\n for name in gen_data.keys():\n target = target_data[name].select(dim=time_dim, index=target_time)\n gen = gen_data[name].select(dim=time_dim, index=target_time)\n for metric in variable_metrics:\n variable_metrics[metric][name].record(\n target=target,\n gen=gen,\n )\n # only increment n_batches if we actually recorded a batch\n self._n_batches += 1\n\n def _get_data(self):\n if self._variable_metrics is None or self._n_batches == 0:\n raise ValueError(\"No batches have been recorded.\")\n data: Dict[str, Union[float, torch.Tensor]] = {\n \"loss\": self._loss / self._n_batches\n }\n for metric in self._variable_metrics:\n for key in self._variable_metrics[metric]:\n data[f\"{metric}/{key}\"] = (\n self._variable_metrics[metric][key].get() / self._n_batches\n )\n for key in sorted(data.keys()):\n data[key] = float(self._dist.reduce_mean(data[key].detach()).cpu().numpy())\n return data\n\n @torch.no_grad()\n def get_logs(self, label: str):\n \"\"\"\n Returns logs as can be reported to WandB.\n\n Args:\n label: Label to prepend to all log keys.\n \"\"\"\n return {\n f\"{label}/{key}\": data for key, data in sorted(self._get_data().items())\n }\n\n @torch.no_grad()\n def 
get_dataset(self) -> xr.Dataset:\n data = self._get_data()\n data = {key.replace(\"/\", \"-\"): data[key] for key in data}\n data_vars = {}\n for key, value in data.items():\n data_vars[key] = xr.DataArray(value)\n return xr.Dataset(data_vars=data_vars)"
},
{
"identifier": "MeanAggregator",
"path": "fme/fme/core/aggregator/inference/reduced.py",
"snippet": "class MeanAggregator:\n def __init__(\n self,\n area_weights: torch.Tensor,\n target: Literal[\"norm\", \"denorm\"],\n n_timesteps: int,\n dist: Optional[Distributed] = None,\n metadata: Optional[Mapping[str, VariableMetadata]] = None,\n ):\n self._area_weights = area_weights\n self._variable_metrics: Optional[Dict[str, Dict[str, MeanMetric]]] = None\n self._shape_x = None\n self._shape_y = None\n self._target = target\n self._n_timesteps = n_timesteps\n\n if dist is None:\n self._dist = Distributed.get_instance()\n else:\n self._dist = dist\n if metadata is None:\n self._metadata: Mapping[str, VariableMetadata] = {}\n else:\n self._metadata = metadata\n\n def _get_variable_metrics(self, gen_data: Mapping[str, torch.Tensor]):\n if self._variable_metrics is None:\n self._variable_metrics = {\n \"weighted_rmse\": {},\n \"weighted_grad_mag_percent_diff\": {},\n \"weighted_mean_gen\": {},\n \"weighted_mean_target\": {},\n \"weighted_bias\": {},\n \"weighted_std_gen\": {},\n }\n device = get_device()\n area_weights = self._area_weights\n for key in gen_data:\n self._variable_metrics[\"weighted_rmse\"][\n key\n ] = AreaWeightedReducedMetric(\n area_weights=self._area_weights,\n device=device,\n compute_metric=metrics.root_mean_squared_error,\n n_timesteps=self._n_timesteps,\n )\n self._variable_metrics[\"weighted_grad_mag_percent_diff\"][\n key\n ] = AreaWeightedReducedMetric(\n area_weights=self._area_weights,\n device=device,\n compute_metric=metrics.gradient_magnitude_percent_diff,\n n_timesteps=self._n_timesteps,\n )\n self._variable_metrics[\"weighted_mean_gen\"][\n key\n ] = AreaWeightedReducedMetric(\n area_weights=area_weights,\n device=device,\n compute_metric=compute_metric_on(\n source=\"gen\", metric=metrics.weighted_mean\n ),\n n_timesteps=self._n_timesteps,\n )\n self._variable_metrics[\"weighted_mean_target\"][\n key\n ] = AreaWeightedReducedMetric(\n area_weights=area_weights,\n device=device,\n compute_metric=compute_metric_on(\n source=\"target\", metric=metrics.weighted_mean\n ),\n n_timesteps=self._n_timesteps,\n )\n self._variable_metrics[\"weighted_bias\"][\n key\n ] = AreaWeightedReducedMetric(\n area_weights=area_weights,\n device=device,\n compute_metric=metrics.weighted_mean_bias,\n n_timesteps=self._n_timesteps,\n )\n self._variable_metrics[\"weighted_std_gen\"][\n key\n ] = AreaWeightedReducedMetric(\n area_weights=area_weights,\n device=device,\n compute_metric=compute_metric_on(\n source=\"gen\", metric=metrics.weighted_std\n ),\n n_timesteps=self._n_timesteps,\n )\n\n return self._variable_metrics\n\n @torch.no_grad()\n def record_batch(\n self,\n loss: float,\n target_data: Mapping[str, torch.Tensor],\n gen_data: Mapping[str, torch.Tensor],\n target_data_norm: Mapping[str, torch.Tensor],\n gen_data_norm: Mapping[str, torch.Tensor],\n i_time_start: int = 0,\n ):\n if self._target == \"norm\":\n target_data = target_data_norm\n gen_data = gen_data_norm\n variable_metrics = self._get_variable_metrics(gen_data)\n for name in gen_data.keys():\n for metric in variable_metrics:\n variable_metrics[metric][name].record(\n target=target_data[name],\n gen=gen_data[name],\n i_time_start=i_time_start,\n )\n\n def _get_series_data(self) -> List[_SeriesData]:\n \"\"\"Converts internally stored variable_metrics to a list.\"\"\"\n if self._variable_metrics is None:\n raise ValueError(\"No batches have been recorded.\")\n data: List[_SeriesData] = []\n for metric in self._variable_metrics:\n for key in self._variable_metrics[metric]:\n arr = 
self._variable_metrics[metric][key].get().detach()\n datum = _SeriesData(\n metric_name=metric,\n var_name=key,\n data=self._dist.reduce_mean(arr).cpu().numpy(),\n )\n data.append(datum)\n return data\n\n @torch.no_grad()\n def get_logs(self, label: str):\n \"\"\"\n Returns logs as can be reported to WandB.\n\n Args:\n label: Label to prepend to all log keys.\n \"\"\"\n logs = {}\n series_data: Dict[str, np.ndarray] = {\n datum.get_wandb_key(): datum.data for datum in self._get_series_data()\n }\n table = data_to_table(series_data)\n logs[f\"{label}/series\"] = table\n return logs\n\n @torch.no_grad()\n def get_dataset(self) -> xr.Dataset:\n \"\"\"\n Returns a dataset representation of the logs.\n \"\"\"\n data_vars = {}\n for datum in self._get_series_data():\n metadata = self._metadata.get(\n datum.var_name, VariableMetadata(\"unknown_units\", datum.var_name)\n )\n data_vars[datum.get_xarray_key()] = xr.DataArray(\n datum.data, dims=[\"forecast_step\"], attrs=metadata._asdict()\n )\n\n n_forecast_steps = len(next(iter(data_vars.values())))\n coords = {\"forecast_step\": np.arange(n_forecast_steps)}\n return xr.Dataset(data_vars=data_vars, coords=coords)"
},
{
"identifier": "TimeMeanAggregator",
"path": "fme/fme/core/aggregator/inference/time_mean.py",
"snippet": "class TimeMeanAggregator:\n \"\"\"Statistics and images on the time-mean state.\n\n This aggregator keeps track of the time-mean state, then computes\n statistics and images on that time-mean state when logs are retrieved.\n \"\"\"\n\n _image_captions = {\n \"bias_map\": \"{name} time-mean bias (generated - target) [{units}]\",\n \"gen_map\": \"{name} time-mean generated [{units}]\",\n }\n\n def __init__(\n self,\n area_weights: torch.Tensor,\n dist: Optional[Distributed] = None,\n metadata: Optional[Mapping[str, VariableMetadata]] = None,\n ):\n \"\"\"\n Args:\n area_weights: Area weights for each grid cell.\n dist: Distributed object to use for communication.\n metadata: Mapping of variable names their metadata that will\n used in generating logged image captions.\n \"\"\"\n self._area_weights = area_weights\n if dist is None:\n self._dist = Distributed.get_instance()\n else:\n self._dist = dist\n if metadata is None:\n self._metadata: Mapping[str, VariableMetadata] = {}\n else:\n self._metadata = metadata\n\n # Dictionaries of tensors of shape [n_lat, n_lon] represnting time means\n self._target_data: Optional[Dict[str, torch.Tensor]] = None\n self._gen_data: Optional[Dict[str, torch.Tensor]] = None\n self._target_data_norm = None\n self._gen_data_norm = None\n self._n_batches = 0\n\n @staticmethod\n def _add_or_initialize_time_mean(\n maybe_dict: Optional[MutableMapping[str, torch.Tensor]],\n new_data: Mapping[str, torch.Tensor],\n ignore_initial: bool = False,\n ) -> Dict[str, torch.Tensor]:\n sample_dim = 0\n time_dim = 1\n if ignore_initial:\n time_slice = slice(1, None)\n else:\n time_slice = slice(0, None)\n if maybe_dict is None:\n d: Dict[str, torch.Tensor] = {\n name: tensor[:, time_slice].mean(dim=time_dim).mean(dim=sample_dim)\n for name, tensor in new_data.items()\n }\n else:\n d = dict(maybe_dict)\n for name, tensor in new_data.items():\n d[name] += tensor[:, time_slice].mean(dim=time_dim).mean(dim=sample_dim)\n return d\n\n @torch.no_grad()\n def record_batch(\n self,\n loss: float,\n target_data: Mapping[str, torch.Tensor],\n gen_data: Mapping[str, torch.Tensor],\n target_data_norm: Mapping[str, torch.Tensor],\n gen_data_norm: Mapping[str, torch.Tensor],\n i_time_start: int = 0,\n ):\n ignore_initial = i_time_start == 0\n self._target_data = self._add_or_initialize_time_mean(\n self._target_data, target_data, ignore_initial\n )\n self._gen_data = self._add_or_initialize_time_mean(\n self._gen_data, gen_data, ignore_initial\n )\n\n # we can ignore time slicing and just treat segments as though they're\n # different batches, because we can assume all time segments have the\n # same length\n self._n_batches += 1\n\n def _get_target_gen_pairs(self) -> List[_TargetGenPair]:\n if self._n_batches == 0 or self._gen_data is None or self._target_data is None:\n raise ValueError(\"No data recorded.\")\n\n ret = []\n for name in self._gen_data.keys():\n gen = self._dist.reduce_mean(self._gen_data[name] / self._n_batches)\n target = self._dist.reduce_mean(self._target_data[name] / self._n_batches)\n ret.append(_TargetGenPair(gen=gen, target=target, name=name))\n return ret\n\n @torch.no_grad()\n def get_logs(self, label: str) -> Dict[str, Union[float, torch.Tensor]]:\n logs = {}\n preds = self._get_target_gen_pairs()\n bias_map_key, gen_map_key = \"bias_map\", \"gen_map\"\n for pred in preds:\n logs.update(\n {\n f\"{bias_map_key}/{pred.name}\": _make_image(\n self._get_caption(bias_map_key, pred.name, pred.bias()),\n pred.bias(),\n ),\n f\"{gen_map_key}/{pred.name}\": 
_make_image(\n self._get_caption(gen_map_key, pred.name, pred.gen), pred.gen\n ),\n f\"rmse/{pred.name}\": pred.rmse(weights=self._area_weights),\n f\"bias/{pred.name}\": pred.weighted_mean_bias(\n weights=self._area_weights\n ),\n }\n )\n\n if len(label) != 0:\n return {f\"{label}/{key}\": logs[key] for key in logs}\n return logs\n\n def _get_caption(self, key: str, name: str, data: torch.Tensor) -> str:\n if name in self._metadata:\n caption_name = self._metadata[name].long_name\n units = self._metadata[name].units\n else:\n caption_name, units = name, \"unknown_units\"\n caption = self._image_captions[key].format(name=caption_name, units=units)\n caption += f\" vmin={data.min():.4g}, vmax={data.max():.4g}.\"\n return caption\n\n def get_dataset(self) -> xr.Dataset:\n data = {}\n preds = self._get_target_gen_pairs()\n dims = (\"lat\", \"lon\")\n for pred in preds:\n bias_metadata = self._metadata.get(\n pred.name, VariableMetadata(units=\"unknown_units\", long_name=pred.name)\n )._asdict()\n gen_metadata = VariableMetadata(units=\"\", long_name=pred.name)._asdict()\n data.update(\n {\n f\"bias_map-{pred.name}\": xr.DataArray(\n pred.bias().cpu(), dims=dims, attrs=bias_metadata\n ),\n f\"gen_map-{pred.name}\": xr.DataArray(\n pred.gen.cpu(),\n dims=dims,\n attrs=gen_metadata,\n ),\n }\n )\n return xr.Dataset(data)"
},
{
"identifier": "VideoAggregator",
"path": "fme/fme/core/aggregator/inference/video.py",
"snippet": "class VideoAggregator:\n \"\"\"Videos of state evolution.\"\"\"\n\n def __init__(\n self,\n n_timesteps: int,\n enable_extended_videos: bool,\n dist: Optional[Distributed] = None,\n metadata: Optional[Mapping[str, VariableMetadata]] = None,\n ):\n \"\"\"\n Args:\n n_timesteps: Number of timesteps of inference that will be run.\n enable_extended_videos: Whether to log videos of statistical\n metrics of state evolution\n dist: Distributed object to use for metric aggregation.\n metadata: Mapping of variable names their metadata that will\n used in generating logged video captions.\n \"\"\"\n if metadata is None:\n self._metadata: Mapping[str, VariableMetadata] = {}\n else:\n self._metadata = metadata\n self._mean_data = _MeanVideoData(n_timesteps=n_timesteps, dist=dist)\n if enable_extended_videos:\n self._error_data: Optional[_ErrorVideoData] = _ErrorVideoData(\n n_timesteps=n_timesteps, dist=dist\n )\n self._variance_data: Optional[_VarianceVideoData] = _VarianceVideoData(\n n_timesteps=n_timesteps, dist=dist\n )\n self._enable_extended_videos = True\n else:\n self._error_data = None\n self._variance_data = None\n self._enable_extended_videos = False\n\n @torch.no_grad()\n def record_batch(\n self,\n loss: float,\n target_data: Mapping[str, torch.Tensor],\n gen_data: Mapping[str, torch.Tensor],\n target_data_norm: Optional[Mapping[str, torch.Tensor]] = None,\n gen_data_norm: Optional[Mapping[str, torch.Tensor]] = None,\n i_time_start: int = 0,\n ):\n del target_data_norm, gen_data_norm # intentionally unused\n self._mean_data.record_batch(\n target_data=target_data,\n gen_data=gen_data,\n i_time_start=i_time_start,\n )\n if self._error_data is not None:\n self._error_data.record_batch(\n target_data=target_data,\n gen_data=gen_data,\n i_time_start=i_time_start,\n )\n if self._variance_data is not None:\n self._variance_data.record_batch(\n target_data=target_data,\n gen_data=gen_data,\n i_time_start=i_time_start,\n )\n\n @torch.no_grad()\n def get_logs(self, label: str):\n \"\"\"\n Returns logs as can be reported to WandB.\n\n Args:\n label: Label to prepend to all log keys.\n \"\"\"\n data = self._get_data()\n videos = {}\n for sub_label, d in data.items():\n videos[f\"{label}/{sub_label}\"] = d.make_video()\n return videos\n\n @torch.no_grad()\n def _get_data(self) -> Mapping[str, _MaybePairedVideoData]:\n \"\"\"\n Returns video data as can be reported to WandB.\n\n Args:\n label: Label to prepend to all log keys.\n \"\"\"\n gen_data, target_data = self._mean_data.get()\n video_data = {}\n\n def get_units(name: str) -> Optional[str]:\n if name in self._metadata:\n return self._metadata[name].units\n else:\n return None\n\n def get_long_name(name: str) -> Optional[str]:\n if name in self._metadata:\n return self._metadata[name].long_name\n else:\n return None\n\n for name in gen_data:\n video_data[name] = _MaybePairedVideoData(\n caption=self._get_caption(name),\n gen=gen_data[name],\n target=target_data[name],\n units=get_units(name),\n long_name=f\"ensemble mean of {get_long_name(name)}\",\n )\n if self._enable_extended_videos:\n video_data[f\"bias/{name}\"] = _MaybePairedVideoData(\n caption=(f\"prediction - target for {name}\"),\n gen=gen_data[name] - target_data[name],\n units=get_units(name),\n long_name=f\"bias of {get_long_name(name)}\",\n )\n if self._error_data is not None:\n data = self._error_data.get()\n for name in data.rmse:\n video_data[f\"rmse/{name}\"] = _MaybePairedVideoData(\n caption=f\"RMSE over ensemble for {name}\",\n gen=data.rmse[name],\n 
units=get_units(name),\n long_name=f\"root mean squared error of {get_long_name(name)}\",\n )\n for name in data.min_err:\n video_data[f\"min_err/{name}\"] = _MaybePairedVideoData(\n caption=f\"Min across ensemble members of min error for {name}\",\n gen=data.min_err[name],\n units=get_units(name),\n long_name=(\n f\"min error of {get_long_name(name)} \" \"across ensemble members\"\n ),\n )\n for name in data.max_err:\n video_data[f\"max_err/{name}\"] = _MaybePairedVideoData(\n caption=f\"Max across ensemble members of max error for {name}\",\n gen=data.max_err[name],\n units=get_units(name),\n long_name=(\n f\"max error of {get_long_name(name)} \" \"across ensemble members\"\n ),\n )\n if self._variance_data is not None:\n gen_data, target_data = self._variance_data.get()\n for name in gen_data:\n video_data[f\"gen_var/{name}\"] = _MaybePairedVideoData(\n caption=(\n f\"Variance of gen data for {name} \"\n \"as fraction of target variance\"\n ),\n gen=gen_data[name] / target_data[name],\n units=\"\",\n long_name=(\n f\"prediction variance of {get_long_name(name)} \"\n \"as fraction of target variance\"\n ),\n )\n return video_data\n\n @torch.no_grad()\n def get_dataset(self) -> xr.Dataset:\n \"\"\"\n Return video data as an xarray Dataset.\n \"\"\"\n data = self._get_data()\n video_data = {}\n for label, d in data.items():\n label = label.strip(\"/\").replace(\"/\", \"_\") # remove leading slash\n attrs = {}\n if d.units is not None:\n attrs[\"units\"] = d.units\n if d.long_name is not None:\n attrs[\"long_name\"] = d.long_name\n if d.target is not None:\n video_data[label] = xr.DataArray(\n data=np.concatenate(\n [d.gen.cpu().numpy()[None, :], d.target.cpu().numpy()[None, :]],\n axis=0,\n ),\n dims=(\"source\", \"timestep\", \"lat\", \"lon\"),\n attrs=attrs,\n )\n else:\n video_data[label] = xr.DataArray(\n data=d.gen.cpu().numpy(),\n dims=(\"timestep\", \"lat\", \"lon\"),\n attrs=attrs,\n )\n return xr.Dataset(video_data)\n\n def _get_caption(self, name: str) -> str:\n caption = (\n \"Autoregressive (left) prediction and (right) target for {name} [{units}]\"\n )\n if name in self._metadata:\n caption_name = self._metadata[name].long_name\n units = self._metadata[name].units\n else:\n caption_name, units = name, \"unknown units\"\n return caption.format(name=caption_name, units=units)"
},
{
"identifier": "ZonalMeanAggregator",
"path": "fme/fme/core/aggregator/inference/zonal_mean.py",
"snippet": "class ZonalMeanAggregator:\n \"\"\"Images of the zonal-mean state as a function of latitude and time.\n\n This aggregator keeps track of the generated and target zonal-mean state,\n then generates zonal-mean (Hovmoller) images when logs are retrieved.\n The zonal-mean images are averaged across the sample dimension.\n \"\"\"\n\n _captions = {\n \"error\": (\n \"{name} zonal-mean error (generated - target) [{units}], \"\n \"x-axis is time increasing to right, y-axis is latitude increasing upward\"\n ),\n \"gen\": (\n \"{name} zonal-mean generated [{units}], \"\n \"x-axis is time increasing to right, y-axis is latitude increasing upward\"\n ),\n }\n\n def __init__(\n self,\n n_timesteps: int,\n dist: Optional[Distributed] = None,\n metadata: Optional[Mapping[str, VariableMetadata]] = None,\n ):\n \"\"\"\n Args:\n n_timesteps: Number of timesteps of inference that will be run.\n dist: Distributed object to use for communication.\n metadata: Mapping of variable names their metadata that will\n used in generating logged image captions.\n \"\"\"\n self._n_timesteps = n_timesteps\n if dist is None:\n self._dist = Distributed.get_instance()\n else:\n self._dist = dist\n if metadata is None:\n self._metadata: Mapping[str, VariableMetadata] = {}\n else:\n self._metadata = metadata\n\n self._target_data: Optional[Dict[str, torch.Tensor]] = None\n self._gen_data: Optional[Dict[str, torch.Tensor]] = None\n self._n_batches = torch.zeros(\n n_timesteps, dtype=torch.int32, device=get_device()\n )[\n None, :, None\n ] # sample, time, lat\n\n def record_batch(\n self,\n loss: float,\n target_data: Mapping[str, torch.Tensor],\n gen_data: Mapping[str, torch.Tensor],\n target_data_norm: Mapping[str, torch.Tensor],\n gen_data_norm: Mapping[str, torch.Tensor],\n i_time_start: int,\n ):\n lon_dim = 3\n if self._target_data is None:\n self._target_data = self._initialize_zeros_zonal_mean_from_batch(\n target_data, self._n_timesteps\n )\n if self._gen_data is None:\n self._gen_data = self._initialize_zeros_zonal_mean_from_batch(\n gen_data, self._n_timesteps\n )\n\n window_steps = next(iter(target_data.values())).shape[1]\n time_slice = slice(i_time_start, i_time_start + window_steps)\n # we can average along longitude without area weighting\n for name, tensor in target_data.items():\n self._target_data[name][:, time_slice, :] += tensor.mean(dim=lon_dim)\n for name, tensor in gen_data.items():\n self._gen_data[name][:, time_slice, :] += tensor.mean(dim=lon_dim)\n self._n_batches[:, time_slice, :] += 1\n\n def _get_data(self) -> Dict[str, _RawData]:\n if self._gen_data is None or self._target_data is None:\n raise RuntimeError(\"No data recorded\")\n sample_dim = 0\n data: Dict[str, _RawData] = {}\n for name in self._gen_data.keys():\n gen = (\n self._dist.reduce_mean(self._gen_data[name] / self._n_batches)\n .mean(sample_dim)\n .cpu()\n .numpy()\n )\n error = (\n self._dist.reduce_mean(\n (self._gen_data[name] - self._target_data[name]) / self._n_batches\n )\n .mean(sample_dim)\n .cpu()\n .numpy()\n )\n\n metadata = self._metadata.get(name, VariableMetadata(\"unknown_units\", name))\n data[f\"gen/{name}\"] = _RawData(\n datum=gen,\n caption=self._get_caption(\"gen\", name, gen),\n # generated data is not considered to have units\n metadata=VariableMetadata(units=\"\", long_name=metadata.long_name),\n )\n data[f\"error/{name}\"] = _RawData(\n datum=error,\n caption=self._get_caption(\"error\", name, error),\n metadata=metadata,\n )\n\n return data\n\n def get_logs(self, label: str) -> Dict[str, Image]:\n 
logs = {}\n data = self._get_data()\n for key, datum in data.items():\n logs[f\"{label}/{key}\"] = datum.get_image()\n return logs\n\n def get_dataset(self) -> xr.Dataset:\n data = {\n k.replace(\"/\", \"-\"): xr.DataArray(\n v.datum, dims=(\"forecast_step\", \"lat\"), attrs=v.metadata._asdict()\n )\n for k, v in self._get_data().items()\n }\n\n ret = xr.Dataset(data)\n return ret\n\n def _get_caption(self, caption_key: str, varname: str, data: torch.Tensor) -> str:\n if varname in self._metadata:\n caption_name = self._metadata[varname].long_name\n units = self._metadata[varname].units\n else:\n caption_name, units = varname, \"unknown_units\"\n caption = self._captions[caption_key].format(name=caption_name, units=units)\n caption += f\" vmin={data.min():.4g}, vmax={data.max():.4g}.\"\n return caption\n\n @staticmethod\n def _initialize_zeros_zonal_mean_from_batch(\n data: Mapping[str, torch.Tensor], n_timesteps: int, lat_dim: int = 2\n ) -> Dict[str, torch.Tensor]:\n return {\n name: torch.zeros(\n (tensor.shape[0], n_timesteps, tensor.shape[lat_dim]),\n dtype=tensor.dtype,\n device=tensor.device,\n )\n for name, tensor in data.items()\n }"
}
] | from typing import Dict, Iterable, List, Mapping, Optional, Protocol, Union
from wandb import Table
from fme.core.data_loading.typing import SigmaCoordinates, VariableMetadata
from fme.core.distributed import Distributed
from fme.core.wandb import WandB
from ..one_step.reduced import MeanAggregator as OneStepMeanAggregator
from .reduced import MeanAggregator
from .time_mean import TimeMeanAggregator
from .video import VideoAggregator
from .zonal_mean import ZonalMeanAggregator
import torch
import xarray as xr | 7,894 |
wandb = WandB.get_instance()
class _Aggregator(Protocol):
@torch.no_grad()
def record_batch(
self,
loss: float,
target_data: Mapping[str, torch.Tensor],
gen_data: Mapping[str, torch.Tensor],
target_data_norm: Mapping[str, torch.Tensor],
gen_data_norm: Mapping[str, torch.Tensor],
):
...
@torch.no_grad()
def get_logs(self, label: str):
...
@torch.no_grad()
def get_dataset(self) -> xr.Dataset:
...
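# An illustrative sketch (not from the fme codebase): any object whose
# record_batch / get_logs / get_dataset methods match the signatures above
# satisfies the _Aggregator protocol, e.g. a no-op aggregator that discards
# batches and reports nothing. It relies only on the torch / Mapping / xarray
# imports already present in this module.
class _NullAggregator:
    @torch.no_grad()
    def record_batch(
        self,
        loss: float,
        target_data: Mapping[str, torch.Tensor],
        gen_data: Mapping[str, torch.Tensor],
        target_data_norm: Mapping[str, torch.Tensor],
        gen_data_norm: Mapping[str, torch.Tensor],
    ):
        pass  # intentionally ignores the batch

    @torch.no_grad()
    def get_logs(self, label: str):
        return {}  # nothing to report

    @torch.no_grad()
    def get_dataset(self) -> xr.Dataset:
        return xr.Dataset()  # empty dataset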
class InferenceAggregator:
"""
Aggregates statistics for inference.
To use, call `record_batch` on the results of each batch, then call
`get_logs` to get a dictionary of statistics when you're done.
"""
def __init__(
self,
area_weights: torch.Tensor,
sigma_coordinates: SigmaCoordinates,
n_timesteps: int,
record_step_20: bool = False,
log_video: bool = False,
enable_extended_videos: bool = False,
log_zonal_mean_images: bool = False,
dist: Optional[Distributed] = None,
metadata: Optional[Mapping[str, VariableMetadata]] = None,
):
"""
Args:
area_weights: Area weights for each grid cell.
sigma_coordinates: Data sigma coordinates
n_timesteps: Number of timesteps of inference that will be run.
record_step_20: Whether to record the mean of the 20th steps.
log_video: Whether to log videos of the state evolution.
enable_extended_videos: Whether to log videos of statistical
metrics of state evolution
log_zonal_mean_images: Whether to log zonal-mean images (hovmollers) with a
time dimension.
dist: Distributed object to use for metric aggregation.
metadata: Mapping of variable names to their metadata that will
be used in generating logged image captions.
"""
self._aggregators: Dict[str, _Aggregator] = {
|
wandb = WandB.get_instance()
class _Aggregator(Protocol):
@torch.no_grad()
def record_batch(
self,
loss: float,
target_data: Mapping[str, torch.Tensor],
gen_data: Mapping[str, torch.Tensor],
target_data_norm: Mapping[str, torch.Tensor],
gen_data_norm: Mapping[str, torch.Tensor],
):
...
@torch.no_grad()
def get_logs(self, label: str):
...
@torch.no_grad()
def get_dataset(self) -> xr.Dataset:
...
class InferenceAggregator:
"""
Aggregates statistics for inference.
To use, call `record_batch` on the results of each batch, then call
`get_logs` to get a dictionary of statistics when you're done.
"""
def __init__(
self,
area_weights: torch.Tensor,
sigma_coordinates: SigmaCoordinates,
n_timesteps: int,
record_step_20: bool = False,
log_video: bool = False,
enable_extended_videos: bool = False,
log_zonal_mean_images: bool = False,
dist: Optional[Distributed] = None,
metadata: Optional[Mapping[str, VariableMetadata]] = None,
):
"""
Args:
area_weights: Area weights for each grid cell.
sigma_coordinates: Data sigma coordinates
n_timesteps: Number of timesteps of inference that will be run.
record_step_20: Whether to record the mean of the 20th steps.
log_video: Whether to log videos of the state evolution.
enable_extended_videos: Whether to log videos of statistical
metrics of state evolution
log_zonal_mean_images: Whether to log zonal-mean images (hovmollers) with a
time dimension.
dist: Distributed object to use for metric aggregation.
metadata: Mapping of variable names to their metadata that will
be used in generating logged image captions.
"""
self._aggregators: Dict[str, _Aggregator] = { | "mean": MeanAggregator( | 1 | 2023-11-29 23:08:42+00:00 | 12k |
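A standalone sketch of the pattern this record completes (a registry dict of named aggregators whose per-label logs are key-prefixed and merged), using toy stand-ins rather than the real fme aggregators; everything below is illustrative and not taken from the repository:

from typing import Dict


class _ConstantAggregator:
    """Toy stand-in for MeanAggregator / VideoAggregator (illustrative only)."""

    def __init__(self, value: float):
        self._value = value

    def record_batch(self, loss, target_data, gen_data, target_data_norm, gen_data_norm):
        pass  # a real aggregator would accumulate per-batch statistics here

    def get_logs(self, label: str) -> Dict[str, float]:
        return {f"{label}/value": self._value}


# Mirrors self._aggregators in the truncated __init__ above, whose gold
# completion starts with the "mean" entry.
aggregators: Dict[str, _ConstantAggregator] = {
    "mean": _ConstantAggregator(1.0),
    "time_mean": _ConstantAggregator(2.0),
}

logs: Dict[str, float] = {}
for name, agg in aggregators.items():
    logs.update(agg.get_logs(label=f"inference/{name}"))

print(logs)  # {'inference/mean/value': 1.0, 'inference/time_mean/value': 2.0}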
fzmi/ubdd | models/dino/models/dino/dino.py | [
{
"identifier": "box_ops",
"path": "models/dino/util/box_ops.py",
"snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef masks_to_boxes(masks):"
},
{
"identifier": "NestedTensor",
"path": "models/dino/util/misc.py",
"snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n if mask == 'auto':\n self.mask = torch.zeros_like(tensors).to(tensors.device)\n if self.mask.dim() == 3:\n self.mask = self.mask.sum(0).to(bool)\n elif self.mask.dim() == 4:\n self.mask = self.mask.sum(1).to(bool)\n else:\n raise ValueError(\"tensors dim must be 3 or 4 but {}({})\".format(self.tensors.dim(), self.tensors.shape))\n\n def imgsize(self):\n res = []\n for i in range(self.tensors.shape[0]):\n mask = self.mask[i]\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n res.append(torch.Tensor([maxH, maxW]))\n return res\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def to_img_list_single(self, tensor, mask):\n assert tensor.dim() == 3, \"dim of tensor should be 3 but {}\".format(tensor.dim())\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n img = tensor[:, :maxH, :maxW]\n return img\n\n def to_img_list(self):\n \"\"\"remove the padding and convert to img list\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.tensors.dim() == 3:\n return self.to_img_list_single(self.tensors, self.mask)\n else:\n res = []\n for i in range(self.tensors.shape[0]):\n tensor_i = self.tensors[i]\n mask_i = self.mask[i]\n res.append(self.to_img_list_single(tensor_i, mask_i))\n return res\n\n @property\n def device(self):\n return self.tensors.device\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n @property\n def shape(self):\n return {\n 'tensors.shape': self.tensors.shape,\n 'mask.shape': self.mask.shape\n }"
},
{
"identifier": "nested_tensor_from_tensor_list",
"path": "models/dino/util/misc.py",
"snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)"
},
{
"identifier": "accuracy",
"path": "models/dino/util/misc.py",
"snippet": "@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res"
},
{
"identifier": "get_world_size",
"path": "models/dino/util/misc.py",
"snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()"
},
{
"identifier": "interpolate",
"path": "models/dino/util/misc.py",
"snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if __torchvision_need_compat_flag < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)"
},
{
"identifier": "is_dist_avail_and_initialized",
"path": "models/dino/util/misc.py",
"snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True"
},
{
"identifier": "inverse_sigmoid",
"path": "models/dino/util/misc.py",
"snippet": "def inverse_sigmoid(x, eps=1e-3):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1/x2)"
},
{
"identifier": "build_backbone",
"path": "models/dino/models/dino/backbone.py",
"snippet": "def build_backbone(args):\n \"\"\"\n Useful args:\n - backbone: backbone name\n - lr_backbone: \n - dilation\n - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]\n - backbone_freeze_keywords: \n - use_checkpoint: for swin only for now\n\n \"\"\"\n position_embedding = build_position_encoding(args)\n train_backbone = args.lr_backbone > 0\n if not train_backbone:\n raise ValueError(\"Please set lr_backbone > 0\")\n return_interm_indices = args.return_interm_indices\n assert return_interm_indices in [[0,1,2,3], [1,2,3], [3]]\n backbone_freeze_keywords = args.backbone_freeze_keywords\n use_checkpoint = getattr(args, 'use_checkpoint', False)\n\n if args.backbone in ['resnet50', 'resnet101']:\n backbone = Backbone(args.backbone, train_backbone, args.dilation, \n return_interm_indices, \n batch_norm=FrozenBatchNorm2d)\n bb_num_channels = backbone.num_channels\n elif args.backbone in ['swin_T_224_1k', 'swin_B_224_22k', 'swin_B_384_22k', 'swin_L_224_22k', 'swin_L_384_22k']:\n pretrain_img_size = int(args.backbone.split('_')[-2])\n backbone = build_swin_transformer(args.backbone, \\\n pretrain_img_size=pretrain_img_size, \\\n out_indices=tuple(return_interm_indices), \\\n dilation=args.dilation, use_checkpoint=use_checkpoint)\n\n # freeze some layers\n if backbone_freeze_keywords is not None:\n for name, parameter in backbone.named_parameters():\n for keyword in backbone_freeze_keywords:\n if keyword in name:\n parameter.requires_grad_(False)\n break\n if \"backbone_dir\" in args:\n pretrained_dir = args.backbone_dir\n PTDICT = {\n 'swin_T_224_1k': 'swin_tiny_patch4_window7_224.pth',\n 'swin_B_384_22k': 'swin_base_patch4_window12_384.pth',\n 'swin_L_384_22k': 'swin_large_patch4_window12_384_22k.pth',\n }\n pretrainedpath = os.path.join(pretrained_dir, PTDICT[args.backbone])\n checkpoint = torch.load(pretrainedpath, map_location='cpu')['model']\n from collections import OrderedDict\n def key_select_function(keyname):\n if 'head' in keyname:\n return False\n if args.dilation and 'layers.3' in keyname:\n return False\n return True\n _tmp_st = OrderedDict({k:v for k, v in clean_state_dict(checkpoint).items() if key_select_function(k)})\n _tmp_st_output = backbone.load_state_dict(_tmp_st, strict=False)\n print(str(_tmp_st_output))\n bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]\n elif args.backbone in ['convnext_xlarge_22k']:\n backbone = build_convnext(modelname=args.backbone, pretrained=True, out_indices=tuple(return_interm_indices),backbone_dir=args.backbone_dir)\n bb_num_channels = backbone.dims[4 - len(return_interm_indices):]\n else:\n raise NotImplementedError(\"Unknown backbone {}\".format(args.backbone))\n \n\n assert len(bb_num_channels) == len(return_interm_indices), f\"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}\"\n\n\n model = Joiner(backbone, position_embedding)\n model.num_channels = bb_num_channels \n assert isinstance(bb_num_channels, List), \"bb_num_channels is expected to be a List but {}\".format(type(bb_num_channels))\n return model"
},
{
"identifier": "build_matcher",
"path": "models/dino/models/dino/matcher.py",
"snippet": "def build_matcher(args):\n assert args.matcher_type in ['HungarianMatcher', 'SimpleMinsumMatcher'], \"Unknown args.matcher_type: {}\".format(args.matcher_type)\n if args.matcher_type == 'HungarianMatcher':\n return HungarianMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n )\n elif args.matcher_type == 'SimpleMinsumMatcher':\n return SimpleMinsumMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n ) \n else:\n raise NotImplementedError(\"Unknown args.matcher_type: {}\".format(args.matcher_type))"
},
{
"identifier": "DETRsegm",
"path": "models/dino/models/dino/segmentation.py",
"snippet": "class DETRsegm(nn.Module):\n def __init__(self, detr, freeze_detr=False):\n super().__init__()\n self.detr = detr\n\n if freeze_detr:\n for p in self.parameters():\n p.requires_grad_(False)\n\n hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead\n self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0)\n self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)\n\n def forward(self, samples: NestedTensor):\n if isinstance(samples, (list, torch.Tensor)):\n samples = nested_tensor_from_tensor_list(samples)\n features, pos = self.detr.backbone(samples)\n\n bs = features[-1].tensors.shape[0]\n\n src, mask = features[-1].decompose()\n assert mask is not None\n src_proj = self.detr.input_proj(src)\n hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])\n\n outputs_class = self.detr.class_embed(hs)\n outputs_coord = self.detr.bbox_embed(hs).sigmoid()\n out = {\"pred_logits\": outputs_class[-1], \"pred_boxes\": outputs_coord[-1]}\n if self.detr.aux_loss:\n out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord)\n\n # FIXME h_boxes takes the last one computed, keep this in mind\n bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)\n\n seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors])\n outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])\n\n out[\"pred_masks\"] = outputs_seg_masks\n return out"
},
{
"identifier": "PostProcessPanoptic",
"path": "models/dino/models/dino/segmentation.py",
"snippet": "class PostProcessPanoptic(nn.Module):\n \"\"\"This class converts the output of the model to the final panoptic result, in the format expected by the\n coco panoptic API \"\"\"\n\n def __init__(self, is_thing_map, threshold=0.85):\n \"\"\"\n Parameters:\n is_thing_map: This is a whose keys are the class ids, and the values a boolean indicating whether\n the class is a thing (True) or a stuff (False) class\n threshold: confidence threshold: segments with confidence lower than this will be deleted\n \"\"\"\n super().__init__()\n self.threshold = threshold\n self.is_thing_map = is_thing_map\n\n def forward(self, outputs, processed_sizes, target_sizes=None):\n \"\"\" This function computes the panoptic prediction from the model's predictions.\n Parameters:\n outputs: This is a dict coming directly from the model. See the model doc for the content.\n processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the\n model, ie the size after data augmentation but before batching.\n target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size\n of each prediction. If left to None, it will default to the processed_sizes\n \"\"\"\n if target_sizes is None:\n target_sizes = processed_sizes\n assert len(processed_sizes) == len(target_sizes)\n out_logits, raw_masks, raw_boxes = outputs[\"pred_logits\"], outputs[\"pred_masks\"], outputs[\"pred_boxes\"]\n assert len(out_logits) == len(raw_masks) == len(target_sizes)\n preds = []\n\n def to_tuple(tup):\n if isinstance(tup, tuple):\n return tup\n return tuple(tup.cpu().tolist())\n\n for cur_logits, cur_masks, cur_boxes, size, target_size in zip(\n out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes\n ):\n # we filter empty queries and detection below threshold\n scores, labels = cur_logits.softmax(-1).max(-1)\n keep = labels.ne(outputs[\"pred_logits\"].shape[-1] - 1) & (scores > self.threshold)\n cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)\n cur_scores = cur_scores[keep]\n cur_classes = cur_classes[keep]\n cur_masks = cur_masks[keep]\n cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode=\"bilinear\").squeeze(1)\n cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])\n\n h, w = cur_masks.shape[-2:]\n assert len(cur_boxes) == len(cur_classes)\n\n # It may be that we have several predicted masks for the same stuff class.\n # In the following, we track the list of masks ids for each stuff class (they are merged later on)\n cur_masks = cur_masks.flatten(1)\n stuff_equiv_classes = defaultdict(lambda: [])\n for k, label in enumerate(cur_classes):\n if not self.is_thing_map[label.item()]:\n stuff_equiv_classes[label.item()].append(k)\n\n def get_ids_area(masks, scores, dedup=False):\n # This helper function creates the final panoptic segmentation image\n # It also returns the area of the masks that appears on the image\n\n m_id = masks.transpose(0, 1).softmax(-1)\n\n if m_id.shape[-1] == 0:\n # We didn't detect any mask :(\n m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)\n else:\n m_id = m_id.argmax(-1).view(h, w)\n\n if dedup:\n # Merge the masks corresponding to the same stuff class\n for equiv in stuff_equiv_classes.values():\n if len(equiv) > 1:\n for eq_id in equiv:\n m_id.masked_fill_(m_id.eq(eq_id), equiv[0])\n\n final_h, final_w = to_tuple(target_size)\n\n seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))\n seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)\n\n 
np_seg_img = (\n torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()\n )\n m_id = torch.from_numpy(rgb2id(np_seg_img))\n\n area = []\n for i in range(len(scores)):\n area.append(m_id.eq(i).sum().item())\n return area, seg_img\n\n area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)\n if cur_classes.numel() > 0:\n # We know filter empty masks as long as we find some\n while True:\n filtered_small = torch.as_tensor(\n [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device\n )\n if filtered_small.any().item():\n cur_scores = cur_scores[~filtered_small]\n cur_classes = cur_classes[~filtered_small]\n cur_masks = cur_masks[~filtered_small]\n area, seg_img = get_ids_area(cur_masks, cur_scores)\n else:\n break\n\n else:\n cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)\n\n segments_info = []\n for i, a in enumerate(area):\n cat = cur_classes[i].item()\n segments_info.append({\"id\": i, \"isthing\": self.is_thing_map[cat], \"category_id\": cat, \"area\": a})\n del cur_classes\n\n with io.BytesIO() as out:\n seg_img.save(out, format=\"PNG\")\n predictions = {\"png_string\": out.getvalue(), \"segments_info\": segments_info}\n preds.append(predictions)\n return preds"
},
{
"identifier": "PostProcessSegm",
"path": "models/dino/models/dino/segmentation.py",
"snippet": "class PostProcessSegm(nn.Module):\n def __init__(self, threshold=0.5):\n super().__init__()\n self.threshold = threshold\n\n @torch.no_grad()\n def forward(self, results, outputs, orig_target_sizes, max_target_sizes):\n assert len(orig_target_sizes) == len(max_target_sizes)\n max_h, max_w = max_target_sizes.max(0)[0].tolist()\n outputs_masks = outputs[\"pred_masks\"].squeeze(2)\n outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode=\"bilinear\", align_corners=False)\n outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()\n\n for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):\n img_h, img_w = t[0], t[1]\n results[i][\"masks\"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)\n results[i][\"masks\"] = F.interpolate(\n results[i][\"masks\"].float(), size=tuple(tt.tolist()), mode=\"nearest\"\n ).byte()\n\n return results"
},
{
"identifier": "dice_loss",
"path": "models/dino/models/dino/segmentation.py",
"snippet": "def dice_loss(inputs, targets, num_boxes):\n \"\"\"\n Compute the DICE loss, similar to generalized IOU for masks\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n \"\"\"\n inputs = inputs.sigmoid()\n inputs = inputs.flatten(1)\n numerator = 2 * (inputs * targets).sum(1)\n denominator = inputs.sum(-1) + targets.sum(-1)\n loss = 1 - (numerator + 1) / (denominator + 1)\n return loss.sum() / num_boxes"
},
{
"identifier": "build_deformable_transformer",
"path": "models/dino/models/dino/deformable_transformer.py",
"snippet": "def build_deformable_transformer(args):\n decoder_query_perturber = None\n if args.decoder_layer_noise:\n from .utils import RandomBoxPerturber\n decoder_query_perturber=RandomBoxPerturber(\n x_noise_scale=args.dln_xy_noise, y_noise_scale=args.dln_xy_noise, \n w_noise_scale=args.dln_hw_noise, h_noise_scale=args.dln_hw_noise)\n\n use_detached_boxes_dec_out = False\n try:\n use_detached_boxes_dec_out = args.use_detached_boxes_dec_out\n except:\n use_detached_boxes_dec_out =False\n\n return DeformableTransformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n num_queries=args.num_queries,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_unicoder_layers=args.unic_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n query_dim=args.query_dim,\n activation=args.transformer_activation,\n num_patterns=args.num_patterns,\n modulate_hw_attn=True,\n\n deformable_encoder=True,\n deformable_decoder=True,\n num_feature_levels=args.num_feature_levels,\n enc_n_points=args.enc_n_points,\n dec_n_points=args.dec_n_points,\n use_deformable_box_attn=args.use_deformable_box_attn,\n box_attn_type=args.box_attn_type,\n\n learnable_tgt_init=True,\n decoder_query_perturber=decoder_query_perturber,\n\n add_channel_attention=args.add_channel_attention,\n add_pos_value=args.add_pos_value,\n random_refpoints_xy=args.random_refpoints_xy,\n\n # two stage\n two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']\n two_stage_pat_embed=args.two_stage_pat_embed,\n two_stage_add_query_num=args.two_stage_add_query_num,\n two_stage_learn_wh=args.two_stage_learn_wh,\n two_stage_keep_all_tokens=args.two_stage_keep_all_tokens,\n dec_layer_number=args.dec_layer_number,\n rm_self_attn_layers=None,\n key_aware_type=None,\n layer_share_type=None,\n\n rm_detach=None,\n decoder_sa_type=args.decoder_sa_type,\n module_seq=args.decoder_module_seq,\n\n embed_init_tgt=args.embed_init_tgt,\n use_detached_boxes_dec_out=use_detached_boxes_dec_out\n )"
},
{
"identifier": "sigmoid_focal_loss",
"path": "models/dino/models/dino/utils.py",
"snippet": "def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n return loss.mean(1).sum() / num_boxes"
},
{
"identifier": "MLP",
"path": "models/dino/models/dino/utils.py",
"snippet": "class MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x"
},
{
"identifier": "MODULE_BUILD_FUNCS",
"path": "models/dino/models/registry.py",
"snippet": "MODULE_BUILD_FUNCS = Registry('model build functions')"
},
{
"identifier": "prepare_for_cdn",
"path": "models/dino/models/dino/dn_components.py",
"snippet": "def prepare_for_cdn(dn_args, training, num_queries, num_classes, hidden_dim, label_enc):\n \"\"\"\n A major difference of DINO from DN-DETR is that the author process pattern embedding pattern embedding in its detector\n forward function and use learnable tgt embedding, so we change this function a little bit.\n :param dn_args: targets, dn_number, label_noise_ratio, box_noise_scale\n :param training: if it is training or inference\n :param num_queries: number of queires\n :param num_classes: number of classes\n :param hidden_dim: transformer hidden dim\n :param label_enc: encode labels in dn\n :return:\n \"\"\"\n if training:\n targets, dn_number, label_noise_ratio, box_noise_scale = dn_args\n # positive and negative dn queries\n dn_number = dn_number * 2\n known = [(torch.ones_like(t['labels'])).cuda() for t in targets]\n batch_size = len(known)\n known_num = [sum(k) for k in known]\n if int(max(known_num)) == 0:\n dn_number = 1\n else:\n if dn_number >= 100:\n dn_number = dn_number // (int(max(known_num) * 2))\n elif dn_number < 1:\n dn_number = 1\n if dn_number == 0:\n dn_number = 1\n unmask_bbox = unmask_label = torch.cat(known)\n labels = torch.cat([t['labels'] for t in targets])\n boxes = torch.cat([t['boxes'] for t in targets])\n batch_idx = torch.cat([torch.full_like(t['labels'].long(), i) for i, t in enumerate(targets)])\n\n known_indice = torch.nonzero(unmask_label + unmask_bbox)\n known_indice = known_indice.view(-1)\n\n known_indice = known_indice.repeat(2 * dn_number, 1).view(-1)\n known_labels = labels.repeat(2 * dn_number, 1).view(-1)\n known_bid = batch_idx.repeat(2 * dn_number, 1).view(-1)\n known_bboxs = boxes.repeat(2 * dn_number, 1)\n known_labels_expaned = known_labels.clone()\n known_bbox_expand = known_bboxs.clone()\n\n if label_noise_ratio > 0:\n p = torch.rand_like(known_labels_expaned.float())\n chosen_indice = torch.nonzero(p < (label_noise_ratio * 0.5)).view(-1) # half of bbox prob\n new_label = torch.randint_like(chosen_indice, 0, num_classes) # randomly put a new one here\n known_labels_expaned.scatter_(0, chosen_indice, new_label)\n single_pad = int(max(known_num))\n\n pad_size = int(single_pad * 2 * dn_number)\n positive_idx = torch.tensor(range(len(boxes))).long().cuda().unsqueeze(0).repeat(dn_number, 1)\n positive_idx += (torch.tensor(range(dn_number)) * len(boxes) * 2).long().cuda().unsqueeze(1)\n positive_idx = positive_idx.flatten()\n negative_idx = positive_idx + len(boxes)\n if box_noise_scale > 0:\n known_bbox_ = torch.zeros_like(known_bboxs)\n known_bbox_[:, :2] = known_bboxs[:, :2] - known_bboxs[:, 2:] / 2\n known_bbox_[:, 2:] = known_bboxs[:, :2] + known_bboxs[:, 2:] / 2\n\n diff = torch.zeros_like(known_bboxs)\n diff[:, :2] = known_bboxs[:, 2:] / 2\n diff[:, 2:] = known_bboxs[:, 2:] / 2\n\n rand_sign = torch.randint_like(known_bboxs, low=0, high=2, dtype=torch.float32) * 2.0 - 1.0\n rand_part = torch.rand_like(known_bboxs)\n rand_part[negative_idx] += 1.0\n rand_part *= rand_sign\n known_bbox_ = known_bbox_ + torch.mul(rand_part,\n diff).cuda() * box_noise_scale\n known_bbox_ = known_bbox_.clamp(min=0.0, max=1.0)\n known_bbox_expand[:, :2] = (known_bbox_[:, :2] + known_bbox_[:, 2:]) / 2\n known_bbox_expand[:, 2:] = known_bbox_[:, 2:] - known_bbox_[:, :2]\n\n m = known_labels_expaned.long().to('cuda')\n input_label_embed = label_enc(m)\n input_bbox_embed = inverse_sigmoid(known_bbox_expand)\n\n padding_label = torch.zeros(pad_size, hidden_dim).cuda()\n padding_bbox = torch.zeros(pad_size, 4).cuda()\n\n input_query_label = 
padding_label.repeat(batch_size, 1, 1)\n input_query_bbox = padding_bbox.repeat(batch_size, 1, 1)\n\n map_known_indice = torch.tensor([]).to('cuda')\n if len(known_num):\n map_known_indice = torch.cat([torch.tensor(range(num)) for num in known_num]) # [1,2, 1,2,3]\n map_known_indice = torch.cat([map_known_indice + single_pad * i for i in range(2 * dn_number)]).long()\n if len(known_bid):\n input_query_label[(known_bid.long(), map_known_indice)] = input_label_embed\n input_query_bbox[(known_bid.long(), map_known_indice)] = input_bbox_embed\n\n tgt_size = pad_size + num_queries\n attn_mask = torch.ones(tgt_size, tgt_size).to('cuda') < 0\n # match query cannot see the reconstruct\n attn_mask[pad_size:, :pad_size] = True\n # reconstruct cannot see each other\n for i in range(dn_number):\n if i == 0:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True\n if i == dn_number - 1:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * i * 2] = True\n else:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * 2 * i] = True\n\n dn_meta = {\n 'pad_size': pad_size,\n 'num_dn_group': dn_number,\n }\n else:\n\n input_query_label = None\n input_query_bbox = None\n attn_mask = None\n dn_meta = None\n\n return input_query_label, input_query_bbox, attn_mask, dn_meta"
},
{
"identifier": "dn_post_process",
"path": "models/dino/models/dino/dn_components.py",
"snippet": "def dn_post_process(outputs_class, outputs_coord, dn_meta, aux_loss, _set_aux_loss):\n \"\"\"\n post process of dn after output from the transformer\n put the dn part in the dn_meta\n \"\"\"\n if dn_meta and dn_meta['pad_size'] > 0:\n output_known_class = outputs_class[:, :, :dn_meta['pad_size'], :]\n output_known_coord = outputs_coord[:, :, :dn_meta['pad_size'], :]\n outputs_class = outputs_class[:, :, dn_meta['pad_size']:, :]\n outputs_coord = outputs_coord[:, :, dn_meta['pad_size']:, :]\n out = {'pred_logits': output_known_class[-1], 'pred_boxes': output_known_coord[-1]}\n if aux_loss:\n out['aux_outputs'] = _set_aux_loss(output_known_class, output_known_coord)\n dn_meta['output_known_lbs_bboxes'] = out\n return outputs_class, outputs_coord"
}
] | import copy
import math
import torch
import torch.nn.functional as F
from typing import List
from torch import nn
from torchvision.ops.boxes import nms
from models.dino.util import box_ops
from models.dino.util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_size, interpolate,
is_dist_avail_and_initialized, inverse_sigmoid)
from .backbone import build_backbone
from .matcher import build_matcher
from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm,
dice_loss)
from .deformable_transformer import build_deformable_transformer
from .utils import sigmoid_focal_loss, MLP
from ..registry import MODULE_BUILD_FUNCS
from .dn_components import prepare_for_cdn,dn_post_process | 9,380 | # ------------------------------------------------------------------------
# DINO
# Copyright (c) 2022 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Conditional DETR model and criterion classes.
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
class DINO(nn.Module):
""" This is the Cross-Attention Detector module that performs object detection """
def __init__(self, backbone, transformer, num_classes, num_queries,
aux_loss=False, iter_update=False,
query_dim=2,
random_refpoints_xy=False,
fix_refpoints_hw=-1,
num_feature_levels=1,
nheads=8,
# two stage
two_stage_type='no', # ['no', 'standard']
two_stage_add_query_num=0,
dec_pred_class_embed_share=True,
dec_pred_bbox_embed_share=True,
two_stage_class_embed_share=True,
two_stage_bbox_embed_share=True,
decoder_sa_type = 'sa',
num_patterns = 0,
dn_number = 100,
dn_box_noise_scale = 0.4,
dn_label_noise_ratio = 0.5,
dn_labelbook_size = 100,
):
""" Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of object classes
num_queries: number of object queries, i.e. detection slots. This is the maximal number of objects
Conditional DETR can detect in a single image. For COCO, we recommend 100 queries.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
fix_refpoints_hw: -1 (default): learn w and h for each box separately
>0 : given fixed number
-2 : learn a shared w and h
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
self.num_classes = num_classes
self.hidden_dim = hidden_dim = transformer.d_model
self.num_feature_levels = num_feature_levels
self.nheads = nheads
self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim)
# setting query dim
self.query_dim = query_dim
assert query_dim == 4
self.random_refpoints_xy = random_refpoints_xy
self.fix_refpoints_hw = fix_refpoints_hw
# for dn training
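# these denoising hyperparameters are consumed by prepare_for_cdn during training:
# dn_number groups of noised ground-truth queries, with labels flipped at
# dn_label_noise_ratio and box jitter scaled by dn_box_noise_scale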
self.num_patterns = num_patterns
self.dn_number = dn_number
self.dn_box_noise_scale = dn_box_noise_scale
self.dn_label_noise_ratio = dn_label_noise_ratio
self.dn_labelbook_size = dn_labelbook_size
# prepare input projection layers
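# each backbone level is mapped to the transformer width hidden_dim with a 1x1 conv
# + GroupNorm(32); if num_feature_levels exceeds the number of backbone outputs,
# extra stride-2 3x3 convs downsample the last level to synthesize the missing levels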
if num_feature_levels > 1:
num_backbone_outs = len(backbone.num_channels)
input_proj_list = []
for _ in range(num_backbone_outs):
in_channels = backbone.num_channels[_]
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
))
for _ in range(num_feature_levels - num_backbone_outs):
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
nn.GroupNorm(32, hidden_dim),
))
in_channels = hidden_dim
self.input_proj = nn.ModuleList(input_proj_list)
else:
assert two_stage_type == 'no', "two_stage_type should be no if num_feature_levels=1 !!!"
self.input_proj = nn.ModuleList([
nn.Sequential(
nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
)])
self.backbone = backbone
self.aux_loss = aux_loss
self.box_pred_damping = box_pred_damping = None
self.iter_update = iter_update
assert iter_update, "Why not iter_update?"
# prepare pred layers
self.dec_pred_class_embed_share = dec_pred_class_embed_share
self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share
# prepare class & box embed
_class_embed = nn.Linear(hidden_dim, num_classes)
| # ------------------------------------------------------------------------
# DINO
# Copyright (c) 2022 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Conditional DETR model and criterion classes.
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
class DINO(nn.Module):
""" This is the Cross-Attention Detector module that performs object detection """
def __init__(self, backbone, transformer, num_classes, num_queries,
aux_loss=False, iter_update=False,
query_dim=2,
random_refpoints_xy=False,
fix_refpoints_hw=-1,
num_feature_levels=1,
nheads=8,
# two stage
two_stage_type='no', # ['no', 'standard']
two_stage_add_query_num=0,
dec_pred_class_embed_share=True,
dec_pred_bbox_embed_share=True,
two_stage_class_embed_share=True,
two_stage_bbox_embed_share=True,
decoder_sa_type = 'sa',
num_patterns = 0,
dn_number = 100,
dn_box_noise_scale = 0.4,
dn_label_noise_ratio = 0.5,
dn_labelbook_size = 100,
):
""" Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of object classes
num_queries: number of object queries, i.e. detection slots. This is the maximal number of objects
Conditional DETR can detect in a single image. For COCO, we recommend 100 queries.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
fix_refpoints_hw: -1 (default): learn w and h for each box separately
>0 : given fixed number
-2 : learn a shared w and h
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
self.num_classes = num_classes
self.hidden_dim = hidden_dim = transformer.d_model
self.num_feature_levels = num_feature_levels
self.nheads = nheads
self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim)
# setting query dim
self.query_dim = query_dim
assert query_dim == 4
self.random_refpoints_xy = random_refpoints_xy
self.fix_refpoints_hw = fix_refpoints_hw
# for dn training
self.num_patterns = num_patterns
self.dn_number = dn_number
self.dn_box_noise_scale = dn_box_noise_scale
self.dn_label_noise_ratio = dn_label_noise_ratio
self.dn_labelbook_size = dn_labelbook_size
# prepare input projection layers
if num_feature_levels > 1:
num_backbone_outs = len(backbone.num_channels)
input_proj_list = []
for _ in range(num_backbone_outs):
in_channels = backbone.num_channels[_]
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
))
for _ in range(num_feature_levels - num_backbone_outs):
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
nn.GroupNorm(32, hidden_dim),
))
in_channels = hidden_dim
self.input_proj = nn.ModuleList(input_proj_list)
else:
assert two_stage_type == 'no', "two_stage_type should be no if num_feature_levels=1 !!!"
self.input_proj = nn.ModuleList([
nn.Sequential(
nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
)])
self.backbone = backbone
self.aux_loss = aux_loss
self.box_pred_damping = box_pred_damping = None
self.iter_update = iter_update
assert iter_update, "Why not iter_update?"
# prepare pred layers
self.dec_pred_class_embed_share = dec_pred_class_embed_share
self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share
# prepare class & box embed
_class_embed = nn.Linear(hidden_dim, num_classes) | _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) | 16 | 2023-12-04 00:27:58+00:00 | 12k |
yongzhuo/ChatGLM3-SFT | chatglm3_sft/ft_chatglm3/train_deepspeed.py | [
{
"identifier": "CUDA_VISIBLE_DEVICES",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "CUDA_VISIBLE_DEVICES = \"0\""
},
{
"identifier": "USE_TORCH",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "USE_TORCH = \"1\""
},
{
"identifier": "CPU_NUMS",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "CPU_NUMS = \"9\""
},
{
"identifier": "ChatGLMForConditionalGeneration",
"path": "chatglm3_sft/models/modeling_chatglm.py",
"snippet": "_CHECKPOINT_FOR_DOC = \"THUDM/ChatGLM\"\n_CONFIG_FOR_DOC = \"ChatGLMConfig\"\nCHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"THUDM/chatglm3-6b\",\n # See all ChatGLM models at https://huggingface.co/models?filter=chatglm\n]\ndef default_init(cls, *args, **kwargs):\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n def __init__(self, config: ChatGLMConfig):\n def forward(self, prefix: torch.Tensor):\ndef split_tensor_along_last_dim(\n tensor: torch.Tensor,\n num_partitions: int,\n contiguous_split_chunks: bool = False,\n) -> List[torch.Tensor]:\n def __init__(self, dim, original_impl=False, device=None, dtype=None):\n def forward_impl(\n self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000\n ):\n def forward(self, max_seq_len, offset=0):\ndef apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:\n def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs):\n def forward(self, hidden_states: torch.Tensor):\n def __init__(self, config: ChatGLMConfig, layer_number):\n def forward(self, query_layer, key_layer, value_layer, attention_mask):\n def __init__(self, config: ChatGLMConfig, layer_number, device=None):\n def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None):\n def forward(\n self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True\n ):\ndef _config_to_kwargs(args):\n def __init__(self, config: ChatGLMConfig, device=None):\n def swiglu(x):\n def forward(self, hidden_states):\n def __init__(self, config: ChatGLMConfig, layer_number, device=None):\n def forward(\n self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True,\n ):\n def __init__(self, config: ChatGLMConfig, device=None):\n def build_layer(layer_number):\n def _get_layer(self, layer_number):\n def forward(\n self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None,\n use_cache: Optional[bool] = True,\n output_hidden_states: Optional[bool] = False,\n ):\n def _init_weights(self, module):\n def get_masks(self, input_ids, past_key_values, padding_mask=None):\n def get_position_ids(self, input_ids, device):\n def _set_gradient_checkpointing(self, module, value=False):\n def __init__(self, config: ChatGLMConfig, device=None):\n def forward(self, input_ids):\n def __init__(self, config: ChatGLMConfig, device=None, empty_init=True):\n def get_input_embeddings(self):\n def set_input_embeddings(self, new_embeddings: torch.Tensor):\n def get_prompt(self, batch_size, device, dtype=torch.half):\n def forward(\n self,\n input_ids,\n position_ids: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.BoolTensor] = None,\n full_attention_mask: Optional[torch.BoolTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ):\n def quantize(self, weight_bit_width: int):\n def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):\n def _update_model_kwargs_for_generation(\n self,\n outputs: ModelOutput,\n model_kwargs: Dict[str, Any],\n is_encoder_decoder: bool = False,\n standardize_cache_format: bool = False,\n ) -> Dict[str, Any]:\n def prepare_inputs_for_generation(\n self,\n input_ids: torch.LongTensor,\n past_key_values: Optional[torch.Tensor] = None,\n 
attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n is_first_forward: bool = True,\n **kwargs\n ) -> dict:\n def forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n full_attention_mask: Optional[torch.BoolTensor] = None,\n past_key_values: Optional[Tuple[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n labels: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n return_last_logit: Optional[bool] = False,\n ):\n def _reorder_cache(\n past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor\n ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:\n def process_response(self, output, history):\n def tool_call(**kwargs):\n def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, role: str = \"user\",\n max_length: int = 8192, num_beams=1, do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None,\n **kwargs):\n def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, role: str = \"user\",\n past_key_values=None,max_length: int = 8192, do_sample=True, top_p=0.8, temperature=0.8,\n logits_processor=None, return_past_key_values=False, **kwargs):\n def stream_generate(\n self,\n input_ids,\n generation_config: Optional[GenerationConfig] = None,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,\n return_past_key_values=False,\n **kwargs,\n ):\n def quantize(self, bits: int, empty_init=False, device=None, **kwargs):\n def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n full_attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,\n inputs_embeds: Optional[torch.LongTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple[torch.Tensor, ...], SequenceClassifierOutputWithPast]:\nclass InvalidScoreLogitsProcessor(LogitsProcessor):\nclass PrefixEncoder(torch.nn.Module):\nclass RotaryEmbedding(nn.Module):\nclass RMSNorm(torch.nn.Module):\nclass CoreAttention(torch.nn.Module):\nclass SelfAttention(torch.nn.Module):\nclass MLP(torch.nn.Module):\nclass GLMBlock(torch.nn.Module):\nclass GLMTransformer(torch.nn.Module):\nclass ChatGLMPreTrainedModel(PreTrainedModel):\nclass Embedding(torch.nn.Module):\nclass ChatGLMModel(ChatGLMPreTrainedModel):\nclass ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):\nclass ChatGLMForSequenceClassification(ChatGLMPreTrainedModel):"
},
{
"identifier": "ChatGLMTokenizer",
"path": "chatglm3_sft/models/tokenization_chatglm.py",
"snippet": "class ChatGLMTokenizer(PreTrainedTokenizer):\n vocab_files_names = {\"vocab_file\": \"tokenizer.model\"}\n\n model_input_names = [\"input_ids\", \"attention_mask\", \"position_ids\"]\n\n def __init__(self, vocab_file, padding_side=\"left\", clean_up_tokenization_spaces=False, **kwargs):\n self.name = \"GLMTokenizer\"\n\n self.vocab_file = vocab_file\n self.tokenizer = SPTokenizer(vocab_file)\n self.special_tokens = {\n \"<bos>\": self.tokenizer.bos_id,\n \"<eos>\": self.tokenizer.eos_id,\n \"<pad>\": self.tokenizer.pad_id\n }\n super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)\n\n def get_command(self, token):\n if token in self.special_tokens:\n return self.special_tokens[token]\n assert token in self.tokenizer.special_tokens, f\"{token} is not a special token for {self.name}\"\n return self.tokenizer.special_tokens[token]\n\n @property\n def unk_token(self) -> str:\n return \"<unk>\"\n\n @property\n def pad_token(self) -> str:\n return \"<unk>\"\n\n @property\n def pad_token_id(self):\n return self.get_command(\"<pad>\")\n\n @property\n def eos_token(self) -> str:\n return \"</s>\"\n\n @property\n def eos_token_id(self):\n return self.get_command(\"<eos>\")\n\n @property\n def vocab_size(self):\n return self.tokenizer.n_words\n\n def get_vocab(self):\n \"\"\" Returns vocab as a dict \"\"\"\n vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}\n vocab.update(self.added_tokens_encoder)\n return vocab\n\n def _tokenize(self, text, **kwargs):\n return self.tokenizer.tokenize(text)\n\n def _convert_token_to_id(self, token):\n \"\"\" Converts a token (str) in an id using the vocab. \"\"\"\n return self.tokenizer.convert_token_to_id(token)\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n return self.tokenizer.convert_id_to_token(index)\n\n def convert_tokens_to_string(self, tokens: List[str]) -> str:\n return self.tokenizer.decode_tokens(tokens)\n\n def save_vocabulary(self, save_directory, filename_prefix=None):\n \"\"\"\n Save the vocabulary and special tokens file to a directory.\n\n Args:\n save_directory (`str`):\n The directory in which to save the vocabulary.\n filename_prefix (`str`, *optional*):\n An optional prefix to add to the named of the saved files.\n\n Returns:\n `Tuple(str)`: Paths to the files saved.\n \"\"\"\n if os.path.isdir(save_directory):\n vocab_file = os.path.join(\n save_directory, self.vocab_files_names[\"vocab_file\"]\n )\n else:\n vocab_file = save_directory\n\n with open(self.vocab_file, 'rb') as fin:\n proto_str = fin.read()\n\n with open(vocab_file, \"wb\") as writer:\n writer.write(proto_str)\n\n return (vocab_file,)\n\n def get_prefix_tokens(self):\n prefix_tokens = [self.get_command(\"[gMASK]\"), self.get_command(\"sop\")]\n return prefix_tokens\n\n def build_single_message(self, role, metadata, message):\n assert role in [\"system\", \"user\", \"assistant\", \"observation\"], role\n role_tokens = [self.get_command(f\"<|{role}|>\")] + self.tokenizer.encode(f\"{metadata}\\n\")\n message_tokens = self.tokenizer.encode(message)\n tokens = role_tokens + message_tokens\n return tokens\n\n def build_chat_input(self, query, history=None, role=\"user\"):\n if history is None:\n history = []\n input_ids = []\n for item in history:\n content = item[\"content\"]\n if item[\"role\"] == \"system\" and \"tools\" in item:\n content = content + \"\\n\" + json.dumps(item[\"tools\"], indent=4, 
ensure_ascii=False)\n input_ids.extend(self.build_single_message(item[\"role\"], item.get(\"metadata\", \"\"), content))\n input_ids.extend(self.build_single_message(role, \"\", query))\n input_ids.extend([self.get_command(\"<|assistant|>\")])\n return self.batch_encode_plus([input_ids], return_tensors=\"pt\", is_split_into_words=True)\n\n def build_inputs_with_special_tokens(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n adding special tokens. A BERT sequence has the following format:\n\n - single sequence: `[CLS] X [SEP]`\n - pair of sequences: `[CLS] A [SEP] B [SEP]`\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.\n \"\"\"\n prefix_tokens = self.get_prefix_tokens()\n token_ids_0 = prefix_tokens + token_ids_0\n if token_ids_1 is not None:\n token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command(\"<eos>\")]\n return token_ids_0\n\n def _pad(\n self,\n encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],\n max_length: Optional[int] = None,\n padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,\n pad_to_multiple_of: Optional[int] = None,\n return_attention_mask: Optional[bool] = None,\n ) -> dict:\n \"\"\"\n Pad encoded inputs (on left/right and up to predefined length or max length in the batch)\n\n Args:\n encoded_inputs:\n Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).\n max_length: maximum length of the returned list and optionally padding length (see below).\n Will truncate by taking into account the special tokens.\n padding_strategy: PaddingStrategy to use for padding.\n\n - PaddingStrategy.LONGEST Pad to the longest sequence in the batch\n - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)\n - PaddingStrategy.DO_NOT_PAD: Do not pad\n The tokenizer padding sides are defined in self.padding_side:\n\n - 'left': pads on the left of the sequences\n - 'right': pads on the right of the sequences\n pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.\n This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability\n `>= 7.5` (Volta).\n return_attention_mask:\n (optional) Set to False to avoid returning attention mask (default: set to model specifics)\n \"\"\"\n # Load from model defaults\n # assert self.padding_side == \"left\"\n\n required_input = encoded_inputs[self.model_input_names[0]]\n seq_length = len(required_input)\n\n if padding_strategy == PaddingStrategy.LONGEST:\n max_length = len(required_input)\n\n if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):\n max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of\n\n needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length\n\n # Initialize attention mask if not present.\n if \"attention_mask\" not in encoded_inputs:\n encoded_inputs[\"attention_mask\"] = [1] * seq_length\n\n if \"position_ids\" not in encoded_inputs:\n encoded_inputs[\"position_ids\"] = list(range(seq_length))\n\n if needs_to_be_padded:\n difference = max_length - 
len(required_input)\n\n if \"attention_mask\" in encoded_inputs:\n encoded_inputs[\"attention_mask\"] = [0] * difference + encoded_inputs[\"attention_mask\"]\n if \"position_ids\" in encoded_inputs:\n encoded_inputs[\"position_ids\"] = [0] * difference + encoded_inputs[\"position_ids\"]\n encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input\n\n return encoded_inputs"
},
{
"identifier": "PATH_MODEL_PRETRAIN",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "PATH_MODEL_PRETRAIN = \"\""
},
{
"identifier": "DATA_PATH",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "DATA_PATH = \"../dataset/alpaca_gpt4_data_zh.json\""
},
{
"identifier": "MODEL_SAVE_DIR",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "MODEL_SAVE_DIR = \"model_chatglm3_sft\""
},
{
"identifier": "REPO_ID",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "REPO_ID = \"THUDM/chatglm3-6b\""
},
{
"identifier": "MICRO_BATCH_SIZE",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "MICRO_BATCH_SIZE = 4 # default=4 # this could actually be 5 but i like powers of 2"
},
{
"identifier": "BATCH_SIZE",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "BATCH_SIZE = 128"
},
{
"identifier": "GRADIENT_ACCUMULATION_STEPS",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "GRADIENT_ACCUMULATION_STEPS = BATCH_SIZE // MICRO_BATCH_SIZE"
},
{
"identifier": "LEARNING_RATE",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "LEARNING_RATE = 3e-4 # default=3e-4 # the Karpathy constant"
},
{
"identifier": "EPOCHS",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "EPOCHS = 3 # default=3 # we don't always need 3 tbh"
},
{
"identifier": "SAVE_STEPS",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "SAVE_STEPS = 382"
},
{
"identifier": "VAL_SET_SIZE",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "VAL_SET_SIZE = 0"
},
{
"identifier": "TARGET_MODULES",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "TARGET_MODULES = [\"query_key_value\"]"
},
{
"identifier": "IS_PARALLELIZABLE",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "IS_PARALLELIZABLE = False"
},
{
"identifier": "MODEL_PARALLEL",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "MODEL_PARALLEL = False"
},
{
"identifier": "USE_CACHE",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "USE_CACHE = False"
},
{
"identifier": "MAX_LENGTH_Q",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "MAX_LENGTH_Q = 256 - 2 # default=128 - 2 # 512 - 2"
},
{
"identifier": "MAX_LENGTH_A",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "MAX_LENGTH_A = 256 - 2 # default=128 - 2 # 512 - 2"
},
{
"identifier": "MAX_LENGTH_QA",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "MAX_LENGTH_QA = MAX_LENGTH_Q + MAX_LENGTH_A + 4"
},
{
"identifier": "LORA_DROPOUT",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "LORA_DROPOUT = 0.05"
},
{
"identifier": "LORA_ALPHA",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "LORA_ALPHA = 16"
},
{
"identifier": "LORA_R",
"path": "chatglm3_sft/ft_chatglm3/config.py",
"snippet": "LORA_R = 8"
}
] | import random
import copy
import sys
import os
import torch.nn as nn
import transformers
import torch
import torch.distributed as dist
from chatglm3_sft.ft_chatglm3.config import CUDA_VISIBLE_DEVICES, USE_TORCH, CPU_NUMS # from config
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from peft import (get_peft_model_state_dict, get_peft_model, LoraConfig)
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.modeling_utils import unwrap_model
from tensorboardX import SummaryWriter
from datasets import load_dataset
from chatglm3_sft.models.modeling_chatglm import ChatGLMForConditionalGeneration, ChatGLMConfig
from chatglm3_sft.models.tokenization_chatglm import ChatGLMTokenizer
from chatglm3_sft.ft_chatglm3.config import PATH_MODEL_PRETRAIN, DATA_PATH, MODEL_SAVE_DIR, REPO_ID
from chatglm3_sft.ft_chatglm3.config import MICRO_BATCH_SIZE, BATCH_SIZE, GRADIENT_ACCUMULATION_STEPS
from chatglm3_sft.ft_chatglm3.config import LEARNING_RATE, EPOCHS, SAVE_STEPS, VAL_SET_SIZE, TARGET_MODULES
from chatglm3_sft.ft_chatglm3.config import IS_PARALLELIZABLE, MODEL_PARALLEL, USE_CACHE
from chatglm3_sft.ft_chatglm3.config import MAX_LENGTH_Q, MAX_LENGTH_A, MAX_LENGTH_QA
from chatglm3_sft.ft_chatglm3.config import LORA_DROPOUT, LORA_ALPHA, LORA_R | 8,091 | # attention_mask[..., idx] = 1
attention_mask = (attention_mask < 0.5).bool()
return attention_mask
len_max_batch = [len(batch[i].get("input_ids")) + len(batch[i].get("labels")) + 1
for i in range(len(batch))]
len_max_batch = min(MAX_LENGTH_QA, max(len_max_batch))
batch_attention_mask = []
batch_position_ids = []
batch_input_ids = []
batch_labels = []
for ba in batch:
x, y = ba.get("input_ids"), ba.get("labels")
len_padding = len_max_batch - len(x) - len(y)
# if tokenizer.padding_side and tokenizer.padding_side == "left":
# labels = [-100] * len_padding + [-100] * len(x) + y
# input_ids = [ID_PAD] * (len_padding) + x + y
# else:
# labels = [-100] * len(x) + y + [-100] * len_padding
# input_ids = x + y + [ID_PAD] * (len_padding)
        ## pretrain-style: do not mask the prompt tokens in the labels
if tokenizer.padding_side and tokenizer.padding_side == "left":
labels = [-100] * len_padding + x + y
input_ids = [ID_PAD] * (len_padding) + x + y
else:
labels = x + y + [-100] * len_padding
input_ids = x + y + [ID_PAD] * (len_padding)
tensor_position_ids = get_position_ids(input_ids, bos_token_id=ID_SOP)
tensor_attention_mask = get_masks(input_ids, special_ids=IDS_ORG)
tensor_input_ids = torch.tensor(input_ids, dtype=torch.long)
tensor_labels = torch.tensor(labels, dtype=torch.long)
batch_attention_mask.append(tensor_attention_mask)
batch_position_ids.append(tensor_position_ids)
batch_input_ids.append(tensor_input_ids)
batch_labels.append(tensor_labels)
# print_rank_0(batch_attention_mask)
batch_attention_mask = torch.stack(batch_attention_mask)
batch_position_ids = torch.stack(batch_position_ids)
batch_input_ids = torch.stack(batch_input_ids)
batch_labels = torch.stack(batch_labels)
input_dict = {
"full_attention_mask": copy.deepcopy(batch_attention_mask),
"attention_mask": batch_attention_mask,
"position_ids": batch_position_ids,
"input_ids": batch_input_ids,
"labels": batch_labels,
}
# print_rank_0(input_dict)
return input_dict
def dfs_file(path_dir):
"""
    Recursively collect all files under a directory (all levels, including subdirectories)
Args:
path_dir[String]:, path of dir, eg. "/home/data"
Returns:
data[List]: data of input, eg. ["2020_01_08.txt"]
"""
path_files = []
    for root, dirs, files in os.walk(path_dir):  # root dir, subdirectories, and files respectively
        for file in files:  # iterate over the files
            file_path = os.path.join(root, file)  # absolute path of the file
            path_files.append(file_path)  # append the file path to the list
files = list(set(path_files))
files.sort() # the same list
return files
def print_rank_0(*args):
""" 只打印 0 号GPU的 """
if torch.distributed.get_rank() == 0: # 一般用0,当然,可以选任意的rank保存。
print(*args)
def local_rank_is_0():
""" 判断是哪台机子的 """
flag = False
if torch.distributed.get_rank() == 0:
flag = True
return flag
dist.init_process_group(backend="nccl")
tokenizer = ChatGLMTokenizer.from_pretrained(PATH_MODEL_PRETRAIN)
# tokenizer.pad_token = tokenizer.eos_token
# tokenizer.padding_side = "left" # Allow batched inference
tokenizer.padding_side = "right" # Allow batched inference
ID_MASK = 64789
ID_gMASK = 64790
ID_sMASK = 64791
ID_SOP = 64792
ID_EOP = 64793
ID_PAD = 64793
ID_system = 64794
ID_user = 64795
ID_assistant = 64796
ID_observation = 64797
TOKEN_ASSISTANT = "<|assistant|>"
TOKEN_SYSTEM = "<|system|>"
TOKEN_USER = "<|user|>"
TOKEN_START = "sop"
TOKEN_END = "eop"
# IDS_ORG = [ID_MASK, ID_gMASK, ID_sMASK, ID_SOP, ID_EOP,
# ID_BOS, ID_EOS, ID_PAD]
# IDS_ORG = [ID_PAD]
IDS_ORG = [ID_user] # start, <|user|>
model = ChatGLMForConditionalGeneration.from_pretrained(PATH_MODEL_PRETRAIN)
model = prepare_model_for_half_training(model,
use_gradient_checkpointing=True,
output_embedding_layer_name="lm_head",
layer_norm_names=["post_attention_layernorm",
"final_layernorm",
"input_layernorm",
],
)
model.gradient_checkpointing_enable()
model.enable_input_require_grads()
model.is_parallelizable = IS_PARALLELIZABLE
model.model_parallel = MODEL_PARALLEL
model.config.use_cache = USE_CACHE
| # !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2023/3/5 21:04
# @author : Mo
# @function: chatglm3
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
# print_rank_0(path_root)
sys.path.append(path_root)
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:3072"
# os.environ["CUDA_VISIBLE_DEVICES"] = CUDA_VISIBLE_DEVICES
os.environ["USE_TORCH"] = USE_TORCH
os.environ["OMP_NUM_THREADS"] = CPU_NUMS # export OMP_NUM_THREADS=1
os.environ["OPENBLAS_NUM_THREADS"] = CPU_NUMS # export OPENBLAS_NUM_THREADS=1
os.environ["MKL_NUM_THREADS"] = CPU_NUMS # export MKL_NUM_THREADS=1
os.environ["VECLIB_MAXIMUM_THREADS"] = CPU_NUMS # export VECLIB_MAXIMUM_THREADS=1
os.environ["NUMEXPR_NUM_THREADS"] = CPU_NUMS # export NUMEXPR_NUM_THREADS=1
# import bitsandbytes as bnb
def save_model_state(model, config=None, model_save_dir="./", model_name="adapter_model.bin"):
""" 仅保存 有梯度 的 模型参数(推荐使用) """
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
# save config
if config:
config.save_pretrained(model_save_dir)
# config.to_dict()
# save model
path_model = os.path.join(model_save_dir, model_name)
grad_params_dict = {k: v.to("cpu") for k, v in model.named_parameters()
if v.requires_grad == True}
torch.save(grad_params_dict, path_model)
print_rank_0("******model_save_path is {}******".format(path_model))
def print_rank_0_named_parameters(model, use_print_rank_0_data=False):
""" 打印模型训练参数/数据类型信息 """
trainable_params = 0
all_param = 0
for name, param in model.named_parameters():
if use_print_rank_0_data:
print_rank_0((name, param.data.dtype, param.requires_grad, param.data))
else:
print_rank_0((name, param.data.dtype, param.requires_grad))
num_params = param.numel()
# if using DS Zero 3 and the weights are initialized empty
if num_params == 0 and hasattr(param, "ds_numel"):
num_params = param.ds_numel
all_param += num_params
if param.requires_grad:
trainable_params += num_params
print_rank_0(f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}")
def prepare_model_for_half_training(model, output_embedding_layer_name="lm_head",
use_gradient_checkpointing=True, layer_norm_names=["layer_norm"]):
r"""
    This method wraps the entire protocol for preparing a model before running training. This includes:
    1- casting the layernorm to fp32, 2- making the output embedding layer require grads, 3- upcasting the lm
    head to fp32
Args:
model, (`transformers.PreTrainedModel`):
The loaded model from `transformers`
"""
    # Do not call model.half() here: that would truncate precision before training; the data should already be half precision from the start
for name, param in model.named_parameters():
# freeze base model's layers
param.requires_grad = False
# cast layer norm in fp32 for stability for 8bit models
if param.ndim == 1 and any(layer_norm_name in name for layer_norm_name in layer_norm_names):
param.data = param.data.to(torch.float32)
        elif output_embedding_layer_name in name:  # the lm_head (last layer) also needs to be torch.float32
param.data = param.data.to(torch.float32)
else:
param.data = param.data.to(torch.half)
if use_gradient_checkpointing:
# For backward compatibility
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
# enable gradient checkpointing for memory efficiency
model.gradient_checkpointing_enable()
return model
def generate_prompt(data_point, is_logger=False):
# sorry about the formatting disaster gotta move fast
# text_1 = f"指令:\n{data_point.get('instruction', '')}\n问:\n{data_point.get('input', '')}\n答:\n" \
# if data_point.get('input', '') else f"指令:\n{data_point.get('instruction', '')}\n答:\n"
# text_2 = f"{data_point.get('output', '')}"
prompt_system_std = "You are a helpful, safety and harmless assistant."
prompt_system_std = "You are ChatGLM3, a large language model trained by Zhipu.AI. Follow the user's instructions carefully. Respond using markdown."
instruction = data_point.get("instruction", "").strip()
text_input = instruction + " " + data_point.get("input", "").strip()
text_output = data_point.get("output", "").strip()
history = data_point.get("history", "")
string_system = TOKEN_SYSTEM + "\n" + prompt_system_std + "\n"
string_output = TOKEN_USER + "\n" + text_input + "\n" + \
TOKEN_ASSISTANT + "\n" + text_output + "\n"
ids_prompt_system = [ID_gMASK, ID_SOP, ID_system] + tokenizer.encode(
"\n" + instruction + "\n", is_split_into_words=True)[2:]
ids_text_input = [ID_user] + tokenizer.encode("\n" + text_input + "\n",
is_split_into_words=True)[2:]
ids_text_output = [ID_assistant] + tokenizer.encode("\n" + text_output + "\n",
is_split_into_words=True)[2:]
string_history = ""
ids_history = []
for idx, item in enumerate(history):
content = item
if idx % 2 == 0:
id_role = ID_user
token_role = TOKEN_USER
else:
id_role = ID_assistant
token_role = TOKEN_ASSISTANT
string_history += token_role + "\n" + content + "\n"
id_history = [id_role] + tokenizer.encode("\n" + content + "\n",
is_split_into_words=True)[2:]
ids_history.extend(id_history)
    ### include the full x, as in pretraining (to stay consistent with how multi-turn dialogue data is built)
ids_prompt_input = ids_prompt_system + ids_history + ids_text_input + ids_text_output
string_print = string_system + string_history + string_output
if len(ids_prompt_input) > (MAX_LENGTH_Q + MAX_LENGTH_A):
# if > len_max, clip MAX_LENGTH_q and MAX_LENGTH_a
ids_prompt_input = ids_prompt_input[:MAX_LENGTH_Q]
out = {"input_ids": ids_prompt_input, "labels": []}
if is_logger:
print(string_print)
print(out)
return out
def data_collator(batch):
def get_position_ids(seq, bos_token_id):
seq_length = len(seq)
position_ids = torch.arange(seq_length, dtype=torch.long).unsqueeze(0)
return position_ids
def get_masks(seq, special_ids=IDS_ORG):
""" padding-mask """
# mask until ID_user
# context_length = seq.index(ID_user)
attention_mask = torch.ones((1, len(seq), len(seq)))
attention_mask.tril_()
        ### mask out everything before id_sop
# attention_mask[..., :context_length] = 1
        # ### if padding on the right, mask those positions as well
# for idx, s in enumerate(seq):
# if s in special_ids:
# attention_mask[..., idx] = 1
attention_mask = (attention_mask < 0.5).bool()
return attention_mask
len_max_batch = [len(batch[i].get("input_ids")) + len(batch[i].get("labels")) + 1
for i in range(len(batch))]
len_max_batch = min(MAX_LENGTH_QA, max(len_max_batch))
batch_attention_mask = []
batch_position_ids = []
batch_input_ids = []
batch_labels = []
for ba in batch:
x, y = ba.get("input_ids"), ba.get("labels")
len_padding = len_max_batch - len(x) - len(y)
# if tokenizer.padding_side and tokenizer.padding_side == "left":
# labels = [-100] * len_padding + [-100] * len(x) + y
# input_ids = [ID_PAD] * (len_padding) + x + y
# else:
# labels = [-100] * len(x) + y + [-100] * len_padding
# input_ids = x + y + [ID_PAD] * (len_padding)
        ## pretrain-style: do not mask the prompt tokens in the labels
if tokenizer.padding_side and tokenizer.padding_side == "left":
labels = [-100] * len_padding + x + y
input_ids = [ID_PAD] * (len_padding) + x + y
else:
labels = x + y + [-100] * len_padding
input_ids = x + y + [ID_PAD] * (len_padding)
tensor_position_ids = get_position_ids(input_ids, bos_token_id=ID_SOP)
tensor_attention_mask = get_masks(input_ids, special_ids=IDS_ORG)
tensor_input_ids = torch.tensor(input_ids, dtype=torch.long)
tensor_labels = torch.tensor(labels, dtype=torch.long)
batch_attention_mask.append(tensor_attention_mask)
batch_position_ids.append(tensor_position_ids)
batch_input_ids.append(tensor_input_ids)
batch_labels.append(tensor_labels)
# print_rank_0(batch_attention_mask)
batch_attention_mask = torch.stack(batch_attention_mask)
batch_position_ids = torch.stack(batch_position_ids)
batch_input_ids = torch.stack(batch_input_ids)
batch_labels = torch.stack(batch_labels)
input_dict = {
"full_attention_mask": copy.deepcopy(batch_attention_mask),
"attention_mask": batch_attention_mask,
"position_ids": batch_position_ids,
"input_ids": batch_input_ids,
"labels": batch_labels,
}
# print_rank_0(input_dict)
return input_dict
def dfs_file(path_dir):
"""
    Recursively collect all files under a directory (all levels, including subdirectories)
Args:
path_dir[String]:, path of dir, eg. "/home/data"
Returns:
data[List]: data of input, eg. ["2020_01_08.txt"]
"""
path_files = []
    for root, dirs, files in os.walk(path_dir):  # root dir, subdirectories, and files respectively
        for file in files:  # iterate over the files
            file_path = os.path.join(root, file)  # absolute path of the file
            path_files.append(file_path)  # append the file path to the list
files = list(set(path_files))
files.sort() # the same list
return files
def print_rank_0(*args):
""" 只打印 0 号GPU的 """
if torch.distributed.get_rank() == 0: # 一般用0,当然,可以选任意的rank保存。
print(*args)
def local_rank_is_0():
""" 判断是哪台机子的 """
flag = False
if torch.distributed.get_rank() == 0:
flag = True
return flag
dist.init_process_group(backend="nccl")
tokenizer = ChatGLMTokenizer.from_pretrained(PATH_MODEL_PRETRAIN)
# tokenizer.pad_token = tokenizer.eos_token
# tokenizer.padding_side = "left" # Allow batched inference
tokenizer.padding_side = "right" # Allow batched inference
ID_MASK = 64789
ID_gMASK = 64790
ID_sMASK = 64791
ID_SOP = 64792
ID_EOP = 64793
ID_PAD = 64793
ID_system = 64794
ID_user = 64795
ID_assistant = 64796
ID_observation = 64797
TOKEN_ASSISTANT = "<|assistant|>"
TOKEN_SYSTEM = "<|system|>"
TOKEN_USER = "<|user|>"
TOKEN_START = "sop"
TOKEN_END = "eop"
# IDS_ORG = [ID_MASK, ID_gMASK, ID_sMASK, ID_SOP, ID_EOP,
# ID_BOS, ID_EOS, ID_PAD]
# IDS_ORG = [ID_PAD]
IDS_ORG = [ID_user] # start, <|user|>
model = ChatGLMForConditionalGeneration.from_pretrained(PATH_MODEL_PRETRAIN)
model = prepare_model_for_half_training(model,
use_gradient_checkpointing=True,
output_embedding_layer_name="lm_head",
layer_norm_names=["post_attention_layernorm",
"final_layernorm",
"input_layernorm",
],
)
model.gradient_checkpointing_enable()
model.enable_input_require_grads()
model.is_parallelizable = IS_PARALLELIZABLE
model.model_parallel = MODEL_PARALLEL
model.config.use_cache = USE_CACHE | config = LoraConfig(target_modules=TARGET_MODULES, | 16 | 2023-11-30 12:27:42+00:00 | 12k |
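The row above fine-tunes ChatGLM3 with LoRA on right-padded batches: the collator keeps both prompt and answer tokens as labels (pretrain-style) and fills padding positions with -100 so the loss skips them. A minimal sketch of that padding convention, assuming illustrative token ids (only the pad id 64793 is taken from the row):

# Illustrative sketch (not part of the dataset row): right padding with -100 labels.
x = [64790, 64792, 64795]                      # prompt ids (made-up example values)
y = [30910, 13, 2]                             # answer ids (made-up example values)
len_max_batch = 8
len_padding = len_max_batch - len(x) - len(y)
input_ids = x + y + [64793] * len_padding      # 64793 is ID_PAD in the row above
labels = x + y + [-100] * len_padding          # -100 is ignored by torch's CrossEntropyLoss
assert len(input_ids) == len(labels) == len_max_batch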
yongzhuo/MacroGPT-Pretrain | macro_gpt/ft_gpt/train.pt.py | [
{
"identifier": "CUDA_VISIBLE_DEVICES",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "CUDA_VISIBLE_DEVICES = \"0\""
},
{
"identifier": "USE_TORCH",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "USE_TORCH = \"1\""
},
{
"identifier": "CPU_NUMS",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "CPU_NUMS = \"9\""
},
{
"identifier": "LlamaForCausalLM",
"path": "macro_gpt/models/llama/modeling_llama.py",
"snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.model = LlamaModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? 
Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)\n logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]\n logits = torch.cat(logits, dim=-1)\n else:\n # logits = self.lm_head(hidden_states)\n logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype))\n logits = logits.float()\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values is not None:\n past_length = past_key_values[0][0].shape[2]\n\n # Some generation methods already pass only the last input ID\n if input_ids.shape[1] > past_length:\n remove_prefix_length = past_length\n else:\n # Default to old behavior: keep only final ID\n remove_prefix_length = input_ids.shape[1] - 1\n\n input_ids = input_ids[:, remove_prefix_length:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -input_ids.shape[1] :]\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, 
beam_idx.to(past_state.device)) for past_state in layer_past),\n )\n return reordered_past"
},
{
"identifier": "LlamaTokenizer",
"path": "macro_gpt/models/llama/tokenization_llama.py",
"snippet": "class LlamaTokenizer(PreTrainedTokenizer):\n \"\"\"\n Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is\n no padding token in the original model.\n\n Args:\n vocab_file (`str`):\n Path to the vocabulary file.\n unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"<unk>\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"<s>\"`):\n The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.\n eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"</s>\"`):\n The end of sequence token.\n pad_token (`str` or `tokenizers.AddedToken`, *optional*):\n A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by\n attention mechanisms or loss computation.\n sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*):\n Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for\n SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,\n to set:\n\n - `enable_sampling`: Enable subword regularization.\n - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.\n\n - `nbest_size = {0,1}`: No sampling is performed.\n - `nbest_size > 1`: samples from the nbest_size results.\n - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)\n using forward-filtering-and-backward-sampling algorithm.\n\n - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for\n BPE-dropout.\n\n add_bos_token (`bool`, *optional*, defaults to `True`):\n Whether or not to add an `bos_token` at the start of sequences.\n add_eos_token (`bool`, *optional*, defaults to `False`):\n Whether or not to add an `eos_token` at the end of sequences.\n clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):\n Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like\n extra spaces.\n use_default_system_prompt (`bool`, *optional*, defaults to `True`):\n Whether or not the default system prompt for Llama should be used.\n spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not to add spaces between special tokens.\n legacy (`bool`, *optional*):\n Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622\n and #25224 which includes fixes to properly handle tokens that appear after special tokens. 
A simple\n example:\n\n - `legacy=True`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=True)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\")\n [8774, 32099, 3, 5, 1]\n ```\n - `legacy=False`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=False)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\") # the extra space `[3]` is no longer here\n [8774, 32099, 5, 1]\n ```\n Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.\n\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file,\n unk_token=\"<unk>\",\n bos_token=\"<s>\",\n eos_token=\"</s>\",\n pad_token=None,\n sp_model_kwargs: Optional[Dict[str, Any]] = None,\n add_bos_token=True,\n add_eos_token=False,\n clean_up_tokenization_spaces=False,\n use_default_system_prompt=True,\n spaces_between_special_tokens=False,\n legacy=None,\n **kwargs,\n ):\n self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs\n bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token\n eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token\n unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token\n pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token\n\n if legacy is None:\n logger.warning_once(\n f\"You are using the default legacy behaviour of the {self.__class__}. This is\"\n \" expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you.\"\n \" If you want to use the new behaviour, set `legacy=False`. 
This should only be set if you understand what it\"\n \" means, and thouroughly read the reason why this was added as explained in\"\n \" https://github.com/huggingface/transformers/pull/24565\"\n )\n legacy = True\n\n self.legacy = legacy\n self.vocab_file = vocab_file\n self.add_bos_token = add_bos_token\n self.add_eos_token = add_eos_token\n self.use_default_system_prompt = use_default_system_prompt\n self.sp_model = self.get_spm_processor(kwargs.pop(\"from_slow\", False))\n\n super().__init__(\n bos_token=bos_token,\n eos_token=eos_token,\n unk_token=unk_token,\n pad_token=pad_token,\n add_bos_token=add_bos_token,\n add_eos_token=add_eos_token,\n sp_model_kwargs=self.sp_model_kwargs,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n use_default_system_prompt=use_default_system_prompt,\n spaces_between_special_tokens=spaces_between_special_tokens,\n legacy=legacy,\n **kwargs,\n )\n\n @property\n def unk_token_length(self):\n return len(self.sp_model.encode(str(self.unk_token)))\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor\n def get_spm_processor(self, from_slow=False):\n tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n if self.legacy or from_slow: # no dependency on protobuf\n tokenizer.Load(self.vocab_file)\n return tokenizer\n\n with open(self.vocab_file, \"rb\") as f:\n sp_model = f.read()\n model_pb2 = import_protobuf(f\"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)\")\n model = model_pb2.ModelProto.FromString(sp_model)\n normalizer_spec = model_pb2.NormalizerSpec()\n normalizer_spec.add_dummy_prefix = False\n model.normalizer_spec.MergeFrom(normalizer_spec)\n sp_model = model.SerializeToString()\n tokenizer.LoadFromSerializedProto(sp_model)\n return tokenizer\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"sp_model\"] = None\n state[\"sp_model_proto\"] = self.sp_model.serialized_model_proto()\n return state\n\n def __setstate__(self, d):\n self.__dict__ = d\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.LoadFromSerializedProto(self.sp_model_proto)\n\n @property\n def vocab_size(self):\n \"\"\"Returns vocab size\"\"\"\n return self.sp_model.get_piece_size()\n\n def get_vocab(self):\n \"\"\"Returns vocab as a dict\"\"\"\n vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}\n vocab.update(self.added_tokens_encoder)\n return vocab\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize\n def tokenize(self, text: \"TextInput\", add_special_tokens=False, **kwargs) -> List[str]:\n \"\"\"\n Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the\n first token is special.\n \"\"\"\n if self.legacy or len(text) == 0:\n return super().tokenize(text, **kwargs)\n\n tokens = super().tokenize(SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, \" \"), **kwargs)\n\n if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:\n tokens = tokens[1:]\n return tokens\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize\n def _tokenize(self, text, **kwargs):\n \"\"\"\n Returns a tokenized string.\n\n We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any\n SPIECE_UNDERLINE. For example: `self.sp_model.encode(f\"{SPIECE_UNDERLINE}Hey\", out_type = str)` will give\n `['H', 'e', 'y']` instead of `['▁He', 'y']`. 
Thus we always encode `f\"{unk_token}text\"` and strip the\n `unk_token`. Here is an example with `unk_token = \"<unk>\"` and `unk_token_length = 4`.\n `self.tokenizer.sp_model.encode(\"<unk> Hey\", out_type = str)[4:]`.\n \"\"\"\n tokens = self.sp_model.encode(text, out_type=str)\n if self.legacy or not text.startswith((SPIECE_UNDERLINE, \" \")):\n return tokens\n\n # 1. Encode string + prefix ex: \"<unk> Hey\"\n tokens = self.sp_model.encode(self.unk_token + text, out_type=str)\n # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']\n return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens\n\n def _convert_token_to_id(self, token):\n \"\"\"Converts a token (str) in an id using the vocab.\"\"\"\n return self.sp_model.piece_to_id(token)\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n token = self.sp_model.IdToPiece(index)\n return token\n\n def convert_tokens_to_string(self, tokens):\n \"\"\"Converts a sequence of tokens (string) in a single string.\"\"\"\n # since we manually add the prefix space, we have to remove it when decoding\n if tokens[0].startswith(SPIECE_UNDERLINE):\n tokens[0] = tokens[0][1:]\n\n current_sub_tokens = []\n out_string = \"\"\n prev_is_special = False\n for i, token in enumerate(tokens):\n # make sure that special tokens are not decoded using sentencepiece model\n if token in self.all_special_tokens:\n if not prev_is_special and i != 0 and self.legacy:\n out_string += \" \"\n out_string += self.sp_model.decode(current_sub_tokens) + token\n prev_is_special = True\n current_sub_tokens = []\n else:\n current_sub_tokens.append(token)\n prev_is_special = False\n out_string += self.sp_model.decode(current_sub_tokens)\n return out_string\n\n def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:\n \"\"\"\n Save the vocabulary and special tokens file to a directory.\n\n Args:\n save_directory (`str`):\n The directory in which to save the vocabulary.\n\n Returns:\n `Tuple(str)`: Paths to the files saved.\n \"\"\"\n if not os.path.isdir(save_directory):\n logger.error(f\"Vocabulary path ({save_directory}) should be a directory\")\n return\n out_vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n elif not os.path.isfile(self.vocab_file):\n with open(out_vocab_file, \"wb\") as fi:\n content_spiece_model = self.sp_model.serialized_model_proto()\n fi.write(content_spiece_model)\n\n return (out_vocab_file,)\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = bos_token_id + token_ids_0 + eos_token_id\n\n if token_ids_1 is not None:\n output = output + bos_token_id + token_ids_1 + eos_token_id\n\n return output\n\n def get_special_tokens_mask(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False\n ) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\n Returns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n return super().get_special_tokens_mask(\n token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True\n )\n\n bos_token_id = [1] if self.add_bos_token else []\n eos_token_id = [1] if self.add_eos_token else []\n\n if token_ids_1 is None:\n return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id\n return (\n bos_token_id\n + ([0] * len(token_ids_0))\n + eos_token_id\n + bos_token_id\n + ([0] * len(token_ids_1))\n + eos_token_id\n )\n\n def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT\n sequence pair mask has the following format:\n\n ```\n 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence |\n ```\n\n if token_ids_1 is None, only returns the first portion of the mask (0s).\n\n Args:\n token_ids_0 (`List[int]`):\n List of ids.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).\n \"\"\"\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)\n\n if token_ids_1 is not None:\n output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)\n\n return output\n\n @property\n def default_chat_template(self):\n \"\"\"\n LLaMA uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.\n Assistant messages do not have special tokens, because LLaMA chat models are generally trained with strict\n user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering\n rather than needing special tokens. The system message is partly 'embedded' in the first user message, which\n results in an unusual token ordering when it is present. 
This template should definitely be changed if you wish\n to fine-tune a model with more flexible role ordering!\n\n The output should look something like:\n\n <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos> <bos>[INST] Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST]\n \"\"\"\n\n template = (\n \"{% if messages[0]['role'] == 'system' %}\"\n \"{% set loop_messages = messages[1:] %}\" # Extract system message if it's present\n \"{% set system_message = messages[0]['content'] %}\"\n \"{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}\"\n \"{% set loop_messages = messages %}\" # Or use the default system message if the flag is set\n \"{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}\"\n \"{% else %}\"\n \"{% set loop_messages = messages %}\"\n \"{% set system_message = false %}\"\n \"{% endif %}\"\n \"{% for message in loop_messages %}\" # Loop over all non-system messages\n \"{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\"\n \"{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\"\n \"{% endif %}\"\n \"{% if loop.index0 == 0 and system_message != false %}\" # Embed system message in first message\n \"{% set content = '<<SYS>>\\\\n' + system_message + '\\\\n<</SYS>>\\\\n\\\\n' + message['content'] %}\"\n \"{% else %}\"\n \"{% set content = message['content'] %}\"\n \"{% endif %}\"\n \"{% if message['role'] == 'user' %}\" # After all of that, handle messages/roles in a fairly normal way\n \"{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}\"\n \"{% elif message['role'] == 'system' %}\"\n \"{{ '<<SYS>>\\\\n' + content.strip() + '\\\\n<</SYS>>\\\\n\\\\n' }}\"\n \"{% elif message['role'] == 'assistant' %}\"\n \"{{ ' ' + content.strip() + ' ' + eos_token }}\"\n \"{% endif %}\"\n \"{% endfor %}\"\n )\n template = template.replace(\"USE_DEFAULT_PROMPT\", \"true\" if self.use_default_system_prompt else \"false\")\n default_message = DEFAULT_SYSTEM_PROMPT.replace(\"\\n\", \"\\\\n\").replace(\"'\", \"\\\\'\")\n template = template.replace(\"DEFAULT_SYSTEM_MESSAGE\", default_message)\n\n return template"
},
{
"identifier": "LlamaConfig",
"path": "macro_gpt/models/llama/modeling_llama.py",
"snippet": "def is_flash_attn_available():\n def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[Tuple[bool, str], bool]:\ndef _get_unpad_data(padding_mask):\ndef _make_causal_mask(\n input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0\n):\ndef _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):\n def __init__(self, hidden_size, eps=1e-6):\n def forward(self, hidden_states):\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):\n def _set_cos_sin_cache(self, seq_len, device, dtype):\n def forward(self, x, seq_len=None):\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):\n def _set_cos_sin_cache(self, seq_len, device, dtype):\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):\n def _set_cos_sin_cache(self, seq_len, device, dtype):\ndef rotate_half(x):\ndef apply_rotary_pos_emb(q, k, cos, sin, position_ids):\n def __init__(self, config):\n def forward(self, x):\ndef repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:\n def __init__(self, config: LlamaConfig):\n def _init_rope(self):\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n padding_mask: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n padding_mask: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n def _flash_attention_forward(\n self, query_states, key_states, value_states, padding_mask, query_length, dropout=0.0, softmax_scale=None\n ):\n def _upad_input(self, query_layer, key_layer, value_layer, padding_mask, query_length):\n def __init__(self, config: LlamaConfig):\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = False,\n padding_mask: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n def _init_weights(self, module):\n def _set_gradient_checkpointing(self, module, value=False):\n def __init__(self, config: LlamaConfig):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: 
Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPast]:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n def __init__(self, config):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def set_decoder(self, decoder):\n def get_decoder(self):\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n def _reorder_cache(past_key_values, beam_idx):\n def __init__(self, config):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, SequenceClassifierOutputWithPast]:\n_CONFIG_FOR_DOC = \"LlamaConfig\"\nLLAMA_START_DOCSTRING = r\"\"\"\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`LlamaConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\nLLAMA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\nclass LlamaRMSNorm(nn.Module):\nclass LlamaRotaryEmbedding(nn.Module):\nclass LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):\nclass LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):\nclass LlamaMLP(nn.Module):\nclass LlamaAttention(nn.Module):\nclass LlamaFlashAttention2(LlamaAttention):\nclass LlamaDecoderLayer(nn.Module):\nclass LlamaPreTrainedModel(PreTrainedModel):\nclass LlamaModel(LlamaPreTrainedModel):\nclass LlamaForCausalLM(LlamaPreTrainedModel):\nclass LlamaForSequenceClassification(LlamaPreTrainedModel):"
},
{
"identifier": "PATH_MODEL_PRETRAIN",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "PATH_MODEL_PRETRAIN = \"\""
},
{
"identifier": "DATA_PATH",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "DATA_PATH = \"../datasets/tigerbot-train-00001-of-00097.json\""
},
{
"identifier": "MODEL_SAVE_DIR",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "MODEL_SAVE_DIR = \"model_macrogpt_1b3_float32\""
},
{
"identifier": "REPO_ID",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "REPO_ID = \"Macropodus/macrogpt-tokenizer\""
},
{
"identifier": "MICRO_BATCH_SIZE",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "MICRO_BATCH_SIZE = 4 # default=4 # this could actually be 5 but i like powers of 2"
},
{
"identifier": "BATCH_SIZE",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "BATCH_SIZE = 128"
},
{
"identifier": "GRADIENT_ACCUMULATION_STEPS",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "GRADIENT_ACCUMULATION_STEPS = BATCH_SIZE // MICRO_BATCH_SIZE"
},
{
"identifier": "LEARNING_RATE",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "LEARNING_RATE = 3e-4 # default=3e-4 # the Karpathy constant"
},
{
"identifier": "EPOCHS",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "EPOCHS = 1 # default=3 # we don't always need 3 tbh"
},
{
"identifier": "SAVE_STEPS",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "SAVE_STEPS = 384"
},
{
"identifier": "VAL_SET_SIZE",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "VAL_SET_SIZE = 0"
},
{
"identifier": "TARGET_MODULES",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "TARGET_MODULES = [\"query_key_value\"]"
},
{
"identifier": "IS_PARALLELIZABLE",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "IS_PARALLELIZABLE = False"
},
{
"identifier": "MODEL_PARALLEL",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "MODEL_PARALLEL = False"
},
{
"identifier": "USE_CACHE",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "USE_CACHE = False"
},
{
"identifier": "MAX_LENGTH_Q",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "MAX_LENGTH_Q = 1024 - 2 # default=128 - 2"
},
{
"identifier": "MAX_LENGTH_A",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "MAX_LENGTH_A = 1024 - 2 # default=128 - 2"
},
{
"identifier": "MAX_LENGTH_QA",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "MAX_LENGTH_QA = MAX_LENGTH_Q + MAX_LENGTH_A + 4"
},
{
"identifier": "LORA_DROPOUT",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "LORA_DROPOUT = 0.05"
},
{
"identifier": "LORA_ALPHA",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "LORA_ALPHA = 16"
},
{
"identifier": "LORA_R",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "LORA_R = 8"
},
{
"identifier": "PATH_MODEL_CONFIG",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "PATH_MODEL_CONFIG = \"config_macrogpt_1b3_float32.json\" or MODEL_SAVE_DIR"
},
{
"identifier": "PATH_TOKENIZER_PRETRAIN",
"path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py",
"snippet": "PATH_TOKENIZER_PRETRAIN = REPO_ID or \"./macrogpt.model\""
}
] | import random
import copy
import sys
import os
import bitsandbytes as bnb
import torch.nn as nn
import transformers
import torch
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import CUDA_VISIBLE_DEVICES, USE_TORCH, CPU_NUMS # from config
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from peft import (get_peft_model_state_dict, get_peft_model, LoraConfig)
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.modeling_utils import unwrap_model
from tensorboardX import SummaryWriter
from datasets import load_dataset
from macro_gpt.models.llama.modeling_llama import LlamaForCausalLM as LLMForCausalLM
from macro_gpt.models.llama.tokenization_llama import LlamaTokenizer as LLMTokenizer
from macro_gpt.models.llama.modeling_llama import LlamaConfig as LLMConfig
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import PATH_MODEL_PRETRAIN, DATA_PATH, MODEL_SAVE_DIR, REPO_ID
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import MICRO_BATCH_SIZE, BATCH_SIZE, GRADIENT_ACCUMULATION_STEPS
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import LEARNING_RATE, EPOCHS, SAVE_STEPS, VAL_SET_SIZE, TARGET_MODULES
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import IS_PARALLELIZABLE, MODEL_PARALLEL, USE_CACHE
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import MAX_LENGTH_Q, MAX_LENGTH_A, MAX_LENGTH_QA
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import LORA_DROPOUT, LORA_ALPHA, LORA_R
from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import PATH_MODEL_CONFIG, PATH_TOKENIZER_PRETRAIN | 10,799 | # !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2023/3/5 21:04
# @author : Mo
# @function: macro-gpt
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path.append(path_root)
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:3072"
| # !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2023/3/5 21:04
# @author : Mo
# @function: macro-gpt
path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path.append(path_root)
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:3072" | os.environ["CUDA_VISIBLE_DEVICES"] = CUDA_VISIBLE_DEVICES | 0 | 2023-11-30 12:39:19+00:00 | 12k |
owkin/fedeca | fedeca/scripts/fl_iptw.py | [
{
"identifier": "FedECA",
"path": "fedeca/fedeca_core.py",
"snippet": "class FedECA(Experiment, BaseSurvivalEstimator):\n \"\"\"FedECA class tthat performs Federated IPTW.\"\"\"\n\n def __init__(\n self,\n ndim: int,\n ds_client=None,\n train_data_nodes: Union[list[TrainDataNode], None] = None,\n treated_col: str = \"treated\",\n event_col: str = \"E\",\n duration_col: str = \"T\",\n ps_col=\"propensity_scores\",\n num_rounds_list: list[int] = [10, 10],\n damping_factor_nr: float = 0.8,\n l2_coeff_nr: float = 0.0,\n standardize_data: bool = True,\n penalizer: float = 0.0,\n l1_ratio: float = 1.0,\n initial_step_size: float = 0.95,\n learning_rate_strategy: str = \"lifelines\",\n dtype: float = \"float64\",\n propensity_strategy=\"iptw\",\n robust: bool = False,\n dp_target_epsilon: Union[float, None] = None,\n dp_target_delta: Union[float, None] = None,\n dp_max_grad_norm: Union[float, None] = None,\n dp_propensity_model_optimizer_class: Optimizer = SGD,\n dp_propensity_model_optimizer_kwargs: Union[dict, None] = None,\n dp_propensity_model_training_params: Union[dict, None] = None,\n seed: int = 42,\n aggregation_node: Union[AggregationNode, None] = None,\n experiment_folder: str = \"./iptw_experiment\",\n clean_models: bool = False,\n dependencies: Union[list, None] = None,\n timeout: int = 3600,\n sleep_time: int = 30,\n fedeca_path: Union[None, str] = None,\n evaluation_frequency=None,\n ):\n \"\"\"Initialize the Federated IPTW class.\n\n Implements the FedECA algorithm which combines\n an estimation of propensity scores using logistic regression\n and the fit of a weighted Cox Model in a federated fashion.\n\n Parameters\n ----------\n client : fl.client.Client\n Federated Learning client object.\n train_data_nodes : list\n List of data nodes participating in the federated training.\n ndim : int\n Number of dimensions (features) in the dataset.\n treated_col : str, optional\n Column name indicating treatment status, by default \"treated\".\n event_col : str, optional\n Column name indicating event occurrence, by default \"E\".\n duration_col : str, optional\n Column name indicating time to event or censoring, by default \"T\".\n num_rounds_list : list, optional\n List of number of rounds for each stage, by default [10, 10].\n damping_factor_nr : float, optional\n Damping factor for natural gradient regularization, by default 0.8.\n l2_coeff_nr : float, optional\n L2 regularization coefficient for natural gradient, by default 0.0.\n standardize_data : bool, optional\n Whether to standardize data before training, by default True.\n penalizer : float, optional\n Penalizer for IPTW objective, by default 0.0.\n l1_ratio : float, optional\n L1 ratio for IPTW objective, by default 1.0.\n initial_step_size : float, optional\n Initial step size for optimization, by default 0.95.\n learning_rate_strategy : str, optional\n Learning rate strategy, by default \"lifelines\".\n batch_size : int, optional\n Batch size for optimization, by default sys.maxsize.\n dtype : str, optional\n Data type for the model, by default \"float64\".\n propensity_strategy: str, optional\n The propensity strategy to use.\n robust: bool, optional\n Whether or not to use robust estimator of variance as in [1] and\n lifelines.\n Defauts to False.\n [1] David A Binder. Fitting cox’s proportional hazards models from survey data. Biometrika, 79(1):139–147, 1992. # noqa: E501\n dp_target_epsilon: float\n The target epsilon for (epsilon, delta)-differential\n private guarantee. 
Defaults to None.\n dp_target_delta: float\n The target delta for (epsilon, delta)-differential\n private guarantee. Defaults to None.\n dp_max_grad_norm: float\n The maximum L2 norm of per-sample gradients;\n used to enforce differential privacy. Defaults to None.\n dp_propensity_model_optimizer_class: torch.optim.Optimizer\n The optimizer to use for the training of the propensity model.\n Defauts to Adam.\n dp_propensity_model_optimizer_class_kwargs: dict\n The params to give to optimizer class.\n dp_propensity_model_training_params: dict\n A dict with keys batch_size and num_updates for the DP-SGD training.\n Defaults to None.\n seed : int, optional\n Seed for random number generation, by default 42.\n aggregation_node : str or None, optional\n Node for aggregation, by default None.\n experiment_folder : str, optional\n Folder path for experiment outputs, by default \"./iptw_experiment\".\n clean_models : bool, optional\n Whether to clean models after training, by default False.\n dependencies : list, optional\n List of dependencies, by default None.\n timeout : int, optional\n Timeout for a single round of federated learning, by default 3600.\n sleep_time : int, optional\n Sleep time between rounds, by default 30.\n fedeca_path:\n Path towards the fedeca reository.\n evaluation_frequency:\n Evaluation_frequency.\n **kwargs\n Additional keyword arguments.\n \"\"\"\n self.standardize_data = standardize_data\n assert dtype in [\"float64\", \"float32\", \"float16\"]\n if dtype == \"float64\":\n self.torch_dtype = torch.float64\n elif dtype == \"float32\":\n self.torch_dtype = torch.float32\n else:\n self.torch_dtype = torch.float16\n\n self.ndim = ndim\n self.treated_col = treated_col\n self.event_col = event_col\n self.duration_col = duration_col\n self.ps_col = ps_col\n self.seed = seed\n self.penalizer = penalizer\n self.l1_ratio = l1_ratio\n self.initial_step_size = initial_step_size\n self.learning_rate_strategy = learning_rate_strategy\n self.num_rounds_list = num_rounds_list\n self.timeout = timeout\n self.sleep_time = sleep_time\n self.damping_factor_nr = damping_factor_nr\n self.l2_coeff_nr = l2_coeff_nr\n self.propensity_strategy = propensity_strategy\n self.robust = robust\n self.dp_target_delta = dp_target_delta\n self.dp_target_epsilon = dp_target_epsilon\n self.dp_max_grad_norm = dp_max_grad_norm\n self.dp_propensity_model_training_params = dp_propensity_model_training_params\n self.dp_propensity_model_optimizer_class = dp_propensity_model_optimizer_class\n self.dp_propensity_model_optimizer_kwargs = dp_propensity_model_optimizer_kwargs\n self.dependencies = dependencies\n self.experiment_folder = experiment_folder\n self.fedeca_path = fedeca_path\n self.evaluation_frequency = evaluation_frequency\n self.dtype = dtype\n\n kwargs = {}\n kwargs[\"algo_dependencies\"] = self.dependencies\n self.accuracy_metrics_dict = {\n \"accuracy\": make_accuracy_function(self.treated_col)\n }\n self.cindex_metrics_dict = {\n \"C-index\": make_c_index_function(\n event_col=self.event_col, duration_col=self.duration_col\n )\n }\n self.metrics_dicts_list = [\n self.accuracy_metrics_dict,\n self.cindex_metrics_dict,\n ]\n\n # Note that we don't use self attributes because substrafl classes are messed up\n # and we don't want confusion\n self.logreg_model = LogisticRegressionTorch(self.ndim, self.torch_dtype)\n self.logreg_dataset_class = make_substrafl_torch_dataset_class(\n [self.treated_col],\n self.event_col,\n self.duration_col,\n dtype=dtype,\n return_torch_tensors=True,\n )\n # Set 
propensity model training to DP or not DP mode\n self.set_propensity_model_strategy()\n\n # We use only the treatment variable in the model\n cox_model = CoxPHModelTorch(ndim=1, torch_dtype=self.torch_dtype)\n survival_dataset_class = make_substrafl_torch_dataset_class(\n [self.duration_col, self.event_col],\n self.event_col,\n self.duration_col,\n dtype=dtype,\n )\n\n # no self attributes in this class !!!!!!\n class WDAlgo(TorchWebDiscoAlgo):\n def __init__(self, propensity_model, robust):\n super().__init__(\n model=cox_model,\n # TODO make this batch-size argument disappear from\n # webdisco algo\n batch_size=sys.maxsize,\n dataset=survival_dataset_class,\n seed=seed,\n duration_col=duration_col,\n event_col=event_col,\n treated_col=treated_col,\n standardize_data=standardize_data,\n penalizer=penalizer,\n l1_ratio=l1_ratio,\n initial_step_size=initial_step_size,\n learning_rate_strategy=learning_rate_strategy,\n store_hessian=True,\n propensity_model=propensity_model,\n propensity_strategy=propensity_strategy,\n robust=robust,\n )\n self._propensity_model = propensity_model\n\n self.webdisco_algo = WDAlgo(propensity_model=None, robust=self.robust)\n self.webdisco_strategy = WebDisco(\n algo=self.webdisco_algo, standardize_data=self.standardize_data\n )\n\n kwargs[\"strategies\"] = [self.propensity_model_strategy, self.webdisco_strategy]\n if self.robust:\n # We prepare robust estimation\n class MockAlgo:\n def __init__(self):\n self.strategies = [\"Robust Cox Variance\"]\n\n mock_algo = MockAlgo()\n kwargs[\"strategies\"].append(\n RobustCoxVariance(\n algo=mock_algo,\n )\n )\n # We need those two lines for the zip to consider all 3\n # strategies\n self.metrics_dicts_list.append({})\n self.num_rounds_list.append(sys.maxsize)\n\n kwargs[\"metrics_dicts_list\"] = self.metrics_dicts_list\n kwargs[\"ds_client\"] = ds_client\n kwargs[\"train_data_nodes\"] = train_data_nodes\n kwargs[\"aggregation_node\"] = aggregation_node\n kwargs[\"experiment_folder\"] = self.experiment_folder\n kwargs[\"clean_models\"] = clean_models\n kwargs[\"num_rounds_list\"] = self.num_rounds_list\n kwargs[\"fedeca_path\"] = self.fedeca_path\n kwargs[\"algo_dependencies\"] = self.dependencies\n kwargs[\"evaluation_frequency\"] = self.evaluation_frequency\n\n # TODO: test_data_nodes and evaluation_frequency are not passed\n\n super().__init__(**kwargs)\n\n def check_cp_status(self, idx=0):\n \"\"\"Check the status of the process.\"\"\"\n training_type = \"training\"\n if idx == 0:\n model_name = \"Propensity Model\"\n elif idx == 1:\n model_name = \"Weighted Cox Model\"\n else:\n model_name = \"Robust Variance\"\n training_type = \"estimation\"\n\n print(f\"Waiting on {model_name} {training_type} to finish...\")\n t1 = time.time()\n t2 = t1\n while (t2 - t1) < self.timeout:\n status = self.ds_client.get_compute_plan(\n self.compute_plan_keys[idx].key\n ).status\n if status == ComputePlanStatus.done:\n print(\n f\"\"\"Compute plan {self.compute_plan_keys[0].key} of {model_name} has\n finished !\"\"\"\n )\n break\n elif (\n status == ComputePlanStatus.failed\n or status == ComputePlanStatus.canceled\n ):\n raise ValueError(\n f\"\"\"Compute plan {self.compute_plan_keys[0].key} of {model_name} has\n failed\"\"\"\n )\n elif (\n status == ComputePlanStatus.doing\n or status == ComputePlanStatus.todo\n or status == ComputePlanStatus.waiting\n ):\n pass\n else:\n print(\n f\"\"\"Compute plan status is {status}, this shouldn't happen, sleeping\n {self.time_sleep} and retrying until timeout {self.timeout}\"\"\"\n )\n 
time.sleep(self.sleep_time)\n\n def set_propensity_model_strategy(self):\n \"\"\"Set FedECA to use DP.\n\n At the end it sets the parameter self.propensity_model_strateg\n \"\"\"\n self.dp_params_given = [\n self.dp_max_grad_norm is not None,\n self.dp_target_epsilon is not None,\n self.dp_target_delta is not None,\n ]\n\n if any(self.dp_params_given) and not all(self.dp_params_given):\n raise ValueError(\n \"To use DP you should provide values for all DP parameters: \"\n \"dp_max_grad_norm, dp_target_epsilon and dp_target_delta\"\n )\n self._apply_dp = all(self.dp_params_given)\n if self._apply_dp:\n assert (\n self.dp_propensity_model_training_params is not None\n ), \"You should give dp_propensity_model_training_params\"\n \"={'batch_size': ?, 'num_updates': ?}\"\n assert (\n \"batch_size\" in self.dp_propensity_model_training_params\n and \"num_updates\" in self.dp_propensity_model_training_params\n ), \"You should fill all fields of dp_propensity_model_training_params\"\n \"={'batch_size': ?, 'num_updates': ?}\"\n if self.dp_propensity_model_optimizer_kwargs is None:\n self.dp_propensity_model_optimizer_kwargs = {}\n dp_propensity_model_optimizer = self.dp_propensity_model_optimizer_class(\n params=self.logreg_model.parameters(),\n **self.dp_propensity_model_optimizer_kwargs,\n )\n num_rounds_propensity = self.num_rounds_list[0]\n\n # no self attributes in this class !!!!!!\n # fed_iptw_self = self hack doesn't work for serialization issue\n logreg_model = self.logreg_model\n logreg_dataset_class = self.logreg_dataset_class\n seed = self.seed\n num_updates = self.dp_propensity_model_training_params[\"num_updates\"]\n batch_size = self.dp_propensity_model_training_params[\"batch_size\"]\n dp_target_epsilon = self.dp_target_epsilon\n dp_target_delta = self.dp_target_delta\n dp_max_grad_norm = self.dp_max_grad_norm\n\n class DPLogRegAlgo(TorchDPFedAvgAlgo):\n def __init__(self):\n super().__init__(\n model=logreg_model,\n criterion=nn.BCELoss(),\n optimizer=dp_propensity_model_optimizer,\n dataset=logreg_dataset_class,\n seed=seed,\n num_updates=num_updates,\n batch_size=batch_size,\n num_rounds=num_rounds_propensity,\n dp_target_epsilon=dp_target_epsilon,\n dp_target_delta=dp_target_delta,\n dp_max_grad_norm=dp_max_grad_norm,\n )\n\n self.dp_algo = DPLogRegAlgo()\n self.dp_strategy = FedAvg(algo=self.dp_algo)\n self.propensity_model_strategy = self.dp_strategy\n else:\n # no self attributes in this class\n # fed_iptw_self = self hack doesn't work for serialization issue\n logreg_model = self.logreg_model\n logreg_dataset_class = self.logreg_dataset_class\n seed = self.seed\n l2_coeff_nr = self.l2_coeff_nr\n\n class NRAlgo(TorchNewtonRaphsonAlgo):\n def __init__(self):\n super().__init__(\n model=logreg_model,\n batch_size=sys.maxsize,\n criterion=nn.BCELoss(),\n dataset=logreg_dataset_class,\n seed=seed,\n l2_coeff=l2_coeff_nr,\n )\n\n self.nr_algo = NRAlgo()\n self.nr_strategy = NewtonRaphson(\n damping_factor=self.damping_factor_nr, algo=self.nr_algo\n )\n self.propensity_model_strategy = self.nr_strategy\n\n def reset_experiment(self):\n \"\"\"Remove the propensity model just in case.\"\"\"\n super().reset_experiment()\n if hasattr(self, \"propensity_model\"):\n self.propensity_model = None\n\n def fit(\n self,\n data: pd.DataFrame,\n targets: Optional[pd.DataFrame] = None,\n n_clients: Union[int, None] = None,\n split_method: Union[Callable, None] = None,\n split_method_kwargs: Union[Callable, None] = None,\n data_path: Union[str, None] = None,\n robust: Union[bool, None] = None,\n 
dp_target_epsilon: Union[float, None] = None,\n dp_target_delta: Union[float, None] = None,\n dp_max_grad_norm: Union[float, None] = None,\n dp_propensity_model_training_params: Union[dict, None] = None,\n dp_propensity_model_optimizer_class: Union[Optimizer, None] = None,\n dp_propensity_model_optimizer_kwargs: Union[dict, None] = None,\n backend_type: str = \"subprocess\",\n urls: Union[list[str], None] = None,\n server_org_id: Union[str, None] = None,\n tokens: Union[list[str], None] = None,\n ):\n \"\"\"Fit strategies on global data split across clients.\n\n For test if provided we use test_data_nodes from int or the\n train_data_nodes in the latter train=test.\n\n Parameters\n ----------\n data : pd.DataFrame\n The global data to be split has to be a dataframe as we only support\n one opener type.\n targets : Optional[pd.DataFrame], optional\n A dataframe with propensity score or nothing.\n nb_clients : Union[int, None], optional\n The number of clients used to split data across, by default None\n split_method : Union[Callable, None], optional\n How to split data across the nb_clients, by default None\n split_method_kwargs : Union[Callable, None], optional\n Argument of the function used to split data, by default None\n data_path : Union[str, None]\n Where to store the data on disk when backend is not remote.\n robust: Union[None, bool], optional\n Whether or not to use robust estimator of variance as in [1] and\n lifelines.\n Defauts to False.\n [1] David A Binder. Fitting cox’s proportional hazards models from survey data. Biometrika, 79(1):139–147, 1992. # noqa: E501\n dp_target_epsilon: float\n The target epsilon for (epsilon, delta)-differential\n private guarantee. Defaults to None.\n dp_target_delta: float\n The target delta for (epsilon, delta)-differential\n private guarantee. Defaults to None.\n dp_max_grad_norm: float\n The maximum L2 norm of per-sample gradients;\n used to enforce differential privacy. Defaults to None.\n dp_propensity_model_optimizer_class: torch.optim.Optimizer\n The optimizer to use for the training of the propensity model.\n Defauts to Adam.\n dp_propensity_model_optimizer_class_kwargs: dict\n The params to give to optimizer class.\n dp_propensity_model_training_params: dict\n A dict with keys batch_size and num_updates for the DP-SGD training.\n Defaults to None.\n backend_type: str\n The backend to use for substra. Can be either:\n [\"subprocess\", \"docker\", \"remote\"]. Defaults to \"subprocess\".\n urls: Union[list[str], None]\n Urls corresponding to clients API if using remote backend_type.\n Defaults to None.\n server_org_id: Union[str, None]\n Url corresponding to server API if using remote backend_type.\n Defaults to None.\n tokens: Union[list[str], None]\n Tokens necessary to authenticate each client API if backend_type\n is remote. 
Defauts to None.\n \"\"\"\n # Reset experiment so that it can fit on a new dataset\n self.reset_experiment()\n if backend_type != \"remote\" and (\n urls is not None or server_org_id is not None or tokens is not None\n ):\n print(\n \"urls, server_org_id and tokens are ignored if backend_type is \"\n \"not remote; Make sure that you launched the fit with the right\"\n \" combination of parameters.\"\n )\n\n # We first have to create the TrainDataNodes objects for this we split\n # the data into nb_clients using split_method\n (\n self.clients,\n self.train_data_nodes,\n test_data_nodes,\n _,\n _,\n ) = split_dataframe_across_clients(\n df=data,\n n_clients=n_clients,\n split_method=split_method,\n split_method_kwargs=split_method_kwargs,\n backend_type=backend_type,\n data_path=data_path,\n urls=urls,\n tokens=tokens,\n )\n if server_org_id is not None:\n # Curiously we don't need to identify the server with its own token\n # it's basically a passive entity\n kwargs_agg_node = {\n \"organization_id\": server_org_id,\n }\n self.aggregation_node = AggregationNode(**kwargs_agg_node)\n # Overwrites test_data_nodes\n if self.test_data_nodes is None:\n self.test_data_nodes = test_data_nodes\n else:\n raise ValueError(\n \"You should not use the fit method if you already provided\"\n \" test_data_nodes\"\n )\n\n # So there is a tension between every param is given at instantiation or\n # everything is given to fit\n dp_params_given = False\n for dp_param_name in [\n \"dp_target_epsilon\",\n \"dp_target_delta\",\n \"dp_max_grad_norm\",\n \"dp_propensity_model_training_params\",\n \"dp_propensity_model_optimizer_class\",\n \"dp_propensity_model_optimizer_kwargs\",\n ]:\n param = eval(dp_param_name)\n if param is not None:\n dp_params_given = True\n setattr(self, dp_param_name, param)\n\n if dp_params_given:\n # We need to reset the training mode more deeply\n self.set_propensity_model_strategy()\n # Allow for robust=True\n self.strategies[0] = self.propensity_model_strategy\n self.strategies[1] = self.webdisco_strategy\n\n if robust is not None and robust != self.robust:\n self.robust = robust\n\n if self.robust:\n\n class MockAlgo:\n def __init__(self):\n self.strategies = [\"Robust Cox Variance\"]\n\n mock_algo = MockAlgo()\n self.strategies.append(\n RobustCoxVariance(\n algo=mock_algo,\n )\n )\n # We put WebDisco in \"robust\" mode in the sense that we ask it\n # to store all needed quantities for robust variance estimation\n self.strategies[\n 1\n ].algo._robust = True # not sufficient for serialization\n # possible only because we added robust as a kwargs\n self.strategies[1].algo.kwargs.update({\"robust\": True})\n # We need those two lines for the zip to consider all 3\n # strategies\n self.metrics_dicts_list.append({})\n self.num_rounds_list.append(sys.maxsize)\n else:\n self.strategies = self.strategies[:2]\n\n self.run(targets=targets)\n self.propensity_scores_, self.weights_ = self.compute_propensity_scores(data)\n\n def run(self, targets: Union[pd.DataFrame, None] = None):\n \"\"\"Run the federated iptw algorithms.\"\"\"\n del targets\n print(\"Careful for now the argument target is ignored completely\")\n # We first run the propensity model\n print(\"Fitting the propensity model...\")\n t1 = time.time()\n super().run(1)\n\n if not (self.simu_mode):\n self.check_cp_status()\n self.performances_propensity_model = pd.DataFrame(\n self.ds_client.get_performances(self.compute_plan_keys[0].key).dict()\n )\n else:\n self.performances_propensity_model = self.performances_strategies[0]\n 
print(self.performances_propensity_model)\n t2 = time.time()\n self.propensity_model_fit_time = t2 - t1\n print(f\"Time to fit Propensity model {self.propensity_model_fit_time}s\")\n print(\"Finished, recovering the final propensity model from substra\")\n # TODO to add the opportunity to use the targets you have to either:\n # give the full targets to every client as a kwargs of their Algo\n # so effectively one would need to reinstantiate algos objects or to\n # modify the API to do it in the run (cleaner)\n # or to rebuild the data on disk with an additional column that would be\n # the propensity score, aka rerun split_dataframes after having given it\n # an additional column and modify the algo so that it uses this column as\n # a score. Both schemes are quite cumbersome to implement.\n # We retrieve the model and pass it to the strategy\n # we run the IPTW Cox\n if not (self.simu_mode):\n algo = download_algo_state(\n client=self.ds_client,\n compute_plan_key=self.compute_plan_keys[0].key,\n round_idx=None,\n )\n\n self.propensity_model = algo.model\n else:\n # The algos are stored in the nodes\n self.propensity_model = self.train_data_nodes[0].algo.model\n # TODO check with webdisco as well\n # Do not touch the two lines below this is dark dark magic\n self.strategies[1].algo._propensity_model = self.propensity_model\n self.strategies[1].algo.kwargs.update(\n {\"propensity_model\": self.propensity_model}\n )\n # We need to save intermediate outputs now\n for t in self.train_data_nodes:\n t.keep_intermediate_states = True\n\n print(\"Fitting propensity weighted Cox model...\")\n t1 = time.time()\n super().run(1)\n\n if not self.simu_mode:\n self.check_cp_status(idx=1)\n t2 = time.time()\n self.webdisco_fit_time = t2 - t1\n print(f\"Time to fit WebDisco {self.webdisco_fit_time}s\")\n print(\"Finished fitting weighted Cox model.\")\n self.total_fit_time = self.propensity_model_fit_time + self.webdisco_fit_time\n self.print_summary()\n\n def print_summary(self):\n \"\"\"Print a summary of the FedECA estimation.\"\"\"\n assert (\n len(self.compute_plan_keys) == 2\n ), \"You need to run the run method before getting the summary\"\n print(\"Evolution of performance of propensity model:\")\n print(self.performances_propensity_model)\n print(\"Checking if the Cox model has converged:\")\n self.get_final_cox_model()\n print(\"Computing summary...\")\n self.compute_summary()\n print(\"Final partial log-likelihood:\")\n print(self.ll)\n print(self.results_)\n\n def get_final_cox_model(self):\n \"\"\"Retrieve final cox model.\"\"\"\n print(\"Retrieving final hessian and log-likelihood\")\n if not self.simu_mode:\n cp = self.compute_plan_keys[1].key\n else:\n cp = self.compute_plan_keys[1]\n\n (\n self.hessian,\n self.ll,\n self.final_params,\n self.computed_stds,\n self.global_robust_statistics,\n ) = get_final_cox_model_function(\n self.ds_client,\n cp,\n self.num_rounds_list[1],\n self.standardize_data,\n self.duration_col,\n self.event_col,\n simu_mode=self.simu_mode,\n robust=self.robust,\n )\n\n def compute_propensity_scores(self, data: pd.DataFrame):\n \"\"\"Compute propensity scores and corresponding weights.\"\"\"\n X = data.drop([self.duration_col, self.event_col, self.treated_col], axis=1)\n Xprop = torch.from_numpy(np.array(X)).type(self.torch_dtype)\n with torch.no_grad():\n propensity_scores = self.propensity_model(Xprop)\n\n propensity_scores = propensity_scores.detach().numpy().flatten()\n weights = data[self.treated_col] * 1.0 / propensity_scores + (\n 1 - 
data[self.treated_col]\n ) * 1.0 / (1.0 - propensity_scores)\n\n return np.array(propensity_scores), np.array(weights)\n\n def compute_summary(self, alpha=0.05):\n \"\"\"Compute summary for a given threshold.\n\n Parameters\n ----------\n alpha: float, (default=0.05)\n Confidence level for computing CIs\n \"\"\"\n self.variance_matrix = -inv(self.hessian) / np.outer(\n self.computed_stds, self.computed_stds\n )\n if self.robust:\n assert self.global_robust_statistics\n beta = self.final_params\n variance_matrix = self.variance_matrix\n global_robust_statistics = self.global_robust_statistics\n propensity_model = self.propensity_model\n duration_col = self.duration_col\n event_col = self.event_col\n treated_col = self.treated_col\n\n # no self attributes in this class !!!!!!\n class MyRobustCoxVarianceAlgo(RobustCoxVarianceAlgo):\n def __init__(self, **kwargs):\n super().__init__(\n beta=beta,\n variance_matrix=variance_matrix,\n global_robust_statistics=global_robust_statistics,\n propensity_model=propensity_model,\n duration_col=duration_col,\n event_col=event_col,\n treated_col=treated_col,\n )\n\n my_robust_cox_algo = MyRobustCoxVarianceAlgo()\n # Now we need to make sure strategy has the right algo\n self.strategies[2].algo = my_robust_cox_algo\n super().run(1)\n\n if not self.simu_mode:\n self.check_cp_status(idx=2)\n self.variance_matrix = get_outmodel_function(\n \"Aggregating Qk into Q\",\n self.ds_client,\n compute_plan_key=self.compute_plan_keys[2].key,\n idx_task=0,\n )\n\n else:\n # Awful but hard to hack better\n self.variance_matrix = sum(\n [e.algo._client_statistics[\"Qk\"] for e in self.compute_plan_keys[2]]\n )\n\n summary = compute_summary_function(\n self.final_params, self.variance_matrix, alpha\n )\n summary[\"exp(coef)\"] = np.exp(summary[\"coef\"])\n summary[\"exp(coef) lower 95%\"] = np.exp(summary[\"coef lower 95%\"])\n summary[\"exp(coef) upper 95%\"] = np.exp(summary[\"coef upper 95%\"])\n\n self.results_ = summary.copy()"
},
{
"identifier": "generate_survival_data",
"path": "fedeca/utils/data_utils.py",
"snippet": "def generate_cox_data_and_substra_clients(\n n_clients: int = 2,\n ndim: int = 10,\n split_method_kwargs: Union[dict, None] = None,\n backend_type: str = \"subprocess\",\n data_path: Union[str, None] = None,\n urls: Union[list, None] = None,\n tokens: Union[list, None] = None,\n seed: int = 42,\n n_per_client: int = 200,\n add_treated: bool = False,\n ncategorical: int = 0,\n):\ndef split_dataframe_across_clients(\n df,\n n_clients,\n split_method: Union[Callable, str] = \"uniform\",\n split_method_kwargs: Union[dict, None] = None,\n backend_type=\"subprocess\",\n data_path: Union[str, None] = None,\n urls=[],\n tokens=[],\n):\ndef uniform_split(\n df: pd.DataFrame, n_clients: int, use_random: bool = True, seed: int = 42\n):\ndef split_control_over_centers(\n df,\n n_clients,\n treatment_info=\"treatment_allocation\",\n use_random: bool = True,\n seed: int = 42,\n):\n ORGS_ID = list(clients.keys())\n ALGO_ORG_ID = ORGS_ID[0] # Algo provider is defined as the first organization.\n DATA_PROVIDER_ORGS_ID = ORGS_ID"
}
] | import torch
from fedeca import FedECA
from fedeca.utils.data_utils import generate_survival_data | 8,052 | """Federated IPTW script."""
if __name__ == "__main__":
seed = 42
torch.manual_seed(seed)
N_CLIENTS = 2
NDIM = 10
URLS = []
TOKENS = []
# Choose BACKEND_TYPE between subprocess, remote and docker
BACKEND_TYPE = "subprocess"
if BACKEND_TYPE == "remote":
        # If you use BACKEND_TYPE="remote", download your API key via SSO login, then
        # copy-paste it into a file called api_key inside the tokens folder; otherwise,
        # comment out the following two lines
URLS = [f"https://api.org-{i + 1}.demo.cg.owkin.tech" for i in range(N_CLIENTS)]
TOKENS = [open(f"tokens/api_key{i + 1}", "r").read() for i in range(N_CLIENTS)]
| """Federated IPTW script."""
if __name__ == "__main__":
seed = 42
torch.manual_seed(seed)
N_CLIENTS = 2
NDIM = 10
URLS = []
TOKENS = []
# Choose BACKEND_TYPE between subprocess, remote and docker
BACKEND_TYPE = "subprocess"
if BACKEND_TYPE == "remote":
        # If you use BACKEND_TYPE="remote", download your API key via SSO login, then
        # copy-paste it into a file called api_key inside the tokens folder; otherwise,
        # comment out the following two lines
URLS = [f"https://api.org-{i + 1}.demo.cg.owkin.tech" for i in range(N_CLIENTS)]
TOKENS = [open(f"tokens/api_key{i + 1}", "r").read() for i in range(N_CLIENTS)]
| df, cox_model_coeffs = generate_survival_data( | 1 | 2023-11-27 18:01:37+00:00 | 12k |
forefy/eburger | eburger/serializer.py | [
{
"identifier": "settings",
"path": "eburger/settings.py",
"snippet": ""
},
{
"identifier": "ASTNode",
"path": "eburger/models.py",
"snippet": "class ASTNode:\n \"\"\"\n Represents a generic node in an Abstract Syntax Tree (AST).\n\n An ASTNode is a fundamental part of representing the structure of a\n programming language's source code, used extensively in compilers and\n code analysis tools.\n\n Attributes:\n node_id: int - A unique identifier for the node within the AST.\n node_type: str - The type of the node (e.g., 'SourceUnit', 'PragmaDirective').\n src: str - Source location for this node within its file.\n children: List[ASTNode] - Child nodes of this AST node.\n\n Methods:\n add_child - Adds a child node to this node's children.\n \"\"\"\n\n node_id: int\n node_type: str\n src: str\n children: List[\"ASTNode\"]\n\n def add_child(self, child: \"ASTNode\"):\n \"\"\"Adds a child AST node to the current node.\"\"\"\n self.children.append(child)\n\n def get_display_name(self):\n return f\"{self.node_type} {self.node_id}\""
},
{
"identifier": "Assignment",
"path": "eburger/models.py",
"snippet": "class Assignment(ASTNode):\n \"\"\"\n Represents an Assignment operation in a Solidity source file.\n\n An Assignment operation assigns a value to a variable.\n\n Attributes:\n left_hand_side: Identifier - The variable being assigned to.\n operator: str - The assignment operator (usually '=').\n right_hand_side: Union[Identifier, LiteralValue] - The value being assigned.\n type_descriptions: Dict[str, str] - Type descriptions of the assignment.\n \"\"\"\n\n left_hand_side: Identifier\n operator: str\n right_hand_side: Union[Identifier, LiteralValue]\n type_descriptions: Dict[str, str]\n\n def get_display_name(self):\n return f\"{self.node_type} {self.left_hand_side.get_display_name()} {self.operator} {self.right_hand_side.get_display_name()}\""
},
{
"identifier": "BinaryOperation",
"path": "eburger/models.py",
"snippet": "class BinaryOperation(ASTNode):\n \"\"\"\n Represents a Binary Operation in a Solidity source file.\n\n Binary Operations include arithmetic, logical, and relational operations.\n\n Attributes:\n leftExpression: ASTNode - The left operand of the binary operation.\n operator: str - The operator of the binary operation.\n rightExpression: ASTNode - The right operand of the binary operation.\n \"\"\"\n\n leftExpression: ASTNode\n operator: str\n rightExpression: ASTNode\n\n def get_display_name(self):\n return f\"{self.node_type} {self.leftExpression.get_display_name()} {self.operator} {self.rightExpression.get_display_name()}\""
},
{
"identifier": "Block",
"path": "eburger/models.py",
"snippet": "class Block(ASTNode):\n statements: List[ASTNode]"
},
{
"identifier": "Conditional",
"path": "eburger/models.py",
"snippet": "class Conditional(ASTNode):\n \"\"\"\n Represents a Conditional (ternary) operation in a Solidity source file.\n\n The Conditional node evaluates a condition and returns one of two expressions based on the condition's truthiness.\n\n Attributes:\n condition: ASTNode - The condition being evaluated.\n trueExpression: ASTNode - The expression returned if the condition is true.\n falseExpression: ASTNode - The expression returned if the condition is false.\n type_descriptions: Dict[str, str] - Type descriptions of the conditional expression.\n \"\"\"\n\n condition: ASTNode\n trueExpression: ASTNode\n falseExpression: ASTNode\n type_descriptions: Dict[str, str]"
},
{
"identifier": "ContractDefinition",
"path": "eburger/models.py",
"snippet": "class ContractDefinition(ASTNode):\n \"\"\"\n Represents a Contract Definition in a Solidity source file.\n\n This node type can represent a contract, an interface, or a library. It includes\n details such as the contract's kind, its documentation, and its dependencies.\n\n Attributes:\n abstract: bool - Indicates whether the contract is abstract.\n base_contracts: List[int] - Node IDs of the contract's base contracts.\n contract_dependencies: List[int] - Node IDs of contracts that this contract depends on.\n contract_kind: str - The kind of contract (e.g., 'contract', 'interface', 'library').\n fully_implemented: bool - Indicates whether the contract is fully implemented.\n linearized_base_contracts: List[int] - A linearized list of base contracts,\n important for understanding inheritance hierarchies.\n name: str - The name of the contract.\n name_location: str - Location of the contract name in the source file.\n nodes: List[ASTNode] - Child nodes (like functions, state variables) of the contract.\n scope: int - The scope ID where this contract is defined.\n used_errors: List[int] - Node IDs of errors used by the contract.\n \"\"\"\n\n abstract: bool\n base_contracts: List[int]\n contract_dependencies: List[int]\n contract_kind: str\n fully_implemented: bool\n linearized_base_contracts: List[int]\n name: str\n name_location: str\n scope: int\n used_errors: List[int]\n nodes: List[ASTNode] = field(default_factory=dict)\n\n def get_display_name(self):\n return f\"{self.node_type} {self.name}\""
},
{
"identifier": "ElementaryTypeName",
"path": "eburger/models.py",
"snippet": "class ElementaryTypeName(ASTNode):\n \"\"\"\n Represents an Elementary Type Name in a Solidity source file.\n\n Elementary Type Names include basic types like int, uint, bool, address, etc.\n\n Attributes:\n name: str - The name of the elementary type.\n type_descriptions: Dict[str, str] - Additional type descriptions.\n \"\"\"\n\n name: str\n type_descriptions: Dict[str, str]\n\n def get_display_name(self):\n return f\"{self.node_type} {self.name}\""
},
{
"identifier": "ElementaryTypeNameExpression",
"path": "eburger/models.py",
"snippet": "class ElementaryTypeNameExpression(ASTNode):\n \"\"\"\n Represents an Elementary Type Name Expression in a Solidity source file.\n\n This type of expression is often used in casting to a basic type.\n\n Attributes:\n typeName: ElementaryTypeName - The type to which the expression is referring.\n argumentTypes: List[Dict[str, str]] - Argument types for the expression.\n is_pure: bool - Indicates if the expression is pure.\n \"\"\"\n\n typeName: ElementaryTypeName\n argumentTypes: List[Dict[str, str]]\n is_pure: bool"
},
{
"identifier": "EmitStatement",
"path": "eburger/models.py",
"snippet": "class EmitStatement(ASTNode):\n \"\"\"\n Represents an Emit Statement in a Solidity source file.\n\n Emit Statements are used to emit events.\n\n Attributes:\n event_call: FunctionCall - The function call that emits the event.\n \"\"\"\n\n event_call: FunctionCall"
},
{
"identifier": "ErrorDefinition",
"path": "eburger/models.py",
"snippet": "class ErrorDefinition(ASTNode):\n \"\"\"\n Represents an Error Definition in a Solidity source file.\n\n Custom errors are defined using the 'error' keyword and can be used for\n efficient error handling and reporting in contracts.\n\n Attributes:\n name: str - The name of the error.\n parameters: ParameterList - The list of parameters for the error.\n \"\"\"\n\n name: str\n parameters: ParameterList\n\n def get_display_name(self):\n return f\"{self.node_type} {self.name}\""
},
{
"identifier": "EventDefinition",
"path": "eburger/models.py",
"snippet": "class EventDefinition(ASTNode):\n \"\"\"\n Represents an Event Definition in a Solidity source file.\n\n Events are used for logging and can be emitted in contracts to signal and record actions.\n\n Attributes:\n anonymous: bool - Indicates if the event is anonymous.\n name: str - The name of the event.\n parameters: ParameterList - The list of parameters for the event.\n \"\"\"\n\n anonymous: bool\n name: str\n parameters: ParameterList\n\n def get_display_name(self):\n return f\"{self.node_type} {self.name}\""
},
{
"identifier": "ExpressionStatement",
"path": "eburger/models.py",
"snippet": "class ExpressionStatement(ASTNode):\n \"\"\"\n Represents an Expression Statement in a Solidity source file.\n\n Expression Statements are typically used for expressions that have side effects.\n\n Attributes:\n expression: Assignment - The expression in the statement.\n \"\"\"\n\n expression: Assignment"
},
{
"identifier": "ForStatement",
"path": "eburger/models.py",
"snippet": "class ForStatement(ASTNode):\n \"\"\"\n Represents a 'for' statement in a Solidity source file.\n\n Attributes:\n initializationExpression: VariableDeclarationStatement - The initialization expression of the for loop.\n condition: ASTNode - The condition expression of the for loop.\n loopExpression: ExpressionStatement - The loop expression (increment/decrement) of the for loop.\n body: Block - The body of the for loop.\n node_id: int - A unique identifier for the node within the AST.\n src: str - Source location for this node within its file.\n \"\"\"\n\n initializationExpression: Optional[VariableDeclarationStatement]\n condition: Optional[ASTNode]\n loopExpression: Optional[ExpressionStatement]\n body: Block\n node_id: int\n src: str"
},
{
"identifier": "FunctionCall",
"path": "eburger/models.py",
"snippet": "class FunctionCall(ASTNode):\n \"\"\"\n Represents a Function Call in a Solidity source file.\n\n Function Calls are expressions that call a function.\n\n Attributes:\n arguments: List[Union[Identifier, LiteralValue]] - Arguments passed to the function.\n expression: Identifier - The function being called.\n is_constant: bool - Indicates if the function call is constant.\n is_lvalue: bool - Indicates if the function call can be assigned to (left-value).\n is_pure: bool - Indicates if the function call is pure (has no side effects).\n kind: str - The kind of function call.\n lvalue_requested: bool - Indicates if an l-value has been requested for this function call.\n type_descriptions: Dict[str, str] - Type descriptions of the function call.\n \"\"\"\n\n arguments: List[Union[Identifier, LiteralValue]]\n expression: Identifier\n is_constant: bool\n is_lvalue: bool\n is_pure: bool\n kind: str\n lvalue_requested: bool\n type_descriptions: Dict[str, str]\n\n def get_display_name(self):\n return f\"{self.node_type} {self.node_id}\""
},
{
"identifier": "FunctionCallOptions",
"path": "eburger/models.py",
"snippet": "class FunctionCallOptions(ASTNode):\n \"\"\"\n Represents function call options in a Solidity source file.\n\n Attributes:\n expression: ASTNode - The expression representing the function being called.\n options: List[Identifier] - The list of options used in the function call.\n names: List[str] - The names of the options.\n typeDescriptions: TypeDescriptions - Type descriptions for the function call.\n \"\"\"\n\n expression: ASTNode\n options: List[Identifier] # Assuming Identifier is already defined\n names: List[str]\n typeDescriptions: Dict = field(default_factory=dict)\n src: str"
},
{
"identifier": "FunctionDefinition",
"path": "eburger/models.py",
"snippet": "class FunctionDefinition(ASTNode):\n \"\"\"\n Represents a Function Definition in a Solidity source file.\n\n This node type includes details about a function such as its parameters,\n return types, visibility (public, external, etc.), state mutability\n (payable, view, pure, etc.), and any attached documentation.\n\n Attributes:\n function_selector: str - The unique selector for the function.\n implemented: bool - Indicates whether the function is implemented.\n kind: str - The kind of function (e.g., function, constructor).\n modifiers: List[int] - Node IDs of the modifiers applied to the function.\n name: str - The name of the function.\n parameters: ParameterList - The list of parameters for the function.\n return_parameters: ParameterList - The list of return parameters for the function.\n scope: int - The scope ID where this function is defined.\n state_mutability: str - The state mutability of the function (e.g., payable, nonpayable).\n virtual: bool - Indicates whether the function is virtual.\n visibility: str - The visibility of the function (e.g., public, external).\n \"\"\"\n\n function_selector: str\n kind: str\n modifiers: List[int]\n name: str\n parameters: ParameterList\n return_parameters: ParameterList\n scope: int\n state_mutability: str\n virtual: bool\n visibility: str\n body: Block\n name_location: str = None\n implemented: bool = None\n\n def get_display_name(self):\n return f\"{self.node_type} {self.name}\""
},
{
"identifier": "Identifier",
"path": "eburger/models.py",
"snippet": "class Identifier(ASTNode):\n \"\"\"\n Represents an Identifier node in a Solidity source file.\n\n An Identifier node typically refers to names of variables, functions,\n contracts, libraries, etc., within the Solidity code.\n\n Attributes:\n name: str - The name of the identifier.\n overloaded_declarations: List[int] - A list of declaration IDs if the identifier is overloaded.\n type_descriptions: Dict - Type descriptions for the identifier.\n \"\"\"\n\n name: str\n overloaded_declarations: List[int] = field(default_factory=list)\n referenced_declaration: int = None\n type_descriptions: Dict = field(default_factory=dict)\n\n def get_display_name(self):\n return f\"{self.node_type} {self.name}\""
},
{
"identifier": "IdentifierPath",
"path": "eburger/models.py",
"snippet": "class IdentifierPath(ASTNode):\n \"\"\"\n Represents an identifier path in Solidity, typically used for names of contracts,\n libraries, and other identifiers.\n\n Attributes:\n name: str - The name of the identifier.\n referencedDeclaration: int - The declaration ID that this identifier refers to.\n \"\"\"\n\n name: str\n referencedDeclaration: int\n\n def get_display_name(self):\n return f\"{self.node_type} {self.name}\""
},
{
"identifier": "IfStatement",
"path": "eburger/models.py",
"snippet": "class IfStatement(ASTNode):\n \"\"\"\n Represents an If Statement in a Solidity source file.\n\n If Statements are used for conditional execution of code blocks.\n\n Attributes:\n condition: ASTNode - The condition being evaluated.\n trueBody: ASTNode - The body of the statement if the condition is true.\n falseBody: Optional[ASTNode] - The body of the statement if the condition is false.\n \"\"\"\n\n condition: ASTNode\n trueBody: ASTNode\n falseBody: Optional[ASTNode] = None"
},
{
"identifier": "ImportDirective",
"path": "eburger/models.py",
"snippet": "class ImportDirective(ASTNode):\n \"\"\"\n Represents an Import Directive in a Solidity source file.\n\n Import Directives are used in Solidity to include code from other files,\n similar to import statements in other programming languages. This is essential\n for code reusability and organization, allowing the use of libraries, contracts,\n and other constructs from separate files.\n\n Attributes:\n file: str - The path of the file being imported.\n name_location: str - The location of the name in the source file, typically \"-1:-1:-1\" for imports.\n scope: int - The scope in which this import is valid. Usually refers to the ID of the parent node.\n source_unit: int - The ID of the source unit that this import refers to.\n src: str - Source location for this node within its file.\n symbol_aliases: List[str] - A list of aliases for symbols imported from the file.\n unit_alias: str - An alias for the unit being imported, if any.\n \"\"\"\n\n file: str\n name_location: str\n scope: int\n source_unit: int\n unit_alias: str\n symbol_aliases: List[SymbolAlias]\n absolute_path: str = None\n\n def get_display_name(self):\n return f\"{self.node_type} {prettify_path(self.file)}\""
},
{
"identifier": "IndexAccess",
"path": "eburger/models.py",
"snippet": "class IndexAccess(ASTNode):\n \"\"\"\n Represents an Index Access operation in a Solidity source file.\n\n Index Access is used for accessing elements of a mapping or an array by a key or an index.\n\n Attributes:\n baseExpression: Identifier - The base mapping or array being accessed.\n indexExpression: Identifier - The index or key used to access the element.\n type_descriptions: Dict[str, str] - Type descriptions of the accessed element.\n \"\"\"\n\n baseExpression: Identifier\n type_descriptions: Dict[str, str]\n indexExpression: Identifier = None"
},
{
"identifier": "LiteralValue",
"path": "eburger/models.py",
"snippet": "class LiteralValue(ASTNode):\n \"\"\"\n Represents a Literal value in a Solidity source file.\n\n Literals are constant values directly written in the source code, such as numbers, strings, or boolean values.\n\n Attributes:\n hex_value: str - The hexadecimal representation of the value, if applicable.\n is_constant: bool - Indicates if the literal is a constant value.\n is_lvalue: bool - Indicates if the literal can be assigned to (left-value).\n is_pure: bool - Indicates if the literal is a pure value (has no side effects).\n kind: str - The kind of literal (e.g., 'number', 'string').\n lvalue_requested: bool - Indicates if an l-value has been requested for this literal.\n subdenomination: str - The subdenomination of the value, if any (e.g., 'wei', 'gwei' for Ether values).\n type_descriptions: Dict - Descriptions of the literal's type.\n value: str - The value of the literal in string format.\n \"\"\"\n\n hex_value: str\n is_constant: bool\n is_lvalue: bool\n is_pure: bool\n kind: str\n lvalue_requested: bool\n type_descriptions: Dict\n value: str\n subdenomination: str = None\n\n def get_display_name(self):\n return f\"{self.node_type} {self.value}\""
},
{
"identifier": "ReturnValue",
"path": "eburger/models.py",
"snippet": "class ReturnValue(ASTNode):\n \"\"\"\n Represents a Return Statement in a Solidity source file.\n\n Return Statements are used to return values from functions.\n\n Attributes:\n expression: Optional[ASTNode] - The expression being returned.\n functionReturnParameters: int - ID of the return parameter list.\n \"\"\"\n\n expression: Optional[ASTNode]\n functionReturnParameters: int"
},
{
"identifier": "MemberAccess",
"path": "eburger/models.py",
"snippet": "class MemberAccess(ASTNode):\n \"\"\"\n Represents a Member Access operation in a Solidity source file.\n\n Member Access is used to access properties or methods of an object or type.\n\n Attributes:\n expression: Identifier - The object or type being accessed.\n memberName: str - The name of the member (property or method) being accessed.\n type_descriptions: Dict[str, str] - Type descriptions of the member being accessed.\n \"\"\"\n\n expression: Identifier\n memberName: str\n type_descriptions: Dict[str, str]\n\n def get_display_name(self):\n return f\"{self.node_type} {self.memberName}\""
},
{
"identifier": "ModifierDefinition",
"path": "eburger/models.py",
"snippet": "class ModifierDefinition(ASTNode):\n \"\"\"\n Represents a Modifier Definition in a Solidity source file.\n\n Modifiers are reusable components that modify the behavior of functions.\n\n Attributes:\n name: str - The name of the modifier.\n name_location: str - Location of the modifier name in the source file.\n parameters: ParameterList - The list of parameters for the modifier.\n body: Block - The body of the modifier.\n virtual: bool - Indicates whether the modifier is virtual.\n visibility: str - The visibility of the modifier (e.g., 'public', 'internal').\n \"\"\"\n\n name: str\n name_location: str\n parameters: ParameterList\n body: Block\n virtual: bool\n visibility: str\n\n def get_display_name(self):\n return f\"{self.node_type} {self.name}\""
},
{
"identifier": "ParameterList",
"path": "eburger/models.py",
"snippet": "class ParameterList(ASTNode):\n \"\"\"\n Represents a list of parameters in a Solidity function.\n\n This is used for representing both the input parameters and return parameters\n of a function.\n\n Attributes:\n parameters: List[VariableDeclaration] - A list of variable declarations representing the parameters.\n \"\"\"\n\n parameters: List[ASTNode] = field(default_factory=list)"
},
{
"identifier": "RevertStatement",
"path": "eburger/models.py",
"snippet": "class RevertStatement(ASTNode):\n \"\"\"\n Represents a Revert Statement in a Solidity source file.\n\n Revert Statements are used to revert the entire transaction, optionally returning an error.\n\n Attributes:\n errorCall: FunctionCall - The function call that triggers the revert, typically an error.\n \"\"\"\n\n errorCall: FunctionCall"
},
{
"identifier": "SourceUnit",
"path": "eburger/models.py",
"snippet": "class SourceUnit(ASTNode):\n \"\"\"\n Represents a Source Unit node in an AST for Solidity source code.\n\n A Source Unit typically represents a single Solidity source file and is the\n root node for the AST of that file. It contains metadata about the file, like\n licensing information, and serves as the container for all top-level declarations\n in the file.\n\n Attributes:\n license: str - License information for the source file, if available.\n exported_symbols: Dict[str, List[int]] - A mapping of exported symbols and\n their respective positions within the source file.\n \"\"\"\n\n absolute_path: str\n license: str\n exported_symbols: Dict[str, List[int]] = field(default_factory=dict)\n\n def get_display_name(self):\n display_name = self.node_type\n if self.absolute_path is not None:\n display_name += f\" {prettify_path(self.absolute_path)}\"\n return display_name"
},
{
"identifier": "PragmaDirective",
"path": "eburger/models.py",
"snippet": "class PragmaDirective(ASTNode):\n \"\"\"\n Represents a Pragma Directive in a Solidity source file.\n\n Pragma Directives are used to specify compiler instructions or configurations,\n most commonly the compiler version. For example, `pragma solidity ^0.8.0;`\n indicates that the Solidity file should be compiled with a compiler version\n greater than or equal to 0.8.0 but less than 0.9.0.\n\n Attributes:\n literals: List[str] - The components of the pragma directive,\n e.g., ['solidity', '^', '0.8', '.0'] for `pragma solidity ^0.8.0;`.\n \"\"\"\n\n literals: List[str] = field(default_factory=list)"
},
{
"identifier": "SymbolAlias",
"path": "eburger/models.py",
"snippet": "class SymbolAlias:\n \"\"\"\n Represents an alias in an Import Directive, mapping a foreign identifier to a local name.\n\n Attributes:\n foreign: Identifier - The foreign identifier being imported.\n name_location: str - The location of the alias name in the source file.\n \"\"\"\n\n foreign: Identifier\n name_location: str"
},
{
"identifier": "TupleExpression",
"path": "eburger/models.py",
"snippet": "class TupleExpression(ASTNode):\n \"\"\"\n Represents a Tuple Expression in a Solidity source file.\n\n Tuple Expressions are used to group multiple values into a single compound value.\n\n Attributes:\n components: List[ASTNode] - A list of expressions or declarations that form the components of the tuple.\n type_descriptions: Dict[str, str] - Type descriptions of the tuple expression.\n \"\"\"\n\n components: List[ASTNode]\n type_descriptions: Dict[str, str]"
},
{
"identifier": "UnaryOperation",
"path": "eburger/models.py",
"snippet": "class UnaryOperation(ASTNode):\n is_constant: bool\n is_lvalue: bool\n is_pure: bool\n lvalue_requested: bool\n type_descriptions: Dict\n operator: str\n prefix: bool\n src: str\n subExpression: IndexAccess"
},
{
"identifier": "UserDefinedTypeName",
"path": "eburger/models.py",
"snippet": "class UserDefinedTypeName(ASTNode):\n \"\"\"\n Represents a user-defined type name in Solidity, like contract names or new types defined by the user.\n\n Attributes:\n pathNode: IdentifierPath - The path node representing the type name.\n referencedDeclaration: int - The declaration ID of the type.\n \"\"\"\n\n pathNode: IdentifierPath\n referencedDeclaration: int"
},
{
"identifier": "UsingForDirective",
"path": "eburger/models.py",
"snippet": "class UsingForDirective(ASTNode):\n \"\"\"\n Represents a Using For Directive in a Solidity source file.\n\n This directive is used to attach library functions to a specific type.\n For example, `using SafeMath for uint;` allows the functions from the\n SafeMath library to be called on uint types.\n\n Attributes:\n libraryName: IdentifierPath - The name of the library being used.\n typeName: UserDefinedTypeName - The type that the library is being used for.\n \"\"\"\n\n libraryName: IdentifierPath\n typeName: UserDefinedTypeName"
},
{
"identifier": "VariableDeclaration",
"path": "eburger/models.py",
"snippet": "class VariableDeclaration(ASTNode):\n \"\"\"\n Represents a Variable Declaration in a Solidity source file.\n\n This class is used for declaring both state variables in contracts and parameters in functions.\n Certain attributes may only be relevant in specific contexts (e.g., 'stateVariable' is used\n for state variables, 'functionSelector' is applicable to certain state variables).\n\n Attributes:\n constant: bool - Indicates if the variable is constant.\n function_selector: Optional[str] - The unique selector for the variable, if applicable.\n mutability: str - The mutability of the variable (e.g., 'mutable', 'constant').\n name: str - The name of the variable.\n scope: int - The scope ID where this variable is defined.\n src: str - Source location for this node within its file.\n state_variable: bool - Indicates if it is a state variable.\n storage_location: str - The storage location of the variable (e.g., 'storage', 'memory').\n type_descriptions: Dict - Descriptions of the variable's type.\n typeName: TypeName - The type of the variable.\n value: Optional[LiteralValue] - The value of the variable, if any.\n visibility: str - The visibility of the variable (e.g., 'public', 'internal').\n \"\"\"\n\n constant: bool\n mutability: str\n name: str\n scope: int\n src: str\n state_variable: bool\n storage_location: str\n type_descriptions: Dict\n visibility: str\n indexed: bool = None\n function_selector: Optional[str] = None\n typeName: Dict = field(default_factory=dict)\n value: Optional[LiteralValue] = None\n\n def get_display_name(self):\n return f\"{self.node_type} {self.name}\""
},
{
"identifier": "VariableDeclarationStatement",
"path": "eburger/models.py",
"snippet": "class VariableDeclarationStatement(ASTNode):\n \"\"\"\n Represents a Variable Declaration Statement in a Solidity source file.\n\n This statement is used for declaring and optionally initializing variables.\n\n Attributes:\n declarations: List[VariableDeclaration] - The variables being declared.\n assignments: List[int] - Node IDs where this variable is assigned.\n initialValue: Optional[ASTNode] - Initial value assigned to the variable, if any.\n \"\"\"\n\n declarations: List[VariableDeclaration]\n assignments: List[int]\n initialValue: Optional[ASTNode] = None"
},
{
"identifier": "log",
"path": "eburger/utils/logger.py",
"snippet": "def log(type: str, message: str):\n match type:\n case \"success\":\n if \"success\" not in args.no:\n print(f\"[{color.Success} 🍔 Success {color.Default}] {message}\")\n case \"error\":\n print(f\"[{color.Error} Error {color.Default}] {message}\")\n sys.exit(0)\n case \"warning\":\n if \"warning\" not in args.no:\n print(f\"[{color.Warning} Warning {color.Default}] {message}\")\n case \"info\":\n if \"info\" not in args.no:\n print(f\"[{color.Info} Info {color.Default}] {message}\")\n case \"insights\":\n # json_printable = json.dumps(message, indent=4)\n # print(json_printable)\n if \"insights\" not in args.no:\n for item in message:\n name = item.get(\"name\")\n severity = item.get(\"severity\")\n results = item.get(\"results\")\n\n # Check a sample result to ensure correct structure\n try:\n results[0][\"file\"]\n except Exception:\n log(\"warning\", f\"Bad results for {item.get('name')}, skipping.\")\n continue\n\n occurrences = construct_insight_occurrences(results)\n\n match severity:\n case \"High\":\n severity = f\"[{color.Error} ❗️High {color.Default}]\"\n case \"Medium\":\n severity = f\"[{color.Warning} ❗️Medium {color.Default}]\"\n case \"Low\":\n severity = f\"[{color.Info} ❗️Low {color.Default}]\"\n\n print(f\"{severity} {name} at:\")\n for occurrence in occurrences:\n print(f\" {occurrence}\")"
}
] | from eburger import settings
from eburger.models import (
ASTNode,
Assignment,
BinaryOperation,
Block,
Conditional,
ContractDefinition,
ElementaryTypeName,
ElementaryTypeNameExpression,
EmitStatement,
ErrorDefinition,
EventDefinition,
ExpressionStatement,
ForStatement,
FunctionCall,
FunctionCallOptions,
FunctionDefinition,
Identifier,
IdentifierPath,
IfStatement,
ImportDirective,
IndexAccess,
LiteralValue,
ReturnValue,
MemberAccess,
ModifierDefinition,
ParameterList,
RevertStatement,
SourceUnit,
PragmaDirective,
SymbolAlias,
TupleExpression,
UnaryOperation,
UserDefinedTypeName,
UsingForDirective,
VariableDeclaration,
VariableDeclarationStatement,
)
from eburger.utils.logger import log | 9,416 | statements=statements,
src=src,
children=[],
)
for stmt in statements:
G.add_edge(parsed_node.get_display_name(), stmt.get_display_name())
case "Assignment":
left_hand_side, G = parse_ast_node(node_dict["leftHandSide"], G)
right_hand_side, G = parse_ast_node(node_dict["rightHandSide"], G)
parsed_node = Assignment(
node_id=node_id,
node_type=node_type,
left_hand_side=left_hand_side,
operator=node_dict.get("operator", ""),
right_hand_side=right_hand_side,
type_descriptions=node_dict.get("typeDescriptions", {}),
src=src,
children=[],
)
G.add_edge(
parsed_node.get_display_name(), left_hand_side.get_display_name()
)
G.add_edge(
parsed_node.get_display_name(), right_hand_side.get_display_name()
)
case "ExpressionStatement":
expression, G = parse_ast_node(node_dict["expression"], G)
parsed_node = ExpressionStatement(
node_id=node_id,
node_type=node_type,
expression=expression,
src=src,
children=[],
)
G.add_edge(parsed_node.get_display_name(), expression.get_display_name())
case "Identifier":
parsed_node = Identifier(
node_id=node_id,
node_type=node_type,
name=node_dict.get("name", ""),
overloaded_declarations=node_dict.get("overloadedDeclarations", []),
referenced_declaration=node_dict.get("referencedDeclaration", 0),
type_descriptions=node_dict.get("typeDescriptions", {}),
src=src,
children=[],
)
case "FunctionCall":
arguments = []
for arg in node_dict.get("arguments", []):
parsed_arg, G = parse_ast_node(arg, G)
arguments.append(parsed_arg)
expression, G = parse_ast_node(node_dict["expression"], G)
parsed_node = FunctionCall(
node_id=node_id,
node_type=node_type,
arguments=arguments,
expression=expression,
is_constant=node_dict.get("isConstant", False),
is_lvalue=node_dict.get("isLValue", False),
is_pure=node_dict.get("isPure", False),
kind=node_dict.get("kind", ""),
lvalue_requested=node_dict.get("lValueRequested", False),
type_descriptions=node_dict.get("typeDescriptions", {}),
src=src,
children=[],
)
for arg in arguments:
G.add_edge(parsed_node.get_display_name(), arg.get_display_name())
G.add_edge(parsed_node.get_display_name(), expression.get_display_name())
case "EmitStatement":
event_call, G = parse_ast_node(node_dict["eventCall"], G)
parsed_node = EmitStatement(
node_id=node_dict.get("id", 0),
node_type=node_dict.get("nodeType", ""),
event_call=event_call,
src=node_dict.get("src", ""),
children=[],
)
G.add_edge(parsed_node.get_display_name(), event_call.get_display_name())
case "PlaceholderStatement":
parsed_node = ASTNode(
node_id=node_dict.get("id", 0),
node_type=node_dict.get("nodeType", ""),
src=node_dict.get("src", ""),
children=[],
)
case "IfStatement":
condition, G = parse_ast_node(node_dict["condition"], G)
trueBody, G = parse_ast_node(node_dict["trueBody"], G)
falseBody, G = (
parse_ast_node(node_dict["falseBody"], G)
if "falseBody" in node_dict
else (None, G)
)
parsed_node = IfStatement(
node_id=node_dict.get("id", 0),
node_type=node_dict.get("nodeType", ""),
condition=condition,
trueBody=trueBody,
falseBody=falseBody,
src=node_dict.get("src", ""),
children=[],
)
G.add_edge(parsed_node.get_display_name(), condition.get_display_name())
G.add_edge(condition.get_display_name(), trueBody.get_display_name())
if falseBody:
G.add_edge(condition.get_display_name(), falseBody.get_display_name())
case "Return":
expression, G = (
parse_ast_node(node_dict["expression"], G)
if "expression" in node_dict
else (None, G)
)
|
def parse_ast_node(node_dict, G, parent=None):
"""
Parses an AST node and creates an instance of the corresponding Python class.
"""
absolute_path = node_dict.get("absolutePath")
exported_symbols = node_dict.get("exportedSymbols", {})
node_id = node_dict.get("id", 0)
license = node_dict.get("license")
node_type = node_dict.get("nodeType")
child_dicts = node_dict.get("nodes", []) # List of child node dictionaries
src = node_dict.get("src")
match node_type:
case "SourceUnit":
parsed_node = SourceUnit(
absolute_path=absolute_path,
exported_symbols=exported_symbols,
node_id=node_id,
license=license,
node_type=node_type,
src=src,
children=[],
)
case "PragmaDirective":
parsed_node = PragmaDirective(
node_id=node_id,
node_type=node_type,
literals=node_dict.get("literals", []),
src=src,
children=[],
)
case "ImportDirective":
symbol_aliases = [
SymbolAlias(
foreign=Identifier(
node_id=alias["foreign"].get("id", 0),
node_type=alias["foreign"].get("nodeType", ""),
name=alias["foreign"].get("name", ""),
overloaded_declarations=alias["foreign"].get(
"overloadedDeclarations", []
),
type_descriptions=alias["foreign"].get("typeDescriptions", {}),
src=alias["foreign"].get("src", ""),
children=[],
),
name_location=alias.get("nameLocation", ""),
)
for alias in node_dict.get("symbolAliases", [])
]
parsed_node = ImportDirective(
node_id=node_dict.get("id", 0),
node_type=node_type,
file=node_dict.get("file"),
name_location=node_dict.get("nameLocation"),
scope=node_dict.get("scope"),
source_unit=node_dict.get("sourceUnit"),
src=node_dict.get("src"),
symbol_aliases=symbol_aliases,
unit_alias=node_dict.get("unitAlias", ""),
children=[],
)
case "ContractDefinition":
parsed_node = ContractDefinition(
node_id=node_dict.get("id", 0),
node_type=node_dict.get("nodeType"),
abstract=node_dict.get("abstract", False),
base_contracts=node_dict.get("baseContracts", []),
contract_dependencies=node_dict.get("contractDependencies", []),
contract_kind=node_dict.get("contractKind", ""),
fully_implemented=node_dict.get("fullyImplemented", False),
linearized_base_contracts=node_dict.get("linearizedBaseContracts", []),
name=node_dict.get("name", ""),
name_location=node_dict.get("nameLocation", ""),
scope=node_dict.get("scope", 0),
src=node_dict.get("src", ""),
used_errors=node_dict.get("usedErrors", []),
children=[],
)
case "FunctionDefinition":
# Parsing parameters
parameters = []
params_node = node_dict.get("parameters", {}).get("parameters", [])
for param in params_node:
parsed_param, G = parse_ast_node(param, G)
parameters.append(parsed_param)
# Parsing return parameters
return_parameters = []
params_node = node_dict.get("returnParameters", {}).get("parameters", [])
for param in params_node:
parsed_param, G = parse_ast_node(param, G)
return_parameters.append(parsed_param)
body, G = (
parse_ast_node(node_dict.get("body", {}), G)
if "body" in node_dict
else (None, G)
)
parsed_node = FunctionDefinition(
node_id=node_dict.get("id", 0),
node_type=node_type,
function_selector=node_dict.get("functionSelector", ""),
implemented=node_dict.get("implemented", False),
kind=node_dict.get("kind", ""),
modifiers=node_dict.get("modifiers", []),
name=node_dict.get("name", ""),
parameters=parameters,
return_parameters=return_parameters,
scope=node_dict.get("scope", 0),
src=node_dict.get("src", ""),
state_mutability=node_dict.get("stateMutability", ""),
virtual=node_dict.get("virtual", False),
visibility=node_dict.get("visibility", ""),
body=body,
children=[],
)
if body:
G.add_edge(parsed_node.get_display_name(), body.get_display_name())
for param in parameters:
G.add_edge(parsed_node.get_display_name(), param.get_display_name())
            for ret_param in return_parameters:
G.add_edge(parsed_node.get_display_name(), ret_param.get_display_name())
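        # Shared pattern for the cases below: child dicts are parsed recursively (threading
        # the graph G through every call), the parsed children are stored on the parent
        # dataclass, and an edge parent -> child is added via get_display_name() so the
        # resulting graph mirrors the AST hierarchy.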
case "VariableDeclaration":
parsed_node = VariableDeclaration(
node_id=node_dict.get("id", 0),
node_type=node_dict.get("nodeType", ""),
constant=node_dict.get("constant", False),
function_selector=node_dict.get("functionSelector"),
mutability=node_dict.get("mutability", ""),
name=node_dict.get("name", ""),
scope=node_dict.get("scope", 0),
src=node_dict.get("src", ""),
state_variable=node_dict.get("stateVariable", False),
storage_location=node_dict.get("storageLocation", ""),
type_descriptions=node_dict.get("typeDescriptions", {}),
typeName=node_dict.get("typeName", {}),
visibility=node_dict.get("visibility", ""),
# Handling optional 'value' field
value=(
LiteralValue(
node_id=node_dict.get("value", {}).get("id", 0),
node_type=node_dict.get("value", {}).get("nodeType", ""),
hex_value=node_dict.get("value", {}).get("hexValue", ""),
is_constant=node_dict.get("value", {}).get("isConstant", False),
is_lvalue=node_dict.get("value", {}).get("isLValue", False),
is_pure=node_dict.get("value", {}).get("isPure", True),
kind=node_dict.get("value", {}).get("kind", ""),
lvalue_requested=node_dict.get("value", {}).get(
"lValueRequested", False
),
src=node_dict.get("value", {}).get("src", ""),
subdenomination=node_dict.get("value", {}).get(
"subdenomination", ""
),
type_descriptions=node_dict.get("value", {}).get(
"typeDescriptions", {}
),
value=node_dict.get("value", {}).get("value", ""),
children=[],
)
)
if "value" in node_dict
else None,
children=[],
)
case "UsingForDirective":
library_name_node = node_dict.get("libraryName", {})
library_name = IdentifierPath(
node_id=library_name_node.get("id", 0),
node_type=library_name_node.get("nodeType", ""),
name=library_name_node.get("name", ""),
referencedDeclaration=library_name_node.get("referencedDeclaration", 0),
src=library_name_node.get("src", ""),
children=[],
)
type_name_node = node_dict.get("typeName", {})
type_name_path_node = type_name_node.get("pathNode", {})
type_name = UserDefinedTypeName(
node_id=type_name_node.get("id", 0),
node_type=type_name_node.get("nodeType", ""),
pathNode=IdentifierPath(
node_id=type_name_path_node.get("id", 0),
node_type=type_name_path_node.get("nodeType", ""),
name=type_name_path_node.get("name", ""),
referencedDeclaration=type_name_path_node.get(
"referencedDeclaration", 0
),
src=type_name_path_node.get("src", ""),
children=[],
),
referencedDeclaration=type_name_node.get("referencedDeclaration", 0),
src=type_name_node.get("src", ""),
children=[],
)
parsed_node = UsingForDirective(
node_id=node_dict.get("id", 0),
node_type=node_dict.get("nodeType", ""),
src=node_dict.get("src", ""),
libraryName=library_name,
typeName=type_name,
children=[],
)
case "ErrorDefinition":
parameters = []
params_node = node_dict.get("parameters", {}).get("parameters", [])
for param in params_node:
parsed_param, G = parse_ast_node(param, G)
parameters.append(parsed_param)
parsed_node = ErrorDefinition(
node_id=node_dict.get("id", 0),
node_type=node_dict.get("nodeType", ""),
name=node_dict.get("name", ""),
parameters=ParameterList(
node_id=node_dict.get("parameters", {}).get("id", 0),
node_type=node_dict.get("parameters", {}).get("nodeType", ""),
parameters=parameters,
src=node_dict.get("parameters", {}).get("src", ""),
children=[],
),
src=node_dict.get("src", ""),
children=[],
)
for param in parameters:
G.add_edge(parsed_node.get_display_name(), param.get_display_name())
case "EventDefinition":
parameters = []
params_node = node_dict.get("parameters", {}).get("parameters", [])
for param in params_node:
parsed_param, G = parse_ast_node(param, G)
parameters.append(parsed_param)
parsed_node = EventDefinition(
node_id=node_dict.get("id", 0),
node_type=node_dict.get("nodeType", ""),
anonymous=node_dict.get("anonymous", False),
name=node_dict.get("name", ""),
parameters=ParameterList(
node_id=node_dict.get("parameters", {}).get("id", 0),
node_type=node_dict.get("parameters", {}).get("nodeType", ""),
parameters=parameters,
src=node_dict.get("parameters", {}).get("src", ""),
children=[],
),
src=node_dict.get("src", ""),
children=[],
)
for param in parameters:
G.add_edge(parsed_node.get_display_name(), param.get_display_name())
case "ModifierDefinition":
parameters, G = parse_ast_node(node_dict["parameters"], G)
body, G = parse_ast_node(node_dict["body"], G)
parsed_node = ModifierDefinition(
node_id=node_id,
node_type=node_type,
name=node_dict.get("name"),
name_location=node_dict.get("nameLocation"),
parameters=parameters,
body=body,
virtual=node_dict.get("virtual", False),
visibility=node_dict.get("visibility"),
src=src,
children=[],
)
G.add_edge(parsed_node.get_display_name(), parameters.get_display_name())
G.add_edge(parsed_node.get_display_name(), body.get_display_name())
case "Block" | "UncheckedBlock":
statements = []
for stmt in node_dict.get("statements", []):
if stmt:
parsed_stmt, G = parse_ast_node(stmt, G)
statements.append(parsed_stmt)
parsed_node = Block(
node_id=node_id,
node_type=node_type,
statements=statements,
src=src,
children=[],
)
for stmt in statements:
G.add_edge(parsed_node.get_display_name(), stmt.get_display_name())
case "Assignment":
left_hand_side, G = parse_ast_node(node_dict["leftHandSide"], G)
right_hand_side, G = parse_ast_node(node_dict["rightHandSide"], G)
parsed_node = Assignment(
node_id=node_id,
node_type=node_type,
left_hand_side=left_hand_side,
operator=node_dict.get("operator", ""),
right_hand_side=right_hand_side,
type_descriptions=node_dict.get("typeDescriptions", {}),
src=src,
children=[],
)
G.add_edge(
parsed_node.get_display_name(), left_hand_side.get_display_name()
)
G.add_edge(
parsed_node.get_display_name(), right_hand_side.get_display_name()
)
case "ExpressionStatement":
expression, G = parse_ast_node(node_dict["expression"], G)
parsed_node = ExpressionStatement(
node_id=node_id,
node_type=node_type,
expression=expression,
src=src,
children=[],
)
G.add_edge(parsed_node.get_display_name(), expression.get_display_name())
case "Identifier":
parsed_node = Identifier(
node_id=node_id,
node_type=node_type,
name=node_dict.get("name", ""),
overloaded_declarations=node_dict.get("overloadedDeclarations", []),
referenced_declaration=node_dict.get("referencedDeclaration", 0),
type_descriptions=node_dict.get("typeDescriptions", {}),
src=src,
children=[],
)
case "FunctionCall":
arguments = []
for arg in node_dict.get("arguments", []):
parsed_arg, G = parse_ast_node(arg, G)
arguments.append(parsed_arg)
expression, G = parse_ast_node(node_dict["expression"], G)
parsed_node = FunctionCall(
node_id=node_id,
node_type=node_type,
arguments=arguments,
expression=expression,
is_constant=node_dict.get("isConstant", False),
is_lvalue=node_dict.get("isLValue", False),
is_pure=node_dict.get("isPure", False),
kind=node_dict.get("kind", ""),
lvalue_requested=node_dict.get("lValueRequested", False),
type_descriptions=node_dict.get("typeDescriptions", {}),
src=src,
children=[],
)
for arg in arguments:
G.add_edge(parsed_node.get_display_name(), arg.get_display_name())
G.add_edge(parsed_node.get_display_name(), expression.get_display_name())
case "EmitStatement":
event_call, G = parse_ast_node(node_dict["eventCall"], G)
parsed_node = EmitStatement(
node_id=node_dict.get("id", 0),
node_type=node_dict.get("nodeType", ""),
event_call=event_call,
src=node_dict.get("src", ""),
children=[],
)
G.add_edge(parsed_node.get_display_name(), event_call.get_display_name())
case "PlaceholderStatement":
parsed_node = ASTNode(
node_id=node_dict.get("id", 0),
node_type=node_dict.get("nodeType", ""),
src=node_dict.get("src", ""),
children=[],
)
case "IfStatement":
condition, G = parse_ast_node(node_dict["condition"], G)
trueBody, G = parse_ast_node(node_dict["trueBody"], G)
falseBody, G = (
parse_ast_node(node_dict["falseBody"], G)
if "falseBody" in node_dict
else (None, G)
)
parsed_node = IfStatement(
node_id=node_dict.get("id", 0),
node_type=node_dict.get("nodeType", ""),
condition=condition,
trueBody=trueBody,
falseBody=falseBody,
src=node_dict.get("src", ""),
children=[],
)
G.add_edge(parsed_node.get_display_name(), condition.get_display_name())
G.add_edge(condition.get_display_name(), trueBody.get_display_name())
if falseBody:
G.add_edge(condition.get_display_name(), falseBody.get_display_name())
case "Return":
expression, G = (
parse_ast_node(node_dict["expression"], G)
if "expression" in node_dict
else (None, G)
) | parsed_node = ReturnValue( | 23 | 2023-12-03 07:44:01+00:00 | 12k |
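The parser in this row threads a graph object G through every recursive call and only assumes it exposes add_edge(). Below is a minimal driver sketch; the solc-generated JSON file, the build_ast_graph helper name, and the networkx.DiGraph choice are illustrative assumptions, not part of the row itself.

# Usage sketch for parse_ast_node as defined above (assumptions noted in the comments).
import json
import networkx as nx  # assumed graph backend; anything exposing add_edge() would do

def build_ast_graph(ast_json_path):  # hypothetical helper, not from the original repo
    # The AST JSON is assumed to come from `solc --ast-compact-json`, whose root
    # node is a SourceUnit dict.
    with open(ast_json_path) as f:
        source_unit_dict = json.load(f)
    G = nx.DiGraph()
    root_node, G = parse_ast_node(source_unit_dict, G)  # returns (parsed node, graph)
    return root_node, G

# root, graph = build_ast_graph("contract_ast.json")
# print(graph.number_of_nodes(), "nodes,", graph.number_of_edges(), "edges")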
wadiuvatzy/SAM-G | envs/robosuiteVGB/robosuitevgb/vgb_wrapper_rs100.py | [
{
"identifier": "CustomMujocoXML",
"path": "envs/robosuiteVGB/robosuitevgb/secant/envs/robosuite/custom_xml.py",
"snippet": "class CustomMujocoXML(ET.ElementTree):\n \"\"\"\n Class to modify the Mujoco XML of Mujoco-enabled env\n \"\"\"\n\n def __init__(self, element=None, file=None):\n super(CustomMujocoXML, self).__init__(element=element, file=file)\n\n @classmethod\n def build_from_element(cls, root):\n return cls(element=root)\n\n @classmethod\n def build_from_file(cls, xml_fname):\n return cls(file=xml_fname)\n\n @classmethod\n def build_from_env(cls, env):\n \"\"\"\n Build from a RobosuiteAdapter env or a robosuite env\n \"\"\"\n from .adapter import RobosuiteAdapter\n\n if isinstance(env, RobosuiteAdapter):\n env = env.env\n element = env.model.root\n return cls.build_from_element(element)\n\n def to_string(self):\n return ET.tostring(self.getroot(), encoding=\"unicode\")\n\n def get_elements_with_tag(self, tag):\n children = []\n for child in self.getroot().iter(tag):\n children.append(child)\n return children\n\n def get_element_with_name(self, tag, name):\n elems = self.get_elements_with_tag(tag)\n for elem in elems:\n if elem.get(\"name\") == name:\n return elem\n return None\n\n def remove_element(self, tag, name=None):\n cur = self.getroot()\n CustomMujocoXML._remove_helper(cur, tag, name)\n\n @staticmethod\n def _remove_helper(cur, tag, name=None):\n for elem in cur:\n if elem.tag == tag:\n if name is not None:\n if elem.get(\"name\") == name:\n cur.remove(elem)\n else:\n cur.remove(elem)\n else:\n CustomMujocoXML._remove_helper(elem, tag, name)\n\n def get_attributes(self, tag, name, attrib_name):\n elem = self.get_element_with_name(tag=tag, name=name)\n if elem is not None:\n return elem.get(attrib_name)\n else:\n raise ValueError(f\"Can't find element with tag {tag} and name {name}\")\n\n def set_attributes(self, tag_name, id_tuple, **kwargs):\n for child in self.getroot().iter(tag_name):\n if child.get(id_tuple[0], default=None) == id_tuple[1]:\n for k, v in kwargs.items():\n child.set(k, v)\n\n def add_element_to_parent(self, parent_tag, element):\n \"\"\"\n Add element as a child of a parent that's identified by its tag\n \"\"\"\n parent = self.getroot().find(parent_tag)\n parent.append(element)\n\n @staticmethod\n def save_elements(filename, root_tag, *elems):\n \"\"\"\n Append a list of elements to a root with a tag and save the tree to a file\n \"\"\"\n root = ET.Element(root_tag)\n for element in elems:\n root.append(element)\n tree = ET.ElementTree(element=root)\n tree.write(filename)\n\n def load_elements_from_file(self, filename, name_prefix=\"\"):\n \"\"\"\n Load elements from an xml file generated by .save_elements(). 
Update current\n elements with the new elements from file.\n\n Args:\n filename: file name\n name_prefix: prefix to be attached to every name in the xml as identifier\n \"\"\"\n new_assets = ET.parse(filename)\n root = new_assets.getroot()\n for child in root.getchildren():\n child.set(\"name\", name_prefix + child.get(\"name\"))\n elem = self.get_element_with_name(child.tag, child.get(\"name\"))\n if elem is not None:\n for k, v in child.items():\n elem.set(k, v)\n else:\n self.add_element_to_parent(root.tag, child)\n\n # ----------------------------------------------------\n # ------------ Asset: Texture & Material -------------\n # ----------------------------------------------------\n def set_texture_attributes(self, tex_name, **kwargs):\n self.set_attributes(\"texture\", (\"name\", tex_name), **kwargs)\n\n def set_material_attributes(self, mat_name, **kwargs):\n self.set_attributes(\"material\", (\"name\", mat_name), **kwargs)\n\n def save_current_tex_mat(self, filename, names_to_save):\n \"\"\"\n Save the texture/material elements in the xml.\n \"\"\"\n assets = []\n for tex in self.getroot().iter(\"texture\"):\n if tex.get(\"name\", None) in names_to_save:\n assets.append(tex)\n\n for mat in self.getroot().iter(\"material\"):\n if mat.get(\"name\", None) in names_to_save:\n assets.append(mat)\n CustomMujocoXML.save_elements(filename, \"asset\", *assets)\n\n def get_material_texture(self, mat_name):\n \"\"\"\n Get the texture file or built-in texture name from a material name\n \"\"\"\n tex_name = self.get_attributes(\n tag=\"material\", name=mat_name, attrib_name=\"texture\"\n )\n tex_file = self.get_attributes(tag=\"texture\", name=tex_name, attrib_name=\"file\")\n base_name = os.path.basename(os.path.normpath(tex_file))\n for name, file_name in TEXTURES.items():\n if base_name == file_name:\n return name\n return tex_file\n\n def change_material_texture(self, mat_name, tex_name=None, tex_element=None):\n \"\"\"\n Change the texture of a material identified by its name. If the given texture\n name is not in the xml file, the given texture element will be added.\n \"\"\"\n if tex_element is not None:\n self.remove_element(\"texture\", name=tex_element.name)\n self.add_element_to_parent(\"asset\", tex_element)\n self.set_material_attributes(mat_name, texture=tex_element.name)\n elif tex_name is not None:\n textures = self.get_elements_with_tag(\"texture\")\n tex_names = list(map(lambda t: t.get(\"name\", default=None), textures))\n if tex_name not in tex_names:\n if tex_name in ALL_TEXTURES:\n tex_element = TextureElement.build_from_file(\n name=tex_name, file_or_texture=tex_name, type=\"2d\"\n )\n assert tex_element is not None, (\n \"Texture name not found in tree. 
Must provide a texture element \"\n \"to add to the tree.\"\n )\n self.add_element_to_parent(\"asset\", tex_element)\n self.set_material_attributes(mat_name, texture=tex_name)\n else:\n raise AssertionError(\n \"Either a texture name or a texture element should be provided\"\n )\n\n def print_texture_material_info(self):\n \"\"\"\n Print information about the texture and material in the xml.\n \"\"\"\n\n def attrib_to_string(attrib):\n string = \"\"\n for k, v in attrib.items():\n string += f\"{k}={v}, \"\n return string[:-2]\n\n textures = self.get_elements_with_tag(\"texture\")\n materials = self.get_elements_with_tag(\"material\")\n\n for mat in materials:\n attrib = mat.attrib.copy()\n name = attrib.pop(\"name\")\n tex_name = attrib.pop(\"texture\")\n tex_attrib = None\n for tex in textures.copy():\n if tex.get(\"name\", default=None) == tex_name:\n tex_attrib = tex.attrib.copy()\n tex_attrib.pop(\"name\")\n textures.remove(tex)\n break\n assert (\n tex_attrib is not None\n ), f\"Texture '{tex_name}' does not exist in tree.\"\n print(f\"material: {name} || {attrib_to_string(attrib)}\")\n print(f\"\\ttexture: {tex_name} || {attrib_to_string(tex_attrib)}\")\n\n print(\"\\nOther textures:\")\n for tex in textures:\n name = tex.get(\"name\")\n print(f\"\\ttexture: {name} || {attrib_to_string(tex.attrib)}\")\n\n # ----------------------------------------------------\n # -------------------- Light -------------------------\n # ----------------------------------------------------\n def name_light(self):\n \"\"\"\n Rename all light elements. This function is needed because in some env light\n elements have no name.\n \"\"\"\n light_elems = self.get_elements_with_tag(\"light\")\n for i in range(len(light_elems)):\n elem = light_elems[i]\n elem.set(\"name\", f\"light{i+1}\")\n\n def remove_all_lights(self):\n self.remove_element(\"light\")\n\n def set_light_attributes(self, name, **attrib):\n self.set_attributes(\"light\", (\"name\", name), **attrib)\n\n def add_light(self, name, **attrib):\n elem = LightElement(name, **attrib)\n self.add_element_to_parent(\"worldbody\", elem)\n\n def save_current_lights(self, filename, lights_to_save=\"All\"):\n \"\"\"\n Save the light elements in the xml. Use \"All\" to save all the elements\n \"\"\"\n lights = []\n for tex in self.getroot().iter(\"light\"):\n if lights_to_save == \"All\":\n lights.append(tex)\n elif tex.get(\"name\") in lights_to_save:\n lights.append(tex)\n CustomMujocoXML.save_elements(filename, \"worldbody\", *lights)\n\n # ----------------------------------------------------\n # ------------------ Benchmarking --------------------\n # ----------------------------------------------------"
},
{
"identifier": "XMLTextureModder",
"path": "envs/robosuiteVGB/robosuitevgb/secant/envs/robosuite/custom_xml.py",
"snippet": "class XMLTextureModder:\n def __init__(\n self,\n seed=None,\n tex_candidate=None,\n tex_to_change=None,\n tex_diff_constraint=None,\n tex_type=DEFAULT_TEXTURE_TYPE,\n ):\n \"\"\"\n Initialize a modder that randomly changes the texture of specified objects.\n\n Args:\n seed (int): random seed used to randomize these\n modifications without impacting other numpy seeds / randomizations\n tex_candidate (dict): a dictionary that maps alias names of target objects\n (see DEFAULT_TEXTURE_ALIAS) to some texture candidates. Keys should be\n subsets of keys from DEFAULT_TEXTURE_ALIAS'. Values should be a texture\n candidate or a list of texture candidates. If a list of texture\n candidates is provided, a random candidate will be selected and\n applied to the corresponding target object.\n A texture candidate is either a string representing the path to a image\n texture file, or a tuple of RGB values normalized to [0, 1]. Note that\n you can use the names of robosuite's builtin texture files as texture\n candidate. Check `envs.robosuite.ALL_TEXTURES` for all the\n builtin texture names, or `envs.robosuite.TEXTURES` to get a\n dictionary that maps these names to their source files.\n tex_to_change (list): List of object alias names whose texture need to be\n modified. If None, use default texture list in\n DEFAULT_TASK_TEXTURE_LIST[task]\n tex_diff_constraint (list[set]): List of sets, where each set contains some\n texture keys. Each texture key from a set will be assigned a different\n texture from the other keys in the same set. This arg is used to enforce\n texture difference on important objects so that they are more easily\n identified by vision algorithms. If None, no constraint is used.\n \"\"\"\n self.seed = seed\n self.tex_candidate = tex_candidate if tex_candidate else {}\n self.tex_to_change = tex_to_change if tex_to_change else []\n self.tex_diff_constraint = tex_diff_constraint if tex_diff_constraint else []\n self.tex_type = tex_type\n\n def random_texture_change(self, mujoco_xml: CustomMujocoXML):\n if self.seed is None:\n random_state = np.random.mtrand._rand\n else:\n random_state = np.random.RandomState(self.seed)\n\n def change_texture(tex_key, texture):\n tex_type = self.tex_type.get(tex_key, \"cube\")\n if isinstance(texture, tuple):\n tex_elem = TextureElement.build_from_rgb(\n f\"tex-{tex_key}\", rgb1=texture, type=tex_type\n )\n else:\n tex_elem = TextureElement.build_from_file(\n f\"tex-{tex_key}\", file_or_texture=texture, type=tex_type\n )\n mat_name = DEFAULT_TEXTURE_ALIAS.get(tex_key, tex_key)\n mujoco_xml.change_material_texture(mat_name=mat_name, tex_element=tex_elem)\n\n if self.tex_to_change is None:\n tex_to_change = sorted(list(self.tex_candidate.keys()))\n else:\n tex_to_change = sorted(list(self.tex_to_change))\n\n if self.tex_diff_constraint is not None:\n for c_set in self.tex_diff_constraint:\n constraint = set()\n for key in c_set:\n if key in tex_to_change:\n candidate = self.tex_candidate.get(key, None)\n if candidate is None:\n continue\n elif isinstance(candidate, list):\n new_c = candidate.copy()\n for c in new_c:\n if c in constraint:\n new_c.remove(c)\n candidate = new_c[random_state.randint(len(new_c))]\n if candidate == 'black':\n candidate = (0, 0, 0)\n change_texture(key, candidate)\n tex_to_change.remove(key)\n constraint.add(candidate)\n while tex_to_change:\n key = tex_to_change.pop()\n candidate = self.tex_candidate.get(key, None)\n if candidate is None:\n continue\n elif isinstance(candidate, list):\n candidate = 
candidate[random_state.randint(len(candidate))]\n if candidate == 'black':\n candidate = (0, 0, 0)\n change_texture(key, candidate)"
},
{
"identifier": "render_img",
"path": "envs/robosuiteVGB/robosuitevgb/secant/utils/misc.py",
"snippet": "def render_img(img, backend=\"cv2\", waitkey=100):\n if backend == \"matplotlib\":\n plt.imshow(img, aspect=\"auto\")\n plt.show()\n elif backend == \"cv2\":\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n cv2.imshow(\"rendering\", img)\n cv2.waitKey(waitkey)\n else:\n raise AssertionError(\"only matplotlib and cv2 are supported.\")"
},
{
"identifier": "get_obs_shape_from_dict",
"path": "envs/robosuiteVGB/robosuitevgb/secant/envs/robosuite/utils.py",
"snippet": "def get_obs_shape_from_dict(obs_dict):\n import tree\n\n return wrap_dict_tuple_space(tree.map_structure(_gen_obs_space, obs_dict))"
},
{
"identifier": "DEFAULT_TEXTURE_ALIAS",
"path": "envs/robosuiteVGB/robosuitevgb/secant/envs/robosuite/preset_customization.py",
"snippet": "DEFAULT_TEXTURE_ALIAS = {\n \"floor\": \"floorplane\",\n \"wall\": \"walls_mat\",\n \"table_legs\": \"table_legs_metal\",\n \"table\": \"table_ceramic\",\n \"door\": \"Door_MatDarkWood\",\n \"door_handle\": \"Door_MatMetal\",\n \"lift_object\": \"cube_redwood_mat\",\n \"na_metal1\": \"smetal\",\n \"na_metal2\": \"bmetal\",\n \"pp_table1\": \"light-wood\",\n \"pp_table2\": \"dark-wood\",\n \"stack_object1\": \"greenwood_mat\",\n \"stack_object2\": \"redwood_mat\",\n \"handoff_hammer_head\": \"metal_mat\",\n \"handoff_hammer_body\": \"wood_mat\",\n \"ta_lift_pot\": \"pot_mat\",\n \"ta_lift_handle1\": \"handle1_mat\",\n \"ta_lift_handle2\": \"handle2_mat\",\n \"ta_pih_plate\": \"plate_mat\",\n \"ta_pih_stick\": \"greenwood_mat\",\n}"
},
{
"identifier": "DEFAULT_TASK_TEXTURE_LIST",
"path": "envs/robosuiteVGB/robosuitevgb/secant/envs/robosuite/preset_customization.py",
"snippet": "DEFAULT_TASK_TEXTURE_LIST = {\n \"Door\": [\"floor\", \"wall\", \"table_legs\", \"table\", \"door\", \"door_handle\"],\n \"Lift\": [\"floor\", \"wall\", \"table_legs\", \"table\", \"lift_object\"],\n \"NutAssembly\": [\"floor\", \"wall\", \"table_legs\", \"table\", \"na_metal1\", \"na_metal2\"],\n \"NutAssemblyRound\": [\n \"floor\",\n \"wall\",\n \"table_legs\",\n \"table\",\n \"na_metal1\",\n \"na_metal2\",\n ],\n \"NutAssemblySingle\": [\n \"floor\",\n \"wall\",\n \"table_legs\",\n \"table\",\n \"na_metal1\",\n \"na_metal2\",\n ],\n \"NutAssemblySquare\": [\n \"floor\",\n \"wall\",\n \"table_legs\",\n \"table\",\n \"na_metal1\",\n \"na_metal2\",\n ],\n \"PickPlace\": [\"floor\", \"wall\", \"table_legs\", \"pp_table1\", \"pp_table2\"],\n \"PickPlaceBread\": [\"floor\", \"wall\", \"table_legs\", \"pp_table1\", \"pp_table2\"],\n \"PickPlaceCan\": [\"floor\", \"wall\", \"table_legs\", \"pp_table1\", \"pp_table2\"],\n \"PickPlaceCereal\": [\"floor\", \"wall\", \"table_legs\", \"pp_table1\", \"pp_table2\"],\n \"PickPlaceMilk\": [\"floor\", \"wall\", \"table_legs\", \"pp_table1\", \"pp_table2\"],\n \"PickPlaceSingle\": [\"floor\", \"wall\", \"table_legs\", \"pp_table1\", \"pp_table2\"],\n \"Stack\": [\"floor\", \"wall\", \"table_legs\", \"table\", \"stack_object1\", \"stack_object2\"],\n \"TwoArmHandover\": [\n \"floor\",\n \"wall\",\n \"table_legs\",\n \"table\",\n \"handoff_hammer_head\",\n \"handoff_hammer_body\",\n ],\n \"TwoArmLift\": [\n \"floor\",\n \"wall\",\n \"table_legs\",\n \"table\",\n \"ta_lift_pot\",\n \"ta_lift_handle1\",\n \"ta_lift_handle2\",\n ],\n \"TwoArmPegInHole\": [\"floor\", \"wall\", \"ta_pih_plate\", \"ta_pih_stick\"],\n \"Wipe\": [\"floor\", \"wall\", \"table_legs\", \"table\"],\n}"
},
{
"identifier": "ALL_PRESET_ARGUMENTS",
"path": "envs/robosuiteVGB/robosuitevgb/secant/envs/robosuite/preset_customization.py",
"snippet": "ALL_PRESET_ARGUMENTS = {\n \"SECANT\": dict(\n custom_texture=\"SECANT\",\n custom_color=\"SECANT\",\n custom_camera=\"SECANT\",\n custom_light=\"SECANT\",\n ),\n}"
},
{
"identifier": "ALL_TEXTURE_PRESETS",
"path": "envs/robosuiteVGB/robosuitevgb/secant/envs/robosuite/preset_customization.py",
"snippet": "ALL_TEXTURE_PRESETS = {\n \"SECANT\": PRESET_TEXTURE_CONFIG,\n}"
},
{
"identifier": "ALL_COLOR_PRESETS",
"path": "envs/robosuiteVGB/robosuitevgb/secant/envs/robosuite/preset_customization.py",
"snippet": "ALL_COLOR_PRESETS = {\"SECANT\": PRESET_COLOR_CONFIG}"
},
{
"identifier": "ALL_CAMERA_PRESETS",
"path": "envs/robosuiteVGB/robosuitevgb/secant/envs/robosuite/preset_customization.py",
"snippet": "ALL_CAMERA_PRESETS = {\"SECANT\": PRESET_CAMERA_CONFIG}"
},
{
"identifier": "ALL_LIGHTING_PRESETS",
"path": "envs/robosuiteVGB/robosuitevgb/secant/envs/robosuite/preset_customization.py",
"snippet": "ALL_LIGHTING_PRESETS = {\"SECANT\": PRESET_LIGHTING_CONFIG}"
},
{
"identifier": "get_custom_reset_config",
"path": "envs/robosuiteVGB/robosuitevgb/secant/envs/robosuite/preset_customization.py",
"snippet": "def get_custom_reset_config(task, mode, scene_id):\n custom_seed = TASK_RANDOM_SEED[task][mode][scene_id]\n custom_texture = custom_color = custom_camera = custom_light = custom_seed\n custom_reset_config = {}\n if custom_texture:\n custom_texture_config = copy.deepcopy(PRESET_TEXTURE_CONFIG)\n custom_texture_config[\"tex_candidate\"] = TASK_TEX_CANDIDATE[task][mode][scene_id]\n custom_texture_config[\"seed\"] = custom_texture\n custom_reset_config[\"custom_texture\"] = custom_texture_config\n if custom_color:\n custom_color_config = copy.deepcopy(PRESET_COLOR_CONFIG)\n custom_color_config[\"seed\"] = custom_color\n custom_reset_config[\"custom_color\"] = custom_color_config\n if custom_camera:\n custom_camera_config = copy.deepcopy(PRESET_CAMERA_CONFIG)\n custom_camera_config[\"seed\"] = custom_camera\n custom_reset_config[\"custom_camera\"] = custom_camera_config\n if custom_light:\n custom_light_config = copy.deepcopy(PRESET_LIGHTING_CONFIG)\n custom_light_config[\"randomize_active\"] = False\n custom_light_config[\"seed\"] = custom_light\n custom_reset_config[\"custom_light\"] = custom_light_config\n if not custom_reset_config:\n custom_reset_config = None\n return custom_reset_config"
}
] | import robosuite as suite
import gym
import numpy as np
import random
import copy
import xml.etree.ElementTree as ET
import numpy as np
from robosuite.controllers import load_controller_config
from gym.spaces import Box
from typing import Union, List, Optional, Dict
from .secant.envs.robosuite.custom_xml import CustomMujocoXML, XMLTextureModder
from .secant.utils.misc import render_img
from .secant.envs.robosuite.utils import get_obs_shape_from_dict
from .secant.envs.robosuite.preset_customization import (
DEFAULT_TEXTURE_ALIAS,
DEFAULT_TASK_TEXTURE_LIST,
ALL_PRESET_ARGUMENTS,
ALL_TEXTURE_PRESETS,
ALL_COLOR_PRESETS,
ALL_CAMERA_PRESETS,
ALL_LIGHTING_PRESETS,
get_custom_reset_config,
)
from robosuite.utils.mjmod import CameraModder, LightingModder, TextureModder
from robosuite.wrappers import Wrapper | 7,371 | # **self.dynamics_randomization_args,
# )
# self.modders.append(self.dynamics_modder)
def _initialize_xml_modder(self, custom_texture: Optional[Union[str, dict]]):
if custom_texture is not None:
if isinstance(custom_texture, str):
custom_texture = ALL_TEXTURE_PRESETS.get(custom_texture, None)
config = custom_texture.copy()
if custom_texture.get("tex_to_change", None) is None:
config["tex_to_change"] = DEFAULT_TASK_TEXTURE_LIST[self.task]
self.secant_modders["texture"] = XMLTextureModder(**config)
def _reformat_obs(self, obs_dict, disable_channel_first=False):
rgb_obs = {}
state_obs = []
reformatted = {}
for name, obs in obs_dict.items():
if name.endswith("_image") and "rgb" in self.obs_modality:
view_name = name[:-6]
if self._use_depth:
depth_obs = obs_dict[view_name + "_depth"]
obs = np.concatenate([obs, depth_obs[:, :, np.newaxis]], axis=2)
obs = np.flipud(obs)
if self._channel_first and not disable_channel_first:
obs = np.transpose(obs, (2, 0, 1))
rgb_obs[view_name] = obs
elif name.endswith("state") and "state" in self.obs_modality:
state_obs.append(obs)
if "rgb" in self.obs_modality:
if len(rgb_obs.keys()) == 1:
rgb_obs = list(rgb_obs.values())[0]
reformatted["rgb"] = rgb_obs
if "state" in self.obs_modality:
reformatted["state"] = np.concatenate(state_obs)
return reformatted
def get_mujoco_xml(self):
return CustomMujocoXML.build_from_env(self)
def reset(
self,
xml_string: Optional[Union[str, List]] = None,
custom_texture: Optional[Union[str, dict]] = None,
):
if xml_string is not None:
if xml_string != self._reset_config.get("xml_string", None):
self._reset_config["xml_string"] = xml_string.copy()
else:
xml_string = None
if custom_texture is not None:
if custom_texture != self._reset_config.get("custom_texture", None):
self._reset_config["custom_texture"] = copy.deepcopy(custom_texture)
self._initialize_xml_modder(custom_texture)
else:
custom_texture = None
# reset from xml should only be called if a different texture/xml is requested
reset_from_xml = (
xml_string is not None or custom_texture is not None or self.reset_xml_next
)
if reset_from_xml:
self._reset_from_xml(xml_string)
self.env.deterministic_reset = False
else:
self.env.reset()
# self.env.reset()
self.env._reset_internal()
self.reset_xml_next = False
if self.moving_light:
self.step_pos = 0.4
self.step_diffuse = 0.01
self.light_pos_range = [self.env.sim.model.light_pos[:,1] - 20, self.env.sim.model.light_pos[:,1] + 20]
self.light_diffuse_range = [max(self.env.sim.model.light_diffuse[:,1] - 0.5, 0.15), min(self.env.sim.model.light_diffuse[:,1] + 0.2, 0.95)]
# TODO may change the order
# self.restore_default_domain()
# # save the original env parameters
# self.save_default_domain()
# reset counter for doing domain randomization at a particular frequency
# self.step_counter = 0
# update sims
self._initialize_modders()
for modder in self.modders:
modder.update_sim(self.env.sim)
self.randomize_domain()
self.env.deterministic_reset = False
self.env._reset_internal()
return self._reformat_obs(self.env._get_observations(force_update=True))
def _reset_from_xml(self, xml_string):
if xml_string is not None:
if isinstance(xml_string, list):
xml_string = np.random.choice(xml_string)
try:
root = ET.fromstring(xml_string)
except ET.ParseError:
raise ValueError("Input xml_string is not a valid XML string")
mujoco_xml = CustomMujocoXML.build_from_element(root)
else:
mujoco_xml = self.get_mujoco_xml()
# f = open("twoarmsaaa.xml", 'w')
# f.write(mujoco_xml.to_string())
# f.close()
if self.secant_modders["texture"] is not None:
self.secant_modders["texture"].random_texture_change(mujoco_xml) # TODO may change the texture
self.env.reset_from_xml_string(mujoco_xml.to_string())
def get_tex_candidate(self):
"""
Get a tex_candidate dictionary from the current env. The tex_candidate
dictionary can be passed to custom_texture in reset()
"""
mujoco_xml = self.get_mujoco_xml()
texture_list = DEFAULT_TASK_TEXTURE_LIST[self.task]
tex_candidates = {}
for alias in texture_list:
| ALL_ROBOTS = list(suite.ALL_ROBOTS)
class VGBWrapper(gym.core.Env, Wrapper):
"""
    A Gym-style adapter around a robosuite environment with XML- and sim-level visual domain randomization.
"""
def __init__(
self,
env,
color_randomization_args,
camera_randomization_args,
lighting_randomization_args,
dynamics_randomization_args,
task: str,
obs_modality: Optional[List[str]] = ["rgb"],
episode_length: int = 500,
hard_reset: bool = False,
channel_first: bool = True,
camera_depths: bool = False,
custom_reset_config: Optional[Union[Dict, str]] = None,
mode: bool = "train",
scene_id: Optional[int] = 0,
verbose: bool = False,
seed=None,
randomize_color=True,
randomize_camera=True,
randomize_lighting=True,
randomize_dynamics=True,
randomize_on_reset=True,
moving_light=False,
):
super().__init__(env)
self.seed = seed
if seed is not None:
self.random_state = np.random.RandomState(seed)
else:
self.random_state = None
self.randomize_color = randomize_color
self.randomize_camera = randomize_camera
self.randomize_lighting = randomize_lighting
self.randomize_dynamics = randomize_dynamics
self.color_randomization_args = color_randomization_args
self.camera_randomization_args = camera_randomization_args
self.lighting_randomization_args = lighting_randomization_args
self.dynamics_randomization_args = dynamics_randomization_args
self.randomize_on_reset = randomize_on_reset
self.moving_light = moving_light
self.modders = []
# self._initialize_modders()
# self.save_default_domain()
if isinstance(scene_id, int):
if verbose:
print(f"{mode} scene_id: {scene_id}")
custom_reset_config = get_custom_reset_config(
task=task, mode=mode, scene_id=scene_id
)
self.task = task
self.headless = True
self.obs_modality = (
["rgb", "state"] if obs_modality is None else obs_modality.copy()
)
assert len(self.obs_modality) > 0, "Observation must have at least one modality"
for modal in self.obs_modality:
assert modal in [
"rgb",
"state",
], "Only 'rgb' and 'state' are supported as modality"
if "rgb" in self.obs_modality:
self._use_rgb = True
else:
self._use_rgb = False
self._render_camera = "frontview"
self._channel_first = channel_first
self._use_depth = camera_depths
self._max_episode_steps = episode_length
self._hard_reset = hard_reset
assert mode in ["train", "eval-easy", "eval-hard", "eval-extreme"]
self._mode = mode
self._scene_id = scene_id
self.env = env
self.secant_modders = dict.fromkeys(["texture"])
if custom_reset_config is not None:
if isinstance(custom_reset_config, str):
custom_reset_config = ALL_PRESET_ARGUMENTS.get(
custom_reset_config, None
)
self._reset_config = custom_reset_config
xml_string = custom_reset_config.get("xml_string")
self._initialize_xml_modder(custom_reset_config.get("custom_texture", None))
if (
xml_string is not None
or custom_reset_config.get("custom_texture") is not None
):
self.reset_xml_next = True
else:
self._reset_config = dict.fromkeys(
[
"xml_string",
"custom_texture",
]
)
self.reset_xml_next = False
obs_dict = self.env.observation_spec()
self.observation_space = get_obs_shape_from_dict(self._reformat_obs(obs_dict))
low, high = self.env.action_spec
self.action_space = Box(low=low, high=high)
def _initialize_modders(self):
if self.randomize_color:
self.tex_modder = TextureModder(
sim=self.env.sim, random_state=self.random_state, **self.color_randomization_args
)
self.modders.append(self.tex_modder)
if self.randomize_camera:
self.camera_modder = CameraModder(
sim=self.env.sim,
random_state=self.random_state,
**self.camera_randomization_args,
)
self.modders.append(self.camera_modder)
if self.randomize_lighting:
self.light_modder = LightingModder(
sim=self.env.sim,
random_state=self.random_state,
**self.lighting_randomization_args,
)
self.modders.append(self.light_modder)
# if self.randomize_dynamics:
# self.dynamics_modder = DynamicsModder(
# sim=self.env.sim,
# random_state=self.random_state,
# **self.dynamics_randomization_args,
# )
# self.modders.append(self.dynamics_modder)
def _initialize_xml_modder(self, custom_texture: Optional[Union[str, dict]]):
if custom_texture is not None:
if isinstance(custom_texture, str):
custom_texture = ALL_TEXTURE_PRESETS.get(custom_texture, None)
config = custom_texture.copy()
if custom_texture.get("tex_to_change", None) is None:
config["tex_to_change"] = DEFAULT_TASK_TEXTURE_LIST[self.task]
self.secant_modders["texture"] = XMLTextureModder(**config)
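    # Note the two levels of appearance randomization in this wrapper: the XMLTextureModder
    # rewrites the MuJoCo XML itself (so it only takes effect through reset_from_xml_string),
    # while the Texture/Camera/Lighting modders above perturb an already-compiled sim and are
    # re-applied on every reset via randomize_domain().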
def _reformat_obs(self, obs_dict, disable_channel_first=False):
rgb_obs = {}
state_obs = []
reformatted = {}
for name, obs in obs_dict.items():
if name.endswith("_image") and "rgb" in self.obs_modality:
view_name = name[:-6]
if self._use_depth:
depth_obs = obs_dict[view_name + "_depth"]
obs = np.concatenate([obs, depth_obs[:, :, np.newaxis]], axis=2)
obs = np.flipud(obs)
if self._channel_first and not disable_channel_first:
obs = np.transpose(obs, (2, 0, 1))
rgb_obs[view_name] = obs
elif name.endswith("state") and "state" in self.obs_modality:
state_obs.append(obs)
if "rgb" in self.obs_modality:
if len(rgb_obs.keys()) == 1:
rgb_obs = list(rgb_obs.values())[0]
reformatted["rgb"] = rgb_obs
if "state" in self.obs_modality:
reformatted["state"] = np.concatenate(state_obs)
return reformatted
def get_mujoco_xml(self):
return CustomMujocoXML.build_from_env(self)
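    # reset() flow: (1) if a new xml_string/custom_texture was passed (or queued via
    # reset_xml_next), rebuild the model from XML so baked-in texture changes take effect,
    # otherwise perform a normal reset; (2) rebuild the sim-level modders against the fresh
    # sim, call randomize_domain(), and run one more internal reset so the first returned
    # observation already reflects the randomized appearance.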
def reset(
self,
xml_string: Optional[Union[str, List]] = None,
custom_texture: Optional[Union[str, dict]] = None,
):
if xml_string is not None:
if xml_string != self._reset_config.get("xml_string", None):
self._reset_config["xml_string"] = xml_string.copy()
else:
xml_string = None
if custom_texture is not None:
if custom_texture != self._reset_config.get("custom_texture", None):
self._reset_config["custom_texture"] = copy.deepcopy(custom_texture)
self._initialize_xml_modder(custom_texture)
else:
custom_texture = None
# reset from xml should only be called if a different texture/xml is requested
reset_from_xml = (
xml_string is not None or custom_texture is not None or self.reset_xml_next
)
if reset_from_xml:
self._reset_from_xml(xml_string)
self.env.deterministic_reset = False
else:
self.env.reset()
# self.env.reset()
self.env._reset_internal()
self.reset_xml_next = False
if self.moving_light:
self.step_pos = 0.4
self.step_diffuse = 0.01
self.light_pos_range = [self.env.sim.model.light_pos[:,1] - 20, self.env.sim.model.light_pos[:,1] + 20]
self.light_diffuse_range = [max(self.env.sim.model.light_diffuse[:,1] - 0.5, 0.15), min(self.env.sim.model.light_diffuse[:,1] + 0.2, 0.95)]
# TODO may change the order
# self.restore_default_domain()
# # save the original env parameters
# self.save_default_domain()
# reset counter for doing domain randomization at a particular frequency
# self.step_counter = 0
# update sims
self._initialize_modders()
for modder in self.modders:
modder.update_sim(self.env.sim)
self.randomize_domain()
self.env.deterministic_reset = False
self.env._reset_internal()
return self._reformat_obs(self.env._get_observations(force_update=True))
def _reset_from_xml(self, xml_string):
if xml_string is not None:
if isinstance(xml_string, list):
xml_string = np.random.choice(xml_string)
try:
root = ET.fromstring(xml_string)
except ET.ParseError:
raise ValueError("Input xml_string is not a valid XML string")
mujoco_xml = CustomMujocoXML.build_from_element(root)
else:
mujoco_xml = self.get_mujoco_xml()
# f = open("twoarmsaaa.xml", 'w')
# f.write(mujoco_xml.to_string())
# f.close()
if self.secant_modders["texture"] is not None:
self.secant_modders["texture"].random_texture_change(mujoco_xml) # TODO may change the texture
self.env.reset_from_xml_string(mujoco_xml.to_string())
def get_tex_candidate(self):
"""
Get a tex_candidate dictionary from the current env. The tex_candidate
dictionary can be passed to custom_texture in reset()
"""
mujoco_xml = self.get_mujoco_xml()
texture_list = DEFAULT_TASK_TEXTURE_LIST[self.task]
tex_candidates = {}
for alias in texture_list: | mat_name = DEFAULT_TEXTURE_ALIAS[alias] | 4 | 2023-12-04 10:01:22+00:00 | 12k |
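A rough construction sketch for the wrapper defined in this row. Only the keyword names come from the __init__ signature above; the task name, robot, camera, controller choice, and the empty randomization-arg dicts are placeholder assumptions.

# Illustrative only: concrete values below are assumptions, not taken from the row above.
import robosuite as suite
from robosuite.controllers import load_controller_config

def make_vgb_env(task="Lift", seed=0):  # hypothetical helper name
    base_env = suite.make(
        env_name=task,
        robots="Panda",
        controller_configs=load_controller_config(default_controller="OSC_POSE"),
        has_renderer=False,
        has_offscreen_renderer=True,
        use_camera_obs=True,
        camera_names="frontview",
    )
    # Empty dicts defer to the robosuite modders' own defaults; real experiments would
    # specify perturbation ranges here.
    return VGBWrapper(
        env=base_env,
        color_randomization_args={},
        camera_randomization_args={},
        lighting_randomization_args={},
        dynamics_randomization_args={},
        task=task,
        seed=seed,
        mode="train",
        scene_id=0,
    )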
cmu-ci-lab/volumetric_opaque_solids | exp_runner.py | [
{
"identifier": "Dataset",
"path": "models/dataset.py",
"snippet": "class Dataset:\n def __init__(self, conf):\n super(Dataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.data_dir = conf.get_string('data_dir')\n self.render_cameras_name = conf.get_string('render_cameras_name')\n self.object_cameras_name = conf.get_string('object_cameras_name')\n\n self.camera_outside_sphere = conf.get_bool('camera_outside_sphere', default=True)\n self.scale_mat_scale = conf.get_float('scale_mat_scale', default=1.1)\n\n camera_dict = np.load(os.path.join(self.data_dir, self.render_cameras_name))\n self.camera_dict = camera_dict\n self.images_lis = sorted(glob(os.path.join(self.data_dir, 'image/*.png')))\n self.n_images = len(self.images_lis)\n self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 256.0\n self.masks_lis = sorted(glob(os.path.join(self.data_dir, 'mask/*.png')))\n self.masks_np = np.stack([cv.imread(im_name) for im_name in self.masks_lis]) / 256.0\n\n # world_mat is a projection matrix from world to image\n self.world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n\n self.scale_mats_np = []\n\n # scale_mat: used for coordinate normalization, we assume the scene to render is inside a unit sphere at origin.\n self.scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n\n self.intrinsics_all = []\n self.pose_all = []\n\n for scale_mat, world_mat in zip(self.scale_mats_np, self.world_mats_np):\n P = world_mat @ scale_mat\n P = P[:3, :4]\n intrinsics, pose = load_K_Rt_from_P(None, P)\n self.intrinsics_all.append(torch.from_numpy(intrinsics).float())\n self.pose_all.append(torch.from_numpy(pose).float())\n\n self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.masks = torch.from_numpy(self.masks_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device) # [n_images, 4, 4]\n self.intrinsics_all_inv = torch.inverse(self.intrinsics_all) # [n_images, 4, 4]\n self.focal = self.intrinsics_all[0][0, 0]\n self.pose_all = torch.stack(self.pose_all).to(self.device) # [n_images, 4, 4]\n self.H, self.W = self.images.shape[1], self.images.shape[2]\n self.image_pixels = self.H * self.W\n\n object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0])\n object_bbox_max = np.array([ 1.01, 1.01, 1.01, 1.0])\n # Object scale mat: region of interest to **extract mesh**\n object_scale_mat = np.load(os.path.join(self.data_dir, self.object_cameras_name))['scale_mat_0']\n object_bbox_min = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_min[:, None]\n object_bbox_max = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_max[:, None]\n self.object_bbox_min = object_bbox_min[:3, 0]\n self.object_bbox_max = object_bbox_max[:3, 0]\n\n print('Load data: End')\n\n def gen_rays_at(self, img_idx, resolution_level=1):\n \"\"\"\n Generate rays at world space from one camera.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l)\n ty = torch.linspace(0, self.H - 1, self.H // l)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3\n rays_v = 
torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1)\n\n def gen_random_rays_at(self, img_idx, batch_size):\n \"\"\"\n Generate random rays at world space from one camera.\n \"\"\"\n pixels_x = torch.randint(low=0, high=self.W, size=[batch_size])\n pixels_y = torch.randint(low=0, high=self.H, size=[batch_size])\n color = self.images[img_idx][(pixels_y.cpu(), pixels_x.cpu())] # batch_size, 3\n mask = self.masks[img_idx][(pixels_y.cpu(), pixels_x.cpu())] # batch_size, 3\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float() # batch_size, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # batch_size, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3\n rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3\n return torch.cat([rays_o.cpu(), rays_v.cpu(), color, mask[:, :1]], dim=-1).cuda() # batch_size, 10\n\n def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):\n \"\"\"\n Interpolate pose between two cameras.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l)\n ty = torch.linspace(0, self.H - 1, self.H // l)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n p = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3\n trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio\n pose_0 = self.pose_all[idx_0].detach().cpu().numpy()\n pose_1 = self.pose_all[idx_1].detach().cpu().numpy()\n pose_0 = np.linalg.inv(pose_0)\n pose_1 = np.linalg.inv(pose_1)\n rot_0 = pose_0[:3, :3]\n rot_1 = pose_1[:3, :3]\n rots = Rot.from_matrix(np.stack([rot_0, rot_1]))\n key_times = [0, 1]\n slerp = Slerp(key_times, rots)\n rot = slerp(ratio)\n pose = np.diag([1.0, 1.0, 1.0, 1.0])\n pose = pose.astype(np.float32)\n pose[:3, :3] = rot.as_matrix()\n pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]\n pose = np.linalg.inv(pose)\n rot = torch.from_numpy(pose[:3, :3]).cuda()\n trans = torch.from_numpy(pose[:3, 3]).cuda()\n rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n rays_o = trans[None, None, :3].expand(rays_v.shape) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1)\n\n def near_far_from_sphere(self, rays_o, rays_d):\n a = torch.sum(rays_d**2, dim=-1, keepdim=True)\n b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)\n mid = 0.5 * (-b) / a\n near = mid - 1.0\n far = mid + 1.0\n return near, far\n\n def image_at(self, idx, resolution_level):\n img = cv.imread(self.images_lis[idx])\n return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255)"
},
{
"identifier": "PointSampler",
"path": "models/sampler.py",
"snippet": "class PointSampler:\n def __init__(self,\n n_sdf_pts = 1024,\n n_fg_samples = 28,\n n_surf_samples = 8,\n n_bg_samples = 28,\n n_outside = 32,\n use_random_binary_search = False,\n use_sdf_offset = False):\n # number of initial evaluations of sdf along each ray\n self.n_sdf_pts = n_sdf_pts\n self.n_sdf_samples = 10\n\n # number of points sampled per ray in the foreground, surface interval, and background\n self.n_fg_samples = n_fg_samples\n self.n_surf_samples = n_surf_samples\n self.n_bg_samples = n_bg_samples\n\n # total number of (primary, non-background) samples along each ray\n self.n_total_samples = n_fg_samples + n_surf_samples + n_bg_samples\n\n # number of points sampled per ray in background\n self.n_outside = n_outside\n\n self.use_random_binary_search = use_random_binary_search\n self.use_sdf_offset = use_sdf_offset\n\n def eval_at_points(self, rays_o, rays_d, depth, f):\n pts = rays_o[:, None, :] + rays_d[:, None, :] * depth[:, :, None]\n with torch.no_grad():\n val = f(pts.reshape(-1, 3)).reshape(depth.shape[0], depth.shape[1]).squeeze(dim=-1)\n return val\n\n def sample_interval_with_random_binary_search(self, num_samples, start, stop, rays_o, rays_d, f):\n '''\n Performs a random binary search for the x such that f(x) = 0 given f(start) > 0 and f(stop) < 0\n returns the entire sequence of sampled points, sorted from smallest to largest z val.\n '''\n current_min, current_max = start, stop\n samples = torch.zeros((start.shape[0], num_samples))\n uniform_random = torch.rand(samples.shape)\n for i in range(num_samples):\n samples[:, i] = (current_max - current_min) * uniform_random[:, i] + current_min\n f_val = self.eval_at_points(rays_o, rays_d, samples[:, i].unsqueeze(dim=1), f)\n current_min = torch.where(f_val <= 0, current_min, samples[:, i])\n current_max = torch.where(f_val <= 0, samples[:, i], current_max)\n return torch.sort(samples)[0]\n \n def sample_interval_uniformly(self, n, start, stop):\n start = start if len(start.shape) == 1 else start.squeeze(dim=-1)\n stop = stop if len(stop.shape) == 1 else stop.squeeze(dim=-1)\n x = torch.linspace(0, 1.0 - 1.0 / n, n)[None, :]\n x = x * (stop - start)[:, None] + start[:, None]\n x += (torch.rand(start.shape[0]) * (stop - start) / n)[:, None]\n return x\n\n def _dense_sdf_evaluation(self, rays_o, rays_d, near, far, sdf_func):\n uniform_z = torch.linspace(0.0, 1.0, self.n_sdf_pts + 1)\n z = near + (far - near) * uniform_z[None, :]\n return z, self.eval_at_points(rays_o, rays_d, z, sdf_func)\n\n def _find_first_zero_crossing(self, sdf):\n prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:]\n sign_change = (next_sdf * prev_sdf < 0).long()\n return sign_change.argmax(1).long()\n\n def _compute_surface_z_bound(self, isect_idx, z, near, far):\n z_bounds = torch.gather(z, dim=1, index=torch.cat([isect_idx[:, None], isect_idx[:, None]+1], dim=1)).squeeze(dim=-1)\n return z_bounds[:, 0], z_bounds[:, 1]\n\n def sample_intersection(self, rays_o, rays_d, near, far, sdf_func, inv_std):\n with torch.no_grad():\n z, sdf = self._dense_sdf_evaluation(rays_o, rays_d, near, far, sdf_func)\n if self.use_sdf_offset:\n sdf += torch.normal(0, 1.0 / inv_std)\n\n isect_idx = self._find_first_zero_crossing(sdf) \n surf_lower, surf_upper = self._compute_surface_z_bound(isect_idx, z, near, far)\n \n has_isect = (isect_idx > 0).bool()\n no_isect = torch.logical_not(has_isect)\n\n # final depth samples buffers\n z_vals = torch.empty((rays_o.shape[0], self.n_total_samples))\n\n # depth map for visualization\n surf_z_image = 
torch.zeros_like(rays_o)\n\n if torch.any(has_isect):\n fg_z = self.sample_interval_uniformly(self.n_fg_samples, near[has_isect], surf_lower[has_isect])\n bg_z = self.sample_interval_uniformly(self.n_bg_samples, surf_upper[has_isect], far[has_isect])\n if not self.use_random_binary_search:\n surf_z = self.sample_interval_uniformly(self.n_surf_samples, surf_lower[has_isect], surf_upper[has_isect])\n else:\n surf_z = self.sample_interval_with_random_binary_search(self.n_surf_samples,\n surf_lower[has_isect],\n surf_upper[has_isect],\n rays_o[has_isect],\n rays_d[has_isect],\n sdf_func)\n z_vals[has_isect, :] = torch.cat([fg_z, surf_z, bg_z], dim=-1)\t\n \n # return z-val in image for debugging\n surf_lower_unit_z = (surf_lower - near.squeeze()) / (far - near).squeeze()\n surf_z_image[has_isect, :] = surf_lower_unit_z[has_isect, None].repeat(1, 3)\n\n if torch.any(no_isect):\n z_vals[no_isect, :] = self.sample_interval_uniformly(self.n_total_samples, near[no_isect], far[no_isect])\n \n return z_vals, surf_z_image \n\n def sample_outside(self, rays_o, rays_d, far):\t\n # Same as NeuS: https://github.com/Totoro97/NeuS/blob/6f96f96005d72a7a358379d2b576c496a1ab68dd/models/renderer.py#L292C19-L313\n if self.n_outside == 0:\n return None\n batch_size = len(rays_o)\n z_vals_outside = torch.linspace(1e-3, 1.0 - 1.0 / (self.n_outside + 1.0), self.n_outside)\n mids = .5 * (z_vals_outside[..., 1:] + z_vals_outside[..., :-1])\n upper = torch.cat([mids, z_vals_outside[..., -1:]], -1)\n lower = torch.cat([z_vals_outside[..., :1], mids], -1)\n t_rand = torch.rand([batch_size, z_vals_outside.shape[-1]])\n z_vals_outside = lower[None, :] + (upper - lower)[None, :] * t_rand\n z_vals_outside = far / torch.flip(z_vals_outside, dims=[-1]) + 1.0 / self.n_total_samples\n return z_vals_outside"
},
{
"identifier": "RenderingNetwork",
"path": "models/fields.py",
"snippet": "class RenderingNetwork(nn.Module):\n def __init__(self,\n d_feature,\n mode,\n d_in,\n d_out,\n d_hidden,\n n_layers,\n weight_norm=True,\n multires_view=0,\n squeeze_out=True):\n super().__init__()\n\n self.mode = mode\n self.squeeze_out = squeeze_out\n dims = [d_in + d_feature] + [d_hidden for _ in range(n_layers)] + [d_out]\n\n self.embedview_fn = None\n if multires_view > 0:\n embedview_fn, input_ch = get_embedder(multires_view)\n self.embedview_fn = embedview_fn\n dims[0] += (input_ch - 3)\n\n self.num_layers = len(dims)\n\n for l in range(0, self.num_layers - 1):\n out_dim = dims[l + 1]\n lin = nn.Linear(dims[l], out_dim)\n\n if weight_norm:\n lin = nn.utils.weight_norm(lin)\n\n setattr(self, \"lin\" + str(l), lin)\n\n self.relu = nn.ReLU()\n\n def forward(self, points, normals, view_dirs, feature_vectors):\n if self.embedview_fn is not None:\n view_dirs = self.embedview_fn(view_dirs)\n\n rendering_input = None\n\n if self.mode == 'idr':\n rendering_input = torch.cat([points, view_dirs, normals, feature_vectors], dim=-1)\n elif self.mode == 'no_view_dir':\n rendering_input = torch.cat([points, normals, feature_vectors], dim=-1)\n elif self.mode == 'no_normal':\n rendering_input = torch.cat([points, view_dirs, feature_vectors], dim=-1)\n\n x = rendering_input\n\n for l in range(0, self.num_layers - 1):\n lin = getattr(self, \"lin\" + str(l))\n\n x = lin(x)\n\n if l < self.num_layers - 2:\n x = self.relu(x)\n\n if self.squeeze_out:\n x = torch.sigmoid(x)\n return x"
},
{
"identifier": "SDFNetwork",
"path": "models/fields.py",
"snippet": "class SDFNetwork(nn.Module):\n def __init__(self,\n d_in,\n d_out,\n d_hidden,\n n_layers,\n skip_in=(4,),\n multires=0,\n bias=0.5,\n scale=1,\n geometric_init=True,\n weight_norm=True,\n inside_outside=False):\n super(SDFNetwork, self).__init__()\n\n dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out]\n\n self.embed_fn_fine = None\n if multires > 0:\n embed_fn, input_ch = get_embedder(multires, input_dims=d_in)\n self.embed_fn_fine = embed_fn\n dims[0] = input_ch\n\n self.num_layers = len(dims)\n self.skip_in = skip_in\n self.scale = scale\n\n for l in range(0, self.num_layers - 1):\n if l + 1 in self.skip_in:\n out_dim = dims[l + 1] - dims[0]\n else:\n out_dim = dims[l + 1]\n\n lin = nn.Linear(dims[l], out_dim)\n\n if geometric_init:\n if l == self.num_layers - 2:\n if not inside_outside:\n torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001)\n torch.nn.init.constant_(lin.bias, -bias)\n else:\n torch.nn.init.normal_(lin.weight, mean=-np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001)\n torch.nn.init.constant_(lin.bias, bias)\n elif multires > 0 and l == 0:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.constant_(lin.weight[:, 3:], 0.0)\n torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim))\n elif multires > 0 and l in self.skip_in:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\n torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3):], 0.0)\n else:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\n\n if weight_norm:\n lin = nn.utils.weight_norm(lin)\n\n setattr(self, \"lin\" + str(l), lin)\n\n self.activation = nn.Softplus(beta=100)\n\n def forward(self, inputs):\n inputs = inputs * self.scale\n if self.embed_fn_fine is not None:\n inputs = self.embed_fn_fine(inputs)\n\n x = inputs\n for l in range(0, self.num_layers - 1):\n lin = getattr(self, \"lin\" + str(l))\n\n if l in self.skip_in:\n x = torch.cat([x, inputs], 1) / np.sqrt(2)\n\n x = lin(x)\n\n if l < self.num_layers - 2:\n x = self.activation(x)\n return torch.cat([x[:, :1] / self.scale, x[:, 1:]], dim=-1)\n\n def sdf(self, x):\n return self.forward(x)[:, :1]\n\n def sdf_hidden_appearance(self, x):\n return self.forward(x)\n\n def sdf_with_gradient(self, x):\n x.requires_grad_(True)\n sdf_out = self.forward(x)\n sdf = sdf_out[:, :1]\n features = sdf_out[:, 1:]\n d_output = torch.ones_like(sdf, requires_grad=False, device=sdf.device)\n gradients = torch.autograd.grad(\n outputs=sdf,\n inputs=x,\n grad_outputs=d_output,\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n return sdf, features, gradients"
},
{
"identifier": "SingleVarianceNetwork",
"path": "models/fields.py",
"snippet": "class SingleVarianceNetwork(nn.Module):\n def __init__(self, init_val):\n super(SingleVarianceNetwork, self).__init__()\n self.register_parameter('variance', nn.Parameter(torch.tensor(init_val)))\n\n def forward(self, x):\n return torch.ones([len(x), 1]) * torch.exp(self.variance * 10.0)"
},
{
"identifier": "NeRF",
"path": "models/fields.py",
"snippet": "class NeRF(nn.Module):\n def __init__(self,\n D=8,\n W=256,\n d_in=3,\n d_in_view=3,\n multires=0,\n multires_view=0,\n output_ch=4,\n skips=[4],\n use_viewdirs=False):\n super(NeRF, self).__init__()\n self.D = D\n self.W = W\n self.d_in = d_in\n self.d_in_view = d_in_view\n self.input_ch = 3\n self.input_ch_view = 3\n self.embed_fn = None\n self.embed_fn_view = None\n\n if multires > 0:\n embed_fn, input_ch = get_embedder(multires, input_dims=d_in)\n self.embed_fn = embed_fn\n self.input_ch = input_ch\n\n if multires_view > 0:\n embed_fn_view, input_ch_view = get_embedder(multires_view, input_dims=d_in_view)\n self.embed_fn_view = embed_fn_view\n self.input_ch_view = input_ch_view\n\n self.skips = skips\n self.use_viewdirs = use_viewdirs\n\n self.pts_linears = nn.ModuleList(\n [nn.Linear(self.input_ch, W)] +\n [nn.Linear(W, W) if i not in self.skips else nn.Linear(W + self.input_ch, W) for i in range(D - 1)])\n\n ### Implementation according to the official code release\n ### (https://github.com/bmild/nerf/blob/master/run_nerf_helpers.py#L104-L105)\n self.views_linears = nn.ModuleList([nn.Linear(self.input_ch_view + W, W // 2)])\n\n ### Implementation according to the paper\n # self.views_linears = nn.ModuleList(\n # [nn.Linear(input_ch_views + W, W//2)] + [nn.Linear(W//2, W//2) for i in range(D//2)])\n\n if use_viewdirs:\n self.feature_linear = nn.Linear(W, W)\n self.alpha_linear = nn.Linear(W, 1)\n self.rgb_linear = nn.Linear(W // 2, 3)\n else:\n self.output_linear = nn.Linear(W, output_ch)\n\n def forward(self, input_pts, input_views):\n if self.embed_fn is not None:\n input_pts = self.embed_fn(input_pts)\n if self.embed_fn_view is not None:\n input_views = self.embed_fn_view(input_views)\n\n h = input_pts\n for i, l in enumerate(self.pts_linears):\n h = self.pts_linears[i](h)\n h = F.relu(h)\n if i in self.skips:\n h = torch.cat([input_pts, h], -1)\n\n if self.use_viewdirs:\n alpha = self.alpha_linear(h)\n feature = self.feature_linear(h)\n h = torch.cat([feature, input_views], -1)\n\n for i, l in enumerate(self.views_linears):\n h = self.views_linears[i](h)\n h = F.relu(h)\n\n rgb = self.rgb_linear(h)\n return alpha, rgb\n else:\n assert False"
},
{
"identifier": "AnisotropyNetwork",
"path": "models/fields.py",
"snippet": "class AnisotropyNetwork(nn.Module):\n def __init__(self, d_feature):\n super(AnisotropyNetwork, self).__init__()\n self.anisotropy_layer = nn.Linear(d_feature, 1)\n self.anisotropy_activation = lambda x: 1.0 - torch.sigmoid(x)\n \n def forward(self, x):\n out = self.anisotropy_layer(x)\n return self.anisotropy_activation(out)"
},
{
"identifier": "AttenuationCoefficient",
"path": "models/attenuation_coefficient.py",
"snippet": "class AttenuationCoefficient:\n def __init__(self, \n implicit_distribution = 'gaussian', \n normal_distribution = 'linear_mixture'):\n self.implicit_distribution = implicit_distribution\n self.normal_distribution = normal_distribution\n self.density = Density.get(implicit_distribution)\n self.projected_area = ProjectedArea.get(normal_distribution)\n\n def __call__(self, ray_dir, mean_implicit, grad_mean_implicit, inv_std, anisotropy_param):\n sigma_perp = self.projected_area(ray_dir, grad_mean_implicit, anisotropy_param)\n sigma_parallel = self.density(mean_implicit, inv_std)\n return sigma_perp * sigma_parallel"
},
{
"identifier": "Renderer",
"path": "models/renderer.py",
"snippet": "class Renderer:\n def __init__(self,\n nerf,\n sdf_network,\n deviation_network,\n color_network,\n anisotropy_network,\n attenuation_coefficient,\n sampler):\n self.nerf = nerf\n self.sdf_network = sdf_network\n self.deviation_network = deviation_network\n self.color_network = color_network\n self.anisotropy_network = anisotropy_network\n self.attenuation_coefficient = attenuation_coefficient\n self.sampler = sampler\n\n def render_core_outside(self, rays_o, rays_d, z_vals, sample_dist, nerf, background_rgb=None):\n \"\"\"\n Render background\n \"\"\"\n batch_size, n_samples = z_vals.shape\n\n # section length\n dists = z_vals[..., 1:] - z_vals[..., :-1]\n dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1)\n mid_z_vals = z_vals + dists * 0.5\n\n # section midpoints\n pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # batch_size, n_samples, 3\n\n dis_to_center = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).clip(1.0, 1e10)\n pts = torch.cat([pts / dis_to_center, 1.0 / dis_to_center], dim=-1) # batch_size, n_samples, 4\n\n dirs = rays_d[:, None, :].expand(batch_size, n_samples, 3)\n\n pts = pts.reshape(-1, 4)\n dirs = dirs.reshape(-1, 3)\n\n # query neural fields\n density, sampled_color = nerf(pts, dirs)\n sampled_color = torch.sigmoid(sampled_color)\n\n # compute alpha\n alpha = 1.0 - torch.exp(-F.softplus(density.reshape(batch_size, n_samples)) * dists)\n alpha = alpha.reshape(batch_size, n_samples)\n\n # aggregate along rays\n weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1]\n sampled_color = sampled_color.reshape(batch_size, n_samples, 3)\n color = (weights[:, :, None] * sampled_color).sum(dim=1)\n if background_rgb is not None:\n color = color + background_rgb * (1.0 - weights.sum(dim=-1, keepdim=True))\n\n return {\n 'color': color,\n 'sampled_color': sampled_color,\n 'alpha': alpha,\n 'weights': weights,\n }\n\n def render_core(self,\n rays_o,\n rays_d,\n z_vals,\n sample_dist,\n sdf_network,\n deviation_network,\n color_network,\n background_alpha=None,\n background_sampled_color=None,\n background_rgb=None,\n annealed_anisotropy=1.0):\n batch_size, n_samples = z_vals.shape\n\n # section length\n dists = z_vals[..., 1:] - z_vals[..., :-1]\n dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1)\n mid_z_vals = z_vals + dists * 0.5\n\n # section midpoints\n pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # n_rays, n_samples, 3\n dirs = rays_d[:, None, :].expand(pts.shape)\n\n pts = pts.reshape(-1, 3)\n dirs = dirs.reshape(-1, 3)\n \n # query neural fields\n sdf, feature_vector, sdf_gradients = sdf_network.sdf_with_gradient(pts)\n sampled_color = color_network(pts, sdf_gradients, dirs, feature_vector).reshape(batch_size, n_samples, 3)\n inv_s = deviation_network(sdf).clip(1e-6, 1e6)\n\n anisotropy_param = annealed_anisotropy\n if self.anisotropy_network is not None:\n anisotropy_param = self.anisotropy_network(feature_vector) \n\n # compute transmittance based on SOS\n interval_lengths = dists.reshape(-1, 1)\n sigma = self.attenuation_coefficient(dirs, sdf, sdf_gradients, inv_s, anisotropy_param)\n alpha = 1.0 - torch.exp(-sigma * interval_lengths).reshape(batch_size, n_samples)\n \n pts_norm = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).reshape(batch_size, n_samples)\n inside_sphere = (pts_norm < 1.0).float().detach()\n relax_inside_sphere = (pts_norm < 1.2).float().detach()\n\n # 
aggregate along rays\n if background_alpha is not None:\n alpha = alpha * inside_sphere + background_alpha[:, :n_samples] * (1.0 - inside_sphere)\n alpha = torch.cat([alpha, background_alpha[:, n_samples:]], dim=-1)\n sampled_color = sampled_color * inside_sphere[:, :, None] +\\\n background_sampled_color[:, :n_samples] * (1.0 - inside_sphere)[:, :, None]\n sampled_color = torch.cat([sampled_color, background_sampled_color[:, n_samples:]], dim=1)\n\n weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1]\n weights_sum = weights.sum(dim=-1, keepdim=True)\n\n color = (sampled_color * weights[:, :, None]).sum(dim=1)\n if background_rgb is not None: # Fixed background, usually black\n color = color + background_rgb * (1.0 - weights_sum)\n\n # Eikonal loss\n gradient_error = (torch.linalg.norm(sdf_gradients.reshape(batch_size, n_samples, 3), ord=2,\n dim=-1) - 1.0) ** 2\n gradient_error = (relax_inside_sphere * gradient_error).sum() / (relax_inside_sphere.sum() + 1e-5)\n\n return {\n 'color': color,\n 'sdf': sdf,\n 'dists': dists,\n 'gradients': sdf_gradients.reshape(batch_size, n_samples, 3),\n 's_val': 1.0 / inv_s,\n 'mid_z_vals': mid_z_vals,\n 'weights': weights,\n 'gradient_error': gradient_error,\n 'inside_sphere': inside_sphere\n }\n\n def render(self, rays_o, rays_d, near, far, background_rgb=None, annealed_anisotropy=1.0):\n # sample points along rays\n inv_s = self.deviation_network(torch.tensor([1])).clip(1e-6, 1e6)\n z_vals, surf_z_image = self.sampler.sample_intersection(rays_o, rays_d, near, far, self.sdf_network.sdf, inv_s)\n z_vals_outside = self.sampler.sample_outside(rays_o, rays_d, far)\n sample_dist = 2.0 / self.sampler.n_total_samples\n\n background_alpha = None\n background_sampled_color = None\n\n # Background model\n if z_vals_outside is not None:\n z_vals_feed = torch.cat([z_vals, z_vals_outside], dim=-1)\n z_vals_feed, _ = torch.sort(z_vals_feed, dim=-1)\n ret_outside = self.render_core_outside(rays_o, rays_d, z_vals_feed, sample_dist, self.nerf)\n \n background_sampled_color = ret_outside['sampled_color']\n background_alpha = ret_outside['alpha']\n\n # render core\n ret_fine = self.render_core(rays_o,\n rays_d,\n z_vals,\n sample_dist,\n self.sdf_network,\n self.deviation_network,\n self.color_network,\n background_rgb=background_rgb,\n background_alpha=background_alpha,\n background_sampled_color=background_sampled_color,\n annealed_anisotropy=annealed_anisotropy)\n\n batch_size = len(rays_o)\n color_fine = ret_fine['color']\n weights = ret_fine['weights']\n weights_sum = weights.sum(dim=-1, keepdim=True)\n gradients = ret_fine['gradients']\n s_val = ret_fine['s_val'].reshape(batch_size, self.sampler.n_total_samples).mean(dim=-1, keepdim=True)\n\n return {\n 'color_fine': color_fine,\n 's_val': s_val,\n 'weight_sum': weights_sum,\n 'weight_max': torch.max(weights, dim=-1, keepdim=True)[0],\n 'gradients': gradients,\n 'weights': weights,\n 'gradient_error': ret_fine['gradient_error'],\n 'inside_sphere': ret_fine['inside_sphere'],\n 'depth': surf_z_image.detach().cpu().numpy()\n } "
},
{
"identifier": "read_mesh",
"path": "models/util.py",
"snippet": "def read_mesh(path, quad = False):\n V = []\n F = []\n with open(path) as file:\n for line in file:\n tokens = line.strip('\\n').split(' ')\n if tokens[0] == 'v':\n V.append(np.array([float(tokens[1]), float(tokens[2]), float(tokens[3])]))\n \n if tokens[0] == 'f':\n if quad:\n F.append(np.array([int(tokens[1]), int(tokens[2]), int(tokens[3]), int(tokens[4])]))\n else:\n F.append(np.array([int(tokens[1]), int(tokens[2]), int(tokens[3])]))\n\n return np.array(V), np.array(F)"
},
{
"identifier": "write_mesh",
"path": "models/util.py",
"snippet": "def write_mesh(path, vertices, faces, data, quad = False):\n with open(path, 'w') as out:\n out.write('# OBJ file\\n')\n\n for i in range(vertices.shape[0]):\n out.write('v {:.8f} {:.8f} {:.8f} \\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2]))\n\n for i in range(data.shape[0]):\n out.write('vt {:.8f} 0 \\n'.format(data[i]))\n\n for i in range(faces.shape[0]):\n fi = faces[i, 0]\n fj = faces[i, 1]\n fk = faces[i, 2]\n if quad:\n fl = faces[i, 3]\n out.write('f {:d}/{:d} {:d}/{:d} {:d}/{:d} {:d}/{:d}\\n'.format(fi, fi, fj, fj, fk, fk, fl, fl))\n else:\n out.write('f {:d}/{:d} {:d}/{:d} {:d}/{:d}\\n'.format(fi, fi, fj, fj, fk, fk))"
},
{
"identifier": "extract_geometry",
"path": "models/util.py",
"snippet": "def extract_geometry(bound_min, bound_max, resolution, threshold, query_func):\n print('threshold: {}'.format(threshold))\n u = extract_fields(bound_min, bound_max, resolution, query_func)\n vertices, triangles = mcubes.marching_cubes(u, threshold)\n b_max_np = bound_max.detach().cpu().numpy()\n b_min_np = bound_min.detach().cpu().numpy()\n vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :]\n return vertices, triangles"
}
] | import os
import time
import logging
import argparse
import numpy as np
import cv2 as cv
import trimesh
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from shutil import copyfile
from tqdm import tqdm
from pyhocon import ConfigFactory
from models.dataset import Dataset
from models.sampler import PointSampler
from models.fields import (
RenderingNetwork,
SDFNetwork,
SingleVarianceNetwork,
NeRF,
AnisotropyNetwork
)
from models.attenuation_coefficient import AttenuationCoefficient
from models.renderer import Renderer
from models.util import read_mesh, write_mesh, extract_geometry | 10,252 |
logging.getLogger('matplotlib.font_manager').disabled = True
class Runner:
def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False, max_n_training_images=-1):
self.device = torch.device('cuda')
# Configuration
self.conf_path = conf_path
f = open(self.conf_path)
conf_text = f.read()
conf_text = conf_text.replace('CASE_NAME', case)
f.close()
self.conf = ConfigFactory.parse_string(conf_text)
self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case)
self.base_exp_dir = self.conf['general.base_exp_dir']
os.makedirs(self.base_exp_dir, exist_ok=True)
self.dataset = Dataset(self.conf['dataset'])
self.iter_step = 0
# Training parameters
self.end_iter = self.conf.get_int('train.end_iter')
self.save_freq = self.conf.get_int('train.save_freq')
self.report_freq = self.conf.get_int('train.report_freq')
self.val_freq = self.conf.get_int('train.val_freq')
self.val_mesh_freq = self.conf.get_int('train.val_mesh_freq')
self.viz_deviation_freq = self.conf.get_int('train.viz_deviation_freq', 0)
self.batch_size = self.conf.get_int('train.batch_size')
self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level')
self.learning_rate = self.conf.get_float('train.learning_rate')
self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha')
self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd')
self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0)
self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0)
self.max_n_training_images = max_n_training_images
# Weights
self.igr_weight = self.conf.get_float('train.igr_weight')
self.mask_weight = self.conf.get_float('train.mask_weight')
self.is_continue = is_continue
self.mode = mode
self.model_list = []
self.writer = None
# Networks
params_to_train = []
self.sdf_network = SDFNetwork(**self.conf['model.sdf_network']).to(self.device)
|
logging.getLogger('matplotlib.font_manager').disabled = True
class Runner:
def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False, max_n_training_images=-1):
self.device = torch.device('cuda')
# Configuration
self.conf_path = conf_path
f = open(self.conf_path)
conf_text = f.read()
conf_text = conf_text.replace('CASE_NAME', case)
f.close()
self.conf = ConfigFactory.parse_string(conf_text)
self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case)
self.base_exp_dir = self.conf['general.base_exp_dir']
os.makedirs(self.base_exp_dir, exist_ok=True)
self.dataset = Dataset(self.conf['dataset'])
self.iter_step = 0
# Training parameters
self.end_iter = self.conf.get_int('train.end_iter')
self.save_freq = self.conf.get_int('train.save_freq')
self.report_freq = self.conf.get_int('train.report_freq')
self.val_freq = self.conf.get_int('train.val_freq')
self.val_mesh_freq = self.conf.get_int('train.val_mesh_freq')
self.viz_deviation_freq = self.conf.get_int('train.viz_deviation_freq', 0)
self.batch_size = self.conf.get_int('train.batch_size')
self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level')
self.learning_rate = self.conf.get_float('train.learning_rate')
self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha')
self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd')
self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0)
self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0)
self.max_n_training_images = max_n_training_images
# Weights
self.igr_weight = self.conf.get_float('train.igr_weight')
self.mask_weight = self.conf.get_float('train.mask_weight')
self.is_continue = is_continue
self.mode = mode
self.model_list = []
self.writer = None
# Networks
params_to_train = []
self.sdf_network = SDFNetwork(**self.conf['model.sdf_network']).to(self.device) | self.deviation_network = SingleVarianceNetwork(**self.conf['model.variance_network']).to(self.device) | 4 | 2023-11-28 03:13:44+00:00 | 12k |
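The `PointSampler` snippet in the record above brackets the SDF zero crossing along a ray with a "random binary search" (see the docstring of `sample_interval_with_random_binary_search`). As a reading aid only, here is a minimal scalar sketch of that bracketing idea, assuming a 1-D function `f` with `f(lo) > 0` and `f(hi) < 0`; the function name, seed, and test function are illustrative and not taken from the repository.

```python
# Scalar sketch of the random-binary-search bracketing described above:
# draw a uniform sample inside the current bracket, then shrink whichever
# side still contains the sign change (mirrors the torch.where update in
# the PointSampler snippet, but for a single scalar interval).
import random


def random_binary_search(f, lo, hi, num_samples=8, seed=0):
    rng = random.Random(seed)
    samples = []
    for _ in range(num_samples):
        x = lo + (hi - lo) * rng.random()
        samples.append(x)
        if f(x) <= 0:
            hi = x  # crossing lies in [lo, x]
        else:
            lo = x  # crossing lies in [x, hi]
    return sorted(samples)


if __name__ == "__main__":
    # f changes sign at x = 0.37; the returned samples concentrate around it.
    print(random_binary_search(lambda x: 0.37 - x, lo=0.0, hi=1.0))
```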
strollby/graphene-directives | graphene_directives/main.py | [
{
"identifier": "Schema",
"path": "graphene_directives/schema.py",
"snippet": "class Schema(GrapheneSchema):\n def __init__(\n self,\n query: graphene.ObjectType = None,\n mutation: graphene.ObjectType = None,\n subscription: graphene.ObjectType = None,\n types: list[graphene.ObjectType] = None,\n directives: Union[Collection[GraphQLDirective], None] = None,\n auto_camelcase: bool = True,\n schema_directives: Collection[SchemaDirective] = None,\n include_graphql_spec_directives: bool = True,\n ):\n \"\"\"\n Schema Definition.\n\n Args:\n query (Type[ObjectType]): Root query *ObjectType*. Describes entry point for fields to *read*\n data in your Schema.\n mutation (Optional[Type[ObjectType]]): Root mutation *ObjectType*. Describes entry point for\n fields to *create, update or delete* data in your API.\n subscription (Optional[Type[ObjectType]]): Root subscription *ObjectType*. Describes entry point\n for fields to receive continuous updates.\n types (Optional[Collection[Type[ObjectType]]]): List of any types to include in schema that\n may not be introspected through root types.\n directives (List[GraphQLDirective], optional): List of custom directives to include in the\n GraphQL schema.\n auto_camelcase (bool): Fieldnames will be transformed in Schema's TypeMap from snake_case\n to camelCase (preferred by GraphQL standard). Default True.\n schema_directives (Collection[SchemaDirective]): Directives that can be defined at DIRECTIVE_LOCATION.SCHEMA\n with their argument values.\n include_graphql_spec_directives (bool): Includes directives defined by GraphQL spec (@include, @skip,\n @deprecated, @specifiedBy)\n \"\"\"\n\n self.custom_directives = directives or []\n self.schema_directives = schema_directives or []\n self.auto_camelcase = auto_camelcase\n self.directives_used: dict[str, GraphQLDirective] = {}\n\n directives = tuple(self.custom_directives) + (\n tuple(specified_directives) if include_graphql_spec_directives else ()\n )\n super().__init__(\n query=query,\n mutation=mutation,\n subscription=subscription,\n types=types,\n directives=directives,\n auto_camelcase=auto_camelcase,\n )\n\n def field_name_to_type_attribute(\n self, model: graphene.ObjectType\n ) -> Callable[[str], str]:\n \"\"\"\n Create field name conversion method (from schema name to actual graphene_type attribute name).\n\n Args:\n model (ObjectType): model whose field name is to be converted\n\n Returns:\n (str) -> (str)\n \"\"\"\n field_names = {}\n if self.auto_camelcase:\n field_names = {\n to_camel_case(attr_name): attr_name\n for attr_name in getattr(model._meta, \"fields\", []) # noqa\n }\n return lambda schema_field_name: field_names.get(\n schema_field_name, schema_field_name\n )\n\n def type_attribute_to_field_name(self, attribute: str) -> str:\n \"\"\"\n Create a conversion method to convert from graphene_type attribute name to the schema field name.\n \"\"\"\n if self.auto_camelcase:\n return to_camel_case(attribute)\n return attribute\n\n def _add_argument_decorators(\n self,\n entity_name: str,\n required_directive_field_types: set[DirectiveLocation],\n args: dict[str, GraphQLArgument],\n ) -> str:\n \"\"\"\n For a given field, go through all its args and see if any directive decorator needs to be added.\n \"\"\"\n\n if not args:\n return \"\"\n\n # If every arg does not have a description, print them on one line.\n print_single_line = not any(arg.description for arg in args.values())\n indentation: str = \" \"\n new_args = []\n\n str_field = \"(\" if print_single_line else \"(\\n\"\n\n for i, (name, arg) in enumerate(args.items()):\n if print_single_line:\n base_str 
= f\"{print_input_value(name, arg)} \"\n else:\n base_str = (\n print_description(arg, f\" {indentation}\", not i)\n + f\" {indentation}\"\n + f\"{print_input_value(name, arg)} \"\n )\n directives = []\n for directive in self.custom_directives:\n if has_field_attribute(arg, directive):\n directive_values = get_field_attribute_value(arg, directive)\n meta_data: CustomDirectiveMeta = getattr(\n directive, \"_graphene_directive\"\n )\n\n if (\n not required_directive_field_types.intersection(\n set(directive.locations)\n )\n and len(required_directive_field_types) != 0\n ):\n raise DirectiveValidationError(\n \"\\n\".join(\n [\n f\"{str(directive)} cannot be used at argument {name} level\",\n f\"\\tat {entity_name}\",\n f\"\\tallowed: {directive.locations}\",\n f\"\\trequired: {required_directive_field_types}\",\n ]\n )\n )\n\n for directive_value in directive_values:\n if meta_data.input_transform is not None:\n directive_value = arg_camel_case(\n meta_data.input_transform(\n arg_snake_case(directive_value), self\n )\n )\n\n directive_str = decorator_string(directive, **directive_value)\n directives.append(directive_str)\n\n new_args.append(base_str + \" \".join(directives))\n\n if print_single_line:\n str_field += \", \".join(new_args) + \")\"\n else:\n str_field += \"\\n\".join(new_args) + f\"\\n{indentation})\"\n\n return str_field\n\n def _add_field_decorators(self, graphene_types: set, string_schema: str) -> str:\n \"\"\"\n For a given entity, go through all its fields and see if any directive decorator needs to be added.\n\n This method simply goes through the fields that need to be modified and replace them with their annotated\n version in the schema string representation.\n \"\"\"\n\n for graphene_type in graphene_types:\n entity_name = graphene_type._meta.name # noqa\n\n entity_type = self.graphql_schema.get_type(entity_name)\n get_field_graphene_type = self.field_name_to_type_attribute(graphene_type)\n\n required_directive_locations = set()\n\n if is_object_type(entity_type) or is_interface_type(entity_type):\n required_directive_locations.union(\n {\n DirectiveLocation.FIELD_DEFINITION,\n DirectiveLocation.ARGUMENT_DEFINITION,\n }\n )\n elif is_enum_type(entity_type):\n required_directive_locations.add(DirectiveLocation.ENUM_VALUE)\n elif is_input_type(entity_type):\n required_directive_locations.add(\n DirectiveLocation.INPUT_FIELD_DEFINITION\n )\n else:\n continue\n\n if is_enum_type(entity_type):\n fields: dict = entity_type.values\n else:\n fields: dict = entity_type.fields\n\n str_fields = []\n\n for field_name, field in fields.items():\n if is_enum_type(entity_type):\n str_field = enum_type_to_fields_string(\n get_single_field_type(\n entity_type, field_name, field, is_enum_type=True\n )\n )\n elif isinstance(field, GraphQLInputField):\n str_field = input_type_to_fields_string(\n get_single_field_type(entity_type, field_name, field)\n )\n elif isinstance(field, GraphQLField):\n str_field = entity_type_to_fields_string(\n get_single_field_type(entity_type, field_name, field)\n )\n\n # Replace Arguments with directives\n if hasattr(entity_type, \"_fields\"):\n _arg = entity_type._fields.args[0] # noqa\n if hasattr(_arg, self.type_attribute_to_field_name(field_name)):\n arg_field = getattr(\n _arg, self.type_attribute_to_field_name(field_name)\n )\n else:\n arg_field = {}\n\n if (\n hasattr(arg_field, \"args\")\n and arg_field.args is not None\n and isinstance(arg_field.args, dict)\n ):\n original_args = print_args(\n args=field.args, indentation=\" \"\n )\n replacement_args = 
self._add_argument_decorators(\n entity_name=entity_name,\n required_directive_field_types=required_directive_locations,\n args=arg_field.args,\n )\n str_field = str_field.replace(\n original_args, replacement_args\n )\n else:\n continue\n\n # Check if we need to annotate the field by checking if it has the decorator attribute set on the field.\n field = getattr(\n graphene_type, get_field_graphene_type(field_name), None\n )\n if field is None:\n # Append the string, but skip the directives\n str_fields.append(str_field)\n continue\n\n for directive in self.custom_directives:\n if not has_field_attribute(field, directive):\n continue\n directive_values = get_field_attribute_value(field, directive)\n\n meta_data: CustomDirectiveMeta = getattr(\n directive, \"_graphene_directive\"\n )\n\n if (\n not required_directive_locations.intersection(\n set(directive.locations)\n )\n and len(required_directive_locations) != 0\n ):\n raise DirectiveValidationError(\n \"\\n\".join(\n [\n f\"{str(directive)} cannot be used at field level\",\n f\"\\tat {entity_name}\",\n f\"\\tallowed: {directive.locations}\",\n f\"\\trequired: {required_directive_locations}\",\n ]\n )\n )\n\n for directive_value in directive_values:\n if (\n meta_data.field_validator is not None\n and not meta_data.field_validator(\n entity_type,\n field,\n arg_snake_case(directive_value),\n self,\n )\n ):\n raise DirectiveCustomValidationError(\n \", \".join(\n [\n f\"Custom Validation Failed for {str(directive)} with args: ({directive_value})\"\n f\"at field level {entity_name}:{field}\"\n ]\n )\n )\n\n if meta_data.input_transform is not None:\n directive_value = arg_camel_case(\n meta_data.input_transform(\n arg_snake_case(directive_value), self\n )\n )\n\n str_field += (\n f\" {decorator_string(directive, **directive_value)}\"\n )\n\n str_fields.append(str_field)\n\n str_fields_annotated = \"\\n\".join(str_fields)\n\n # Replace the original field declaration by the annotated one\n if is_object_type(entity_type):\n entity_type_name = \"type\"\n str_fields_original = entity_type_to_fields_string(entity_type)\n elif is_interface_type(entity_type):\n entity_type_name = \"interface\"\n str_fields_original = entity_type_to_fields_string(entity_type)\n elif is_enum_type(entity_type):\n entity_type_name = \"enum\"\n str_fields_original = enum_type_to_fields_string(entity_type)\n elif is_input_type(entity_type):\n entity_type_name = \"input\"\n str_fields_original = input_type_to_fields_string(entity_type)\n else:\n continue\n\n pattern = re.compile(\n r\"(%s\\s%s\\s[^\\{]*)\\{\\s*%s\\s*\\}\" # noqa\n % (entity_type_name, entity_name, re.escape(str_fields_original))\n )\n string_schema = pattern.sub(\n r\"\\g<1> {\\n%s\\n}\" % str_fields_annotated, string_schema\n )\n return string_schema\n\n def add_non_field_decorators(\n self, non_fields_type: set[GraphQLNamedType], string_schema: str\n ) -> str:\n for non_field in non_fields_type:\n entity_name = non_field._meta.name # noqa\n entity_type = self.graphql_schema.get_type(entity_name)\n\n required_directive_locations = set()\n\n if is_scalar_type(entity_type):\n non_field_pattern = rf\"(scalar {entity_name})\"\n required_directive_locations.add(DirectiveLocation.SCALAR)\n elif is_union_type(entity_type):\n non_field_pattern = rf\"(union {entity_name} )\"\n required_directive_locations.add(DirectiveLocation.UNION)\n elif is_object_type(entity_type):\n non_field_pattern = rf\"(type {entity_name} [^\\{{]*)\"\n required_directive_locations.add(DirectiveLocation.OBJECT)\n elif 
is_interface_type(entity_type):\n non_field_pattern = rf\"(interface {entity_name} [^\\{{]*)\"\n required_directive_locations.add(DirectiveLocation.INTERFACE)\n elif is_enum_type(entity_type):\n non_field_pattern = rf\"(enum {entity_name} [^\\{{]*)\"\n required_directive_locations.add(DirectiveLocation.ENUM)\n elif is_input_type(entity_type):\n non_field_pattern = rf\"(input {entity_name} [^\\{{]*)\"\n required_directive_locations.add(DirectiveLocation.INPUT_OBJECT)\n else:\n continue\n\n directive_annotations = []\n for directive in self.custom_directives:\n if has_non_field_attribute(non_field, directive):\n meta_data: CustomDirectiveMeta = getattr(\n directive, \"_graphene_directive\"\n )\n directive_values = get_non_field_attribute_value(\n non_field, directive\n )\n\n if (\n not required_directive_locations.intersection(\n set(directive.locations)\n )\n and len(required_directive_locations) != 0\n ):\n raise DirectiveValidationError(\n \"\\n\".join(\n [\n f\"{str(directive)} cannot be used at non field level\",\n f\"\\tat {entity_name}\",\n f\"\\tallowed: {directive.locations}\",\n f\"\\trequired: {required_directive_locations}\",\n ]\n )\n )\n\n for directive_value in directive_values:\n if (\n meta_data.non_field_validator is not None\n and not meta_data.non_field_validator(\n non_field, arg_snake_case(directive_value), self\n )\n ):\n raise DirectiveCustomValidationError(\n \", \".join(\n [\n f\"Custom Validation Failed for {str(directive)} with args: ({directive_value})\"\n f\"at non-field level {entity_name}\"\n ]\n )\n )\n if meta_data.input_transform is not None:\n directive_value = arg_camel_case(\n meta_data.input_transform(\n arg_snake_case(directive_value), self\n )\n )\n\n directive_annotations.append(\n f\"{decorator_string(directive, **directive_value)}\"\n )\n\n annotation = \" \".join(directive_annotations)\n annotation = (\n f\" {annotation}\" if is_scalar_type(entity_type) else f\"{annotation} \"\n )\n replace_str = rf\"\\1{annotation}\"\n pattern = re.compile(non_field_pattern)\n string_schema = pattern.sub(replace_str, string_schema)\n\n return string_schema\n\n def _get_directive_applied_non_field_types(self) -> set:\n \"\"\"\n Find all the directive applied non-field types from the schema.\n \"\"\"\n directives_types = set()\n schema_types = {\n **self.graphql_schema.type_map,\n **{\n \"Query\": self.graphql_schema.query_type,\n \"Mutation\": self.graphql_schema.mutation_type,\n },\n }\n\n for schema_type in schema_types.values():\n if not hasattr(schema_type, \"graphene_type\"):\n continue\n for directive in self.custom_directives:\n if has_non_field_attribute(schema_type.graphene_type, directive):\n self.directives_used[directive.name] = directive\n directives_types.add(schema_type.graphene_type)\n return directives_types\n\n def _get_directive_applied_field_types(self) -> set:\n \"\"\"\n Find all the directive applied field types from the schema.\n \"\"\"\n directives_fields = set()\n schema_types = {\n **self.graphql_schema.type_map,\n **{\n \"Query\": self.graphql_schema.query_type, # noqa\n \"Mutation\": self.graphql_schema.mutation_type, # noqa\n },\n }\n\n for _, entity_type in schema_types.items():\n if (\n not hasattr(entity_type, \"graphene_type\") # noqa:SIM101\n or isinstance(entity_type.graphene_type._meta, UnionOptions) # noqa\n or isinstance(entity_type.graphene_type._meta, ScalarOptions) # noqa\n ):\n continue\n\n fields = (\n list(entity_type.values.values()) # Enum class fields\n if is_enum_type(entity_type)\n else list(entity_type.fields) # 
noqa\n )\n\n for field in fields:\n field_type = (\n # auto-camelcasing can cause problems\n getattr(entity_type.graphene_type, to_camel_case(field), None)\n or getattr(entity_type.graphene_type, to_snake_case(field), None)\n if not is_enum_type(entity_type)\n else field.value\n )\n for directive_ in self.custom_directives:\n if has_field_attribute(field_type, directive_):\n self.directives_used[directive_.name] = directive_\n directives_fields.add(entity_type.graphene_type)\n\n # Handle Argument Decorators\n if (\n hasattr(field_type, \"args\")\n and field_type.args is not None\n and isinstance(field_type.args, dict)\n ):\n for arg_name, arg_type in field_type.args.items():\n if has_field_attribute(arg_type, directive_):\n if (\n DirectiveLocation.ARGUMENT_DEFINITION\n not in directive_.locations\n ):\n raise DirectiveValidationError(\n f\"{directive_} cannot be used at argument level at {entity_type}->{field}\"\n )\n self.directives_used[directive_.name] = directive_\n directives_fields.add(entity_type.graphene_type)\n\n return directives_fields\n\n def get_directives_used(self) -> list[GraphQLDirective]:\n \"\"\"\n Returns a list of directives used in the schema\n\n \"\"\"\n self._get_directive_applied_field_types()\n self._get_directive_applied_non_field_types()\n return list(self.directives_used.values())\n\n def __str__(self):\n string_schema = \"\"\n string_schema += extend_schema_string(string_schema, self.schema_directives)\n string_schema += print_schema(self.graphql_schema)\n\n field_types = self._get_directive_applied_field_types()\n non_field_types = self._get_directive_applied_non_field_types()\n\n string_schema = self._add_field_decorators(field_types, string_schema)\n string_schema = self.add_non_field_decorators(non_field_types, string_schema)\n\n for directive in self.custom_directives:\n meta_data: CustomDirectiveMeta = getattr(directive, \"_graphene_directive\")\n if not meta_data.add_definition_to_schema:\n string_schema = string_schema.replace(\n print_directive(directive) + \"\\n\\n\", \"\"\n )\n\n return string_schema.strip()"
},
{
"identifier": "DirectiveValidationError",
"path": "graphene_directives/exceptions.py",
"snippet": "class DirectiveValidationError(Exception):\n def __init__(self, message: str):\n super().__init__(message)"
},
{
"identifier": "SchemaDirective",
"path": "graphene_directives/data_models/schema_directive.py",
"snippet": "class SchemaDirective:\n target_directive: GraphQLDirective\n arguments: dict[str, Any]\n\n def __post_init__(self):\n if GrapheneDirectiveLocation.SCHEMA not in self.target_directive.locations:\n raise DirectiveValidationError(\n \". \".join(\n [\n f\"{self.target_directive} cannot be used as schema directive\",\n \"Missing DirectiveLocation.SCHEMA in locations\",\n ]\n )\n )"
},
{
"identifier": "Schema",
"path": "graphene_directives/schema.py",
"snippet": "class Schema(GrapheneSchema):\n def __init__(\n self,\n query: graphene.ObjectType = None,\n mutation: graphene.ObjectType = None,\n subscription: graphene.ObjectType = None,\n types: list[graphene.ObjectType] = None,\n directives: Union[Collection[GraphQLDirective], None] = None,\n auto_camelcase: bool = True,\n schema_directives: Collection[SchemaDirective] = None,\n include_graphql_spec_directives: bool = True,\n ):\n \"\"\"\n Schema Definition.\n\n Args:\n query (Type[ObjectType]): Root query *ObjectType*. Describes entry point for fields to *read*\n data in your Schema.\n mutation (Optional[Type[ObjectType]]): Root mutation *ObjectType*. Describes entry point for\n fields to *create, update or delete* data in your API.\n subscription (Optional[Type[ObjectType]]): Root subscription *ObjectType*. Describes entry point\n for fields to receive continuous updates.\n types (Optional[Collection[Type[ObjectType]]]): List of any types to include in schema that\n may not be introspected through root types.\n directives (List[GraphQLDirective], optional): List of custom directives to include in the\n GraphQL schema.\n auto_camelcase (bool): Fieldnames will be transformed in Schema's TypeMap from snake_case\n to camelCase (preferred by GraphQL standard). Default True.\n schema_directives (Collection[SchemaDirective]): Directives that can be defined at DIRECTIVE_LOCATION.SCHEMA\n with their argument values.\n include_graphql_spec_directives (bool): Includes directives defined by GraphQL spec (@include, @skip,\n @deprecated, @specifiedBy)\n \"\"\"\n\n self.custom_directives = directives or []\n self.schema_directives = schema_directives or []\n self.auto_camelcase = auto_camelcase\n self.directives_used: dict[str, GraphQLDirective] = {}\n\n directives = tuple(self.custom_directives) + (\n tuple(specified_directives) if include_graphql_spec_directives else ()\n )\n super().__init__(\n query=query,\n mutation=mutation,\n subscription=subscription,\n types=types,\n directives=directives,\n auto_camelcase=auto_camelcase,\n )\n\n def field_name_to_type_attribute(\n self, model: graphene.ObjectType\n ) -> Callable[[str], str]:\n \"\"\"\n Create field name conversion method (from schema name to actual graphene_type attribute name).\n\n Args:\n model (ObjectType): model whose field name is to be converted\n\n Returns:\n (str) -> (str)\n \"\"\"\n field_names = {}\n if self.auto_camelcase:\n field_names = {\n to_camel_case(attr_name): attr_name\n for attr_name in getattr(model._meta, \"fields\", []) # noqa\n }\n return lambda schema_field_name: field_names.get(\n schema_field_name, schema_field_name\n )\n\n def type_attribute_to_field_name(self, attribute: str) -> str:\n \"\"\"\n Create a conversion method to convert from graphene_type attribute name to the schema field name.\n \"\"\"\n if self.auto_camelcase:\n return to_camel_case(attribute)\n return attribute\n\n def _add_argument_decorators(\n self,\n entity_name: str,\n required_directive_field_types: set[DirectiveLocation],\n args: dict[str, GraphQLArgument],\n ) -> str:\n \"\"\"\n For a given field, go through all its args and see if any directive decorator needs to be added.\n \"\"\"\n\n if not args:\n return \"\"\n\n # If every arg does not have a description, print them on one line.\n print_single_line = not any(arg.description for arg in args.values())\n indentation: str = \" \"\n new_args = []\n\n str_field = \"(\" if print_single_line else \"(\\n\"\n\n for i, (name, arg) in enumerate(args.items()):\n if print_single_line:\n base_str 
= f\"{print_input_value(name, arg)} \"\n else:\n base_str = (\n print_description(arg, f\" {indentation}\", not i)\n + f\" {indentation}\"\n + f\"{print_input_value(name, arg)} \"\n )\n directives = []\n for directive in self.custom_directives:\n if has_field_attribute(arg, directive):\n directive_values = get_field_attribute_value(arg, directive)\n meta_data: CustomDirectiveMeta = getattr(\n directive, \"_graphene_directive\"\n )\n\n if (\n not required_directive_field_types.intersection(\n set(directive.locations)\n )\n and len(required_directive_field_types) != 0\n ):\n raise DirectiveValidationError(\n \"\\n\".join(\n [\n f\"{str(directive)} cannot be used at argument {name} level\",\n f\"\\tat {entity_name}\",\n f\"\\tallowed: {directive.locations}\",\n f\"\\trequired: {required_directive_field_types}\",\n ]\n )\n )\n\n for directive_value in directive_values:\n if meta_data.input_transform is not None:\n directive_value = arg_camel_case(\n meta_data.input_transform(\n arg_snake_case(directive_value), self\n )\n )\n\n directive_str = decorator_string(directive, **directive_value)\n directives.append(directive_str)\n\n new_args.append(base_str + \" \".join(directives))\n\n if print_single_line:\n str_field += \", \".join(new_args) + \")\"\n else:\n str_field += \"\\n\".join(new_args) + f\"\\n{indentation})\"\n\n return str_field\n\n def _add_field_decorators(self, graphene_types: set, string_schema: str) -> str:\n \"\"\"\n For a given entity, go through all its fields and see if any directive decorator needs to be added.\n\n This method simply goes through the fields that need to be modified and replace them with their annotated\n version in the schema string representation.\n \"\"\"\n\n for graphene_type in graphene_types:\n entity_name = graphene_type._meta.name # noqa\n\n entity_type = self.graphql_schema.get_type(entity_name)\n get_field_graphene_type = self.field_name_to_type_attribute(graphene_type)\n\n required_directive_locations = set()\n\n if is_object_type(entity_type) or is_interface_type(entity_type):\n required_directive_locations.union(\n {\n DirectiveLocation.FIELD_DEFINITION,\n DirectiveLocation.ARGUMENT_DEFINITION,\n }\n )\n elif is_enum_type(entity_type):\n required_directive_locations.add(DirectiveLocation.ENUM_VALUE)\n elif is_input_type(entity_type):\n required_directive_locations.add(\n DirectiveLocation.INPUT_FIELD_DEFINITION\n )\n else:\n continue\n\n if is_enum_type(entity_type):\n fields: dict = entity_type.values\n else:\n fields: dict = entity_type.fields\n\n str_fields = []\n\n for field_name, field in fields.items():\n if is_enum_type(entity_type):\n str_field = enum_type_to_fields_string(\n get_single_field_type(\n entity_type, field_name, field, is_enum_type=True\n )\n )\n elif isinstance(field, GraphQLInputField):\n str_field = input_type_to_fields_string(\n get_single_field_type(entity_type, field_name, field)\n )\n elif isinstance(field, GraphQLField):\n str_field = entity_type_to_fields_string(\n get_single_field_type(entity_type, field_name, field)\n )\n\n # Replace Arguments with directives\n if hasattr(entity_type, \"_fields\"):\n _arg = entity_type._fields.args[0] # noqa\n if hasattr(_arg, self.type_attribute_to_field_name(field_name)):\n arg_field = getattr(\n _arg, self.type_attribute_to_field_name(field_name)\n )\n else:\n arg_field = {}\n\n if (\n hasattr(arg_field, \"args\")\n and arg_field.args is not None\n and isinstance(arg_field.args, dict)\n ):\n original_args = print_args(\n args=field.args, indentation=\" \"\n )\n replacement_args = 
self._add_argument_decorators(\n entity_name=entity_name,\n required_directive_field_types=required_directive_locations,\n args=arg_field.args,\n )\n str_field = str_field.replace(\n original_args, replacement_args\n )\n else:\n continue\n\n # Check if we need to annotate the field by checking if it has the decorator attribute set on the field.\n field = getattr(\n graphene_type, get_field_graphene_type(field_name), None\n )\n if field is None:\n # Append the string, but skip the directives\n str_fields.append(str_field)\n continue\n\n for directive in self.custom_directives:\n if not has_field_attribute(field, directive):\n continue\n directive_values = get_field_attribute_value(field, directive)\n\n meta_data: CustomDirectiveMeta = getattr(\n directive, \"_graphene_directive\"\n )\n\n if (\n not required_directive_locations.intersection(\n set(directive.locations)\n )\n and len(required_directive_locations) != 0\n ):\n raise DirectiveValidationError(\n \"\\n\".join(\n [\n f\"{str(directive)} cannot be used at field level\",\n f\"\\tat {entity_name}\",\n f\"\\tallowed: {directive.locations}\",\n f\"\\trequired: {required_directive_locations}\",\n ]\n )\n )\n\n for directive_value in directive_values:\n if (\n meta_data.field_validator is not None\n and not meta_data.field_validator(\n entity_type,\n field,\n arg_snake_case(directive_value),\n self,\n )\n ):\n raise DirectiveCustomValidationError(\n \", \".join(\n [\n f\"Custom Validation Failed for {str(directive)} with args: ({directive_value})\"\n f\"at field level {entity_name}:{field}\"\n ]\n )\n )\n\n if meta_data.input_transform is not None:\n directive_value = arg_camel_case(\n meta_data.input_transform(\n arg_snake_case(directive_value), self\n )\n )\n\n str_field += (\n f\" {decorator_string(directive, **directive_value)}\"\n )\n\n str_fields.append(str_field)\n\n str_fields_annotated = \"\\n\".join(str_fields)\n\n # Replace the original field declaration by the annotated one\n if is_object_type(entity_type):\n entity_type_name = \"type\"\n str_fields_original = entity_type_to_fields_string(entity_type)\n elif is_interface_type(entity_type):\n entity_type_name = \"interface\"\n str_fields_original = entity_type_to_fields_string(entity_type)\n elif is_enum_type(entity_type):\n entity_type_name = \"enum\"\n str_fields_original = enum_type_to_fields_string(entity_type)\n elif is_input_type(entity_type):\n entity_type_name = \"input\"\n str_fields_original = input_type_to_fields_string(entity_type)\n else:\n continue\n\n pattern = re.compile(\n r\"(%s\\s%s\\s[^\\{]*)\\{\\s*%s\\s*\\}\" # noqa\n % (entity_type_name, entity_name, re.escape(str_fields_original))\n )\n string_schema = pattern.sub(\n r\"\\g<1> {\\n%s\\n}\" % str_fields_annotated, string_schema\n )\n return string_schema\n\n def add_non_field_decorators(\n self, non_fields_type: set[GraphQLNamedType], string_schema: str\n ) -> str:\n for non_field in non_fields_type:\n entity_name = non_field._meta.name # noqa\n entity_type = self.graphql_schema.get_type(entity_name)\n\n required_directive_locations = set()\n\n if is_scalar_type(entity_type):\n non_field_pattern = rf\"(scalar {entity_name})\"\n required_directive_locations.add(DirectiveLocation.SCALAR)\n elif is_union_type(entity_type):\n non_field_pattern = rf\"(union {entity_name} )\"\n required_directive_locations.add(DirectiveLocation.UNION)\n elif is_object_type(entity_type):\n non_field_pattern = rf\"(type {entity_name} [^\\{{]*)\"\n required_directive_locations.add(DirectiveLocation.OBJECT)\n elif 
is_interface_type(entity_type):\n non_field_pattern = rf\"(interface {entity_name} [^\\{{]*)\"\n required_directive_locations.add(DirectiveLocation.INTERFACE)\n elif is_enum_type(entity_type):\n non_field_pattern = rf\"(enum {entity_name} [^\\{{]*)\"\n required_directive_locations.add(DirectiveLocation.ENUM)\n elif is_input_type(entity_type):\n non_field_pattern = rf\"(input {entity_name} [^\\{{]*)\"\n required_directive_locations.add(DirectiveLocation.INPUT_OBJECT)\n else:\n continue\n\n directive_annotations = []\n for directive in self.custom_directives:\n if has_non_field_attribute(non_field, directive):\n meta_data: CustomDirectiveMeta = getattr(\n directive, \"_graphene_directive\"\n )\n directive_values = get_non_field_attribute_value(\n non_field, directive\n )\n\n if (\n not required_directive_locations.intersection(\n set(directive.locations)\n )\n and len(required_directive_locations) != 0\n ):\n raise DirectiveValidationError(\n \"\\n\".join(\n [\n f\"{str(directive)} cannot be used at non field level\",\n f\"\\tat {entity_name}\",\n f\"\\tallowed: {directive.locations}\",\n f\"\\trequired: {required_directive_locations}\",\n ]\n )\n )\n\n for directive_value in directive_values:\n if (\n meta_data.non_field_validator is not None\n and not meta_data.non_field_validator(\n non_field, arg_snake_case(directive_value), self\n )\n ):\n raise DirectiveCustomValidationError(\n \", \".join(\n [\n f\"Custom Validation Failed for {str(directive)} with args: ({directive_value})\"\n f\"at non-field level {entity_name}\"\n ]\n )\n )\n if meta_data.input_transform is not None:\n directive_value = arg_camel_case(\n meta_data.input_transform(\n arg_snake_case(directive_value), self\n )\n )\n\n directive_annotations.append(\n f\"{decorator_string(directive, **directive_value)}\"\n )\n\n annotation = \" \".join(directive_annotations)\n annotation = (\n f\" {annotation}\" if is_scalar_type(entity_type) else f\"{annotation} \"\n )\n replace_str = rf\"\\1{annotation}\"\n pattern = re.compile(non_field_pattern)\n string_schema = pattern.sub(replace_str, string_schema)\n\n return string_schema\n\n def _get_directive_applied_non_field_types(self) -> set:\n \"\"\"\n Find all the directive applied non-field types from the schema.\n \"\"\"\n directives_types = set()\n schema_types = {\n **self.graphql_schema.type_map,\n **{\n \"Query\": self.graphql_schema.query_type,\n \"Mutation\": self.graphql_schema.mutation_type,\n },\n }\n\n for schema_type in schema_types.values():\n if not hasattr(schema_type, \"graphene_type\"):\n continue\n for directive in self.custom_directives:\n if has_non_field_attribute(schema_type.graphene_type, directive):\n self.directives_used[directive.name] = directive\n directives_types.add(schema_type.graphene_type)\n return directives_types\n\n def _get_directive_applied_field_types(self) -> set:\n \"\"\"\n Find all the directive applied field types from the schema.\n \"\"\"\n directives_fields = set()\n schema_types = {\n **self.graphql_schema.type_map,\n **{\n \"Query\": self.graphql_schema.query_type, # noqa\n \"Mutation\": self.graphql_schema.mutation_type, # noqa\n },\n }\n\n for _, entity_type in schema_types.items():\n if (\n not hasattr(entity_type, \"graphene_type\") # noqa:SIM101\n or isinstance(entity_type.graphene_type._meta, UnionOptions) # noqa\n or isinstance(entity_type.graphene_type._meta, ScalarOptions) # noqa\n ):\n continue\n\n fields = (\n list(entity_type.values.values()) # Enum class fields\n if is_enum_type(entity_type)\n else list(entity_type.fields) # 
noqa\n )\n\n for field in fields:\n field_type = (\n # auto-camelcasing can cause problems\n getattr(entity_type.graphene_type, to_camel_case(field), None)\n or getattr(entity_type.graphene_type, to_snake_case(field), None)\n if not is_enum_type(entity_type)\n else field.value\n )\n for directive_ in self.custom_directives:\n if has_field_attribute(field_type, directive_):\n self.directives_used[directive_.name] = directive_\n directives_fields.add(entity_type.graphene_type)\n\n # Handle Argument Decorators\n if (\n hasattr(field_type, \"args\")\n and field_type.args is not None\n and isinstance(field_type.args, dict)\n ):\n for arg_name, arg_type in field_type.args.items():\n if has_field_attribute(arg_type, directive_):\n if (\n DirectiveLocation.ARGUMENT_DEFINITION\n not in directive_.locations\n ):\n raise DirectiveValidationError(\n f\"{directive_} cannot be used at argument level at {entity_type}->{field}\"\n )\n self.directives_used[directive_.name] = directive_\n directives_fields.add(entity_type.graphene_type)\n\n return directives_fields\n\n def get_directives_used(self) -> list[GraphQLDirective]:\n \"\"\"\n Returns a list of directives used in the schema\n\n \"\"\"\n self._get_directive_applied_field_types()\n self._get_directive_applied_non_field_types()\n return list(self.directives_used.values())\n\n def __str__(self):\n string_schema = \"\"\n string_schema += extend_schema_string(string_schema, self.schema_directives)\n string_schema += print_schema(self.graphql_schema)\n\n field_types = self._get_directive_applied_field_types()\n non_field_types = self._get_directive_applied_non_field_types()\n\n string_schema = self._add_field_decorators(field_types, string_schema)\n string_schema = self.add_non_field_decorators(non_field_types, string_schema)\n\n for directive in self.custom_directives:\n meta_data: CustomDirectiveMeta = getattr(directive, \"_graphene_directive\")\n if not meta_data.add_definition_to_schema:\n string_schema = string_schema.replace(\n print_directive(directive) + \"\\n\\n\", \"\"\n )\n\n return string_schema.strip()"
}
] | from typing import Collection, Type, Union
from graphene import Schema as GrapheneSchema
from graphql import GraphQLDirective
from graphql import specified_directives
from . import DirectiveValidationError
from .data_models import SchemaDirective
from .schema import Schema
import graphene | 9,384 |
def build_schema(
query: Union[graphene.ObjectType, Type[graphene.ObjectType]] = None,
mutation: Union[graphene.ObjectType, Type[graphene.ObjectType]] = None,
subscription: Union[graphene.ObjectType, Type[graphene.ObjectType]] = None,
types: Collection[Union[graphene.ObjectType, Type[graphene.ObjectType]]] = None,
directives: Union[Collection[GraphQLDirective], None] = None,
auto_camelcase: bool = True,
|
def build_schema(
query: Union[graphene.ObjectType, Type[graphene.ObjectType]] = None,
mutation: Union[graphene.ObjectType, Type[graphene.ObjectType]] = None,
subscription: Union[graphene.ObjectType, Type[graphene.ObjectType]] = None,
types: Collection[Union[graphene.ObjectType, Type[graphene.ObjectType]]] = None,
directives: Union[Collection[GraphQLDirective], None] = None,
auto_camelcase: bool = True, | schema_directives: Collection[SchemaDirective] = None, | 2 | 2023-12-04 05:17:51+00:00 | 12k |
weijiawu/CisDQ | mask2former_video/data_video/ytvis_eval.py | [
{
"identifier": "YTVOS",
"path": "mask2former_video/data_video/datasets/ytvis_api/ytvos.py",
"snippet": "class YTVOS:\n def __init__(self, annotation_file=None):\n \"\"\"\n Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n :param annotation_file (str): location of annotation file\n :param image_folder (str): location to the folder that hosts images.\n :return:\n \"\"\"\n # load dataset\n self.dataset,self.anns,self.cats,self.vids = dict(),dict(),dict(),dict()\n self.vidToAnns, self.catToVids = defaultdict(list), defaultdict(list)\n if not annotation_file == None:\n print('loading annotations into memory...')\n tic = time.time()\n dataset = json.load(open(annotation_file, 'r'))\n assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))\n print('Done (t={:0.2f}s)'.format(time.time()- tic))\n self.dataset = dataset\n self.createIndex()\n\n def createIndex(self):\n # create index\n print('creating index...')\n anns, cats, vids = {}, {}, {}\n vidToAnns,catToVids = defaultdict(list),defaultdict(list)\n if 'annotations' in self.dataset:\n for ann in self.dataset['annotations']:\n vidToAnns[ann['video_id']].append(ann)\n anns[ann['id']] = ann\n\n if 'videos' in self.dataset:\n for vid in self.dataset['videos']:\n vids[vid['id']] = vid\n\n if 'categories' in self.dataset:\n for cat in self.dataset['categories']:\n cats[cat['id']] = cat\n\n if 'annotations' in self.dataset and 'categories' in self.dataset:\n for ann in self.dataset['annotations']:\n catToVids[ann['category_id']].append(ann['video_id'])\n\n print('index created!')\n\n # create class members\n self.anns = anns\n self.vidToAnns = vidToAnns\n self.catToVids = catToVids\n self.vids = vids\n self.cats = cats\n\n def info(self):\n \"\"\"\n Print information about the annotation file.\n :return:\n \"\"\"\n for key, value in self.dataset['info'].items():\n print('{}: {}'.format(key, value))\n\n def getAnnIds(self, vidIds=[], catIds=[], areaRng=[], iscrowd=None):\n \"\"\"\n Get ann ids that satisfy given filter conditions. default skips that filter\n :param vidIds (int array) : get anns for given vids\n catIds (int array) : get anns for given cats\n areaRng (float array) : get anns for given area range (e.g. [0 inf])\n iscrowd (boolean) : get anns for given crowd label (False or True)\n :return: ids (int array) : integer array of ann ids\n \"\"\"\n vidIds = vidIds if _isArrayLike(vidIds) else [vidIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(vidIds) == len(catIds) == len(areaRng) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(vidIds) == 0:\n lists = [self.vidToAnns[vidId] for vidId in vidIds if vidId in self.vidToAnns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.dataset['annotations']\n anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]\n anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['avg_area'] > areaRng[0] and ann['avg_area'] < areaRng[1]]\n if not iscrowd == None:\n ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]\n else:\n ids = [ann['id'] for ann in anns]\n return ids\n\n def getCatIds(self, catNms=[], supNms=[], catIds=[]):\n \"\"\"\n filtering parameters. 
default skips that filter.\n :param catNms (str array) : get cats for given cat names\n :param supNms (str array) : get cats for given supercategory names\n :param catIds (int array) : get cats for given cat ids\n :return: ids (int array) : integer array of cat ids\n \"\"\"\n catNms = catNms if _isArrayLike(catNms) else [catNms]\n supNms = supNms if _isArrayLike(supNms) else [supNms]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(catNms) == len(supNms) == len(catIds) == 0:\n cats = self.dataset['categories']\n else:\n cats = self.dataset['categories']\n cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]\n cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]\n cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]\n ids = [cat['id'] for cat in cats]\n return ids\n\n def getVidIds(self, vidIds=[], catIds=[]):\n '''\n Get vid ids that satisfy given filter conditions.\n :param vidIds (int array) : get vids for given ids\n :param catIds (int array) : get vids with all given cats\n :return: ids (int array) : integer array of vid ids\n '''\n vidIds = vidIds if _isArrayLike(vidIds) else [vidIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(vidIds) == len(catIds) == 0:\n ids = self.vids.keys()\n else:\n ids = set(vidIds)\n for i, catId in enumerate(catIds):\n if i == 0 and len(ids) == 0:\n ids = set(self.catToVids[catId])\n else:\n ids &= set(self.catToVids[catId])\n return list(ids)\n\n def loadAnns(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying anns\n :return: anns (object array) : loaded ann objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.anns[id] for id in ids]\n elif type(ids) == int:\n return [self.anns[ids]]\n\n def loadCats(self, ids=[]):\n \"\"\"\n Load cats with the specified ids.\n :param ids (int array) : integer ids specifying cats\n :return: cats (object array) : loaded cat objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.cats[id] for id in ids]\n elif type(ids) == int:\n return [self.cats[ids]]\n\n def loadVids(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying vid\n :return: vids (object array) : loaded vid objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.vids[id] for id in ids]\n elif type(ids) == int:\n return [self.vids[ids]]\n\n\n def loadRes(self, resFile):\n \"\"\"\n Load result file and return a result api object.\n :param resFile (str) : file name of result file\n :return: res (obj) : result api object\n \"\"\"\n res = YTVOS()\n res.dataset['videos'] = [img for img in self.dataset['videos']]\n\n print('Loading and preparing results...')\n tic = time.time()\n if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):\n anns = json.load(open(resFile))\n elif type(resFile) == np.ndarray:\n anns = self.loadNumpyAnnotations(resFile)\n else:\n anns = resFile\n assert type(anns) == list, 'results in not an array of objects'\n annsVidIds = [ann['video_id'] for ann in anns]\n assert set(annsVidIds) == (set(annsVidIds) & set(self.getVidIds())), \\\n 'Results do not correspond to current coco set'\n if 'segmentations' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n ann['areas'] = []\n if not 'bboxes' in ann:\n ann['bboxes'] = []\n for seg in ann['segmentations']:\n # now only support compressed RLE format as 
segmentation results\n if seg:\n ann['areas'].append(maskUtils.area(seg))\n if len(ann['bboxes']) < len(ann['areas']):\n ann['bboxes'].append(maskUtils.toBbox(seg))\n else:\n ann['areas'].append(None)\n if len(ann['bboxes']) < len(ann['areas']):\n ann['bboxes'].append(None)\n ann['id'] = id+1\n l = [a for a in ann['areas'] if a]\n if len(l)==0:\n ann['avg_area'] = 0\n else:\n ann['avg_area'] = np.array(l).mean() \n ann['iscrowd'] = 0\n print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res\n\n def annToRLE(self, ann, frameId):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE to RLE.\n :return: binary mask (numpy 2D array)\n \"\"\"\n t = self.vids[ann['video_id']]\n h, w = t['height'], t['width']\n segm = ann['segmentations'][frameId]\n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = segm\n return rle\n\n def annToMask(self, ann, frameId):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.\n :return: binary mask (numpy 2D array)\n \"\"\"\n rle = self.annToRLE(ann, frameId)\n m = maskUtils.decode(rle)\n return m"
},
{
"identifier": "YTVOSeval",
"path": "mask2former_video/data_video/datasets/ytvis_api/ytvoseval.py",
"snippet": "class YTVOSeval:\n # Interface for evaluating video instance segmentation on the YouTubeVIS dataset.\n #\n # The usage for YTVOSeval is as follows:\n # cocoGt=..., cocoDt=... # load dataset and results\n # E = YTVOSeval(cocoGt,cocoDt); # initialize YTVOSeval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n print('iouType not specified. 
use default iouType segm')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.params = {} # evaluation parameters\n self.evalVids = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n if not cocoGt is None:\n self.params.vidIds = sorted(cocoGt.getVidIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n for i, a in enumerate(ann['segmentations']):\n if a:\n rle = coco.annToRLE(ann, i)\n ann['segmentations'][i] = rle\n l = [a for a in ann['areas'] if a]\n if len(l)==0:\n ann['avg_area'] = 0\n else:\n ann['avg_area'] = np.array(l).mean() \n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(vidIds=p.vidIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(vidIds=p.vidIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(vidIds=p.vidIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(vidIds=p.vidIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['video_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['video_id'], dt['category_id']].append(dt)\n self.evalVids = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalVids\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.vidIds = list(np.unique(p.vidIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {(vidId, catId): computeIoU(vidId, catId) \\\n for vidId in p.vidIds\n for catId in catIds}\n\n evaluateVid = self.evaluateVid\n maxDet = p.maxDets[-1]\n \n \n self.evalImgs = [evaluateVid(vidId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for vidId in p.vidIds\n ]\n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU(self, vidId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[vidId,catId]\n dt = self._dts[vidId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[vidId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[vidId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentations'] for g in gt]\n d = [d['segmentations'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bboxes'] for g in gt]\n d = [d['bboxes'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n #ious = maskUtils.iou(d,g,iscrowd)\n def iou_seq(d_seq, g_seq):\n i = .0\n u = .0\n for d, g in zip(d_seq, g_seq):\n if d and g:\n i += maskUtils.area(maskUtils.merge([d, g], True))\n u += maskUtils.area(maskUtils.merge([d, g], False))\n elif not d and g:\n u += maskUtils.area(g)\n elif d and not g:\n u += maskUtils.area(d)\n if not u > .0:\n print(\"Mask sizes in video {} and category {} may not match!\".format(vidId, catId))\n iou = i / u if u > .0 else .0\n return iou\n ious = np.zeros([len(d), len(g)])\n for i, j in np.ndindex(ious.shape):\n ious[i, j] = iou_seq(d[i], g[j])\n #print(vidId, catId, ious.shape, ious)\n return ious\n\n def computeOks(self, imgId, catId):\n p = self.params\n # dimention here should be Nxm\n gts = self._gts[imgId, catId]\n dts = self._dts[imgId, catId]\n inds = np.argsort([-d['score'] for d in dts], kind='mergesort')\n dts = [dts[i] for i in inds]\n if len(dts) > p.maxDets[-1]:\n dts = dts[0:p.maxDets[-1]]\n # if len(gts) == 0 and len(dts) == 0:\n if len(gts) == 0 or len(dts) == 0:\n return []\n ious = np.zeros((len(dts), len(gts)))\n sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0\n vars = (sigmas * 2)**2\n k = len(sigmas)\n # compute oks between each detection and ground truth object\n for j, gt in enumerate(gts):\n # create bounds for ignore regions(double the gt bbox)\n g = np.array(gt['keypoints'])\n xg = g[0::3]; yg = g[1::3]; vg = g[2::3]\n k1 = np.count_nonzero(vg > 0)\n bb = gt['bbox']\n x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2\n y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2\n for i, dt in enumerate(dts):\n d = np.array(dt['keypoints'])\n xd = d[0::3]; yd = d[1::3]\n if k1>0:\n # measure the per-keypoint distance if keypoints visible\n dx = xd - xg\n dy = yd - yg\n 
else:\n # measure minimum distance to keypoints in (x0,y0) & (x1,y1)\n z = np.zeros((k))\n dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)\n dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)\n e = (dx**2 + dy**2) / vars / (gt['avg_area']+np.spacing(1)) / 2\n if k1 > 0:\n e=e[vg > 0]\n ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]\n return ious\n\n def evaluateVid(self, vidId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[vidId,catId]\n dt = self._dts[vidId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[vidId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[vidId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['avg_area']<aRng[0] or g['avg_area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load computed ious\n ious = self.ious[vidId, catId][:, gtind] if len(self.ious[vidId, catId]) > 0 else self.ious[vidId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['avg_area']<aRng[0] or d['avg_area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'video_id': vidId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = 
-np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.vidIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.vidIds) if i in setI]\n I0 = len(_pe.vidIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == 
p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n return stats\n def _summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=.75)\n stats[3] = _summarize(1, maxDets=20, areaRng='medium')\n stats[4] = _summarize(1, maxDets=20, areaRng='large')\n stats[5] = _summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=.75)\n stats[8] = _summarize(0, maxDets=20, areaRng='medium')\n stats[9] = _summarize(0, maxDets=20, areaRng='large')\n return stats\n if not self.eval:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()"
}
] | import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pycocotools.mask as mask_util
import torch
import detectron2.utils.comm as comm
from collections import OrderedDict
from .datasets.ytvis_api.ytvos import YTVOS
from .datasets.ytvis_api.ytvoseval import YTVOSeval
from tabulate import tabulate
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.evaluation import DatasetEvaluator
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table | 10,663 | # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC
class YTVISEvaluator(DatasetEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
use_fast_impl=True,
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file in torch serialization
format that contains all the raw original predictions.
2. "coco_instances_results.json" a json file in COCO's result
format.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
if tasks is not None and isinstance(tasks, CfgNode):
self._logger.warning(
"COCO Evaluator instantiated using config, this is deprecated behavior."
" Please pass in explicit arguments instead."
)
self._tasks = None # Inferring it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
| # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC
class YTVISEvaluator(DatasetEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
use_fast_impl=True,
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file in torch serialization
format that contains all the raw original predictions.
2. "coco_instances_results.json" a json file in COCO's result
format.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
if tasks is not None and isinstance(tasks, CfgNode):
self._logger.warning(
"COCO Evaluator instantiated using config, this is deprecated behavior."
" Please pass in explicit arguments instead."
)
self._tasks = None # Inferring it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()): | self._ytvis_api = YTVOS(json_file) | 0 | 2023-11-28 10:33:40+00:00 | 12k |
KieDani/Towards_3D_Object_Localization | general/train.py | [
{
"identifier": "create_heatmaps",
"path": "helper.py",
"snippet": "def create_heatmaps(joint, output_size=(480, 480), sigma=8, factor=1):\n '''\n create heatmap from keypoints x, y\n joint: (y, x)\n output_size: (height, width)\n '''\n gaus2d = lambda x, y: 100 * np.exp(-((x ** 2 + y ** 2) / (2 * sigma ** 2)))\n y = np.arange(int(output_size[0] / factor))\n x = np.arange(int(output_size[1] / factor))\n X, Y = np.meshgrid(x, y)\n heatmap = np.zeros((1, int(output_size[0] / factor), int(output_size[1] / factor)), dtype=np.float32)\n y0, x0 = joint[0] / factor, joint[1] / factor\n heatmap[0] = gaus2d(Y - y0, X - x0)\n return heatmap"
},
{
"identifier": "seed_worker",
"path": "helper.py",
"snippet": "def seed_worker(worker_id):\n worker_seed = torch.initial_seed() % 2**32\n numpy.random.seed(worker_seed)\n random.seed(worker_seed)"
},
{
"identifier": "save_models",
"path": "helper.py",
"snippet": "def save_models(coord_model, forec_model, coord_optimizer, forec_optimizer, lossesandmetrics, epoch, identifier='', training_parameters = None, config=None):\n if config is not None:\n identifier = config.get_pathforsaving()\n training_parameters = config.__dict__\n\n logs_path = get_logs_path()\n save_path = os.path.join(logs_path, 'checkpoints', identifier)\n os.makedirs(save_path, exist_ok=True)\n save_path = os.path.join(save_path, f'{epoch}.pth')\n\n torch.save({\n 'epoch': epoch,\n 'coord_model_state_dict': coord_model.state_dict(),\n 'coord_optimizer_state_dict': coord_optimizer.state_dict(),\n 'coord_loss': lossesandmetrics['loss_2Dcoord'],\n 'forec_model_state_dict': forec_model.state_dict(),\n 'forec_optimizer_state_dict': forec_optimizer.state_dict(),\n 'forec_loss': lossesandmetrics['loss_reproject'],\n 'training_parameters': training_parameters,\n 'DtG': lossesandmetrics['distance_to_gt'],\n 'rDtG': lossesandmetrics['rel_distance_to_gt'],\n }, save_path)\n\n return save_path"
},
{
"identifier": "get_reprojectionloss",
"path": "helper.py",
"snippet": "def get_reprojectionloss(loss_coord_title):\n assert loss_coord_title in ['L1', 'scale_invariant', 'alpha']\n if loss_coord_title == 'L1':\n loss_fn_coord = torch.nn.L1Loss(reduction='none')\n elif loss_coord_title == 'scale_invariant':\n loss_fn_coord = lambda a, b: LA.norm((a - b), dim=-1) / (LA.norm(a, dim=-1) + LA.norm(b, dim=-1))\n else:\n loss_fn_coord = lambda a, b: torch.abs(LA.norm(a, dim=-1) - LA.norm(b, dim=-1)) / (\n LA.norm(a, dim=-1) + LA.norm(b, dim=-1)) + 0.5 * (1 - torch.einsum('... d, ... d -> ...', a, b) / (\n LA.norm(a, dim=-1) * LA.norm(b, dim=-1)))\n return loss_fn_coord"
},
{
"identifier": "img_to_cam",
"path": "helper.py",
"snippet": "def img_to_cam(coords, Mint, original_size, resized_size):\n '''\n coords: (..., 3) with ordering (y, x, z)\n Mint: (3, 3) or (B, 3, 3)\n original_size: order (height, width)\n resized_size: order (height, width)\n ----------------\n return: (..., 3) with ordering (y, x, z)\n '''\n coords_c = coords.clone()\n coords_c[..., :2] = resized_to_original_keypopints(coords_c[..., :2], original_size, resized_size)\n coords_c[..., [0, 1]] = coords_c[..., [1, 0]] * coords[..., 2:3]\n if len(Mint.shape) == 3:\n inv_Mint = torch.linalg.inv(Mint[:, :3, :3])\n coords_c = torch.einsum('b i d, b ... d -> b ... i', inv_Mint, coords_c)\n elif len(Mint.shape) == 2:\n inv_Mint = torch.linalg.inv(Mint[:3, :3])\n coords_c = torch.einsum('i d, ... d -> ... i', inv_Mint, coords_c)\n else:\n raise ValueError('Mint should be 2D or 3D tensor')\n coords_c[..., [0, 1, 2]] = coords_c[..., [1, 0, 2]]\n return coords_c"
},
{
"identifier": "cam_to_img",
"path": "helper.py",
"snippet": "def cam_to_img(coords_c, Mint, original_size, resized_size):\n '''\n coords_c: (..., 3) with ordering (y, x, z)\n Mint: (3, 3) or (B, 3, 3)\n original_size: order (height, width)\n resized_size: order (height, width)\n ----------------\n returns: (..., 3) with ordering (y, x, z)\n '''\n coords = coords_c.clone()\n coords[..., [0, 1]] = coords[..., [1, 0]]\n if len(Mint.shape) == 3:\n orig_Mint = Mint[:, :3, :3]\n coords = torch.einsum('b i d, b ... d -> b ... i', orig_Mint, coords)\n elif len(Mint.shape) == 2:\n orig_Mint = Mint[:3, :3]\n coords = torch.einsum('i d, ... d -> ... i', orig_Mint, coords)\n else:\n raise ValueError('Mint should be 2D or 3D tensor')\n coords[..., [0, 1]] = coords[..., [1, 0]] / coords[..., 2:3].clone()\n coords = original_to_resized_keypopints(coords, original_size, resized_size)\n return coords"
},
{
"identifier": "cam_to_world",
"path": "helper.py",
"snippet": "def cam_to_world(coords_c, extrinsic_matrix):\n #coords_c[..., [0, 1]] = coords_c[..., [1, 0]]\n tmp = coords_c[..., [1, 0, 2]]\n inverse_extrinsic_matrix = torch.linalg.inv(extrinsic_matrix)\n #coords_c = torch.cat((coords_c, torch.ones_like(coords_c[..., 0:1])), dim=-1)\n tmp = torch.cat((tmp, torch.ones_like(tmp[..., 0:1])), dim=-1)\n if len(tmp.shape) == 3: inverse_extrinsic_matrix = inverse_extrinsic_matrix.unsqueeze(-3)\n #coords_w = torch.einsum('i d, ... d -> ... i', inverse_extrinsic_matrix, coords_c)\n coords_w = torch.einsum('... i d, ... d -> ... i', inverse_extrinsic_matrix, tmp)\n coords_w = coords_w[..., :3] / coords_w[..., 3:4]\n return coords_w"
},
{
"identifier": "update_ema",
"path": "helper.py",
"snippet": "def update_ema(model, model_ema, alpha=0.95):\n with torch.no_grad():\n for name, param in model_ema.named_parameters():\n model.state_dict()[name]\n param.data = alpha * param + (1 - alpha) * model.state_dict()[name].data\n for name, param in model_ema.named_buffers():\n param.data = alpha * param + (1 - alpha) * model.state_dict()[name].data\n return model_ema"
},
{
"identifier": "get_PEN",
"path": "general/model.py",
"snippet": "def get_PEN(name='resnet34', depth_mode='depthmap', environment_name=None):\n assert name in ['resnet34', 'resnet50', 'convnext', 'hrnet', 'convnextv2']\n if name in ['resnet34', 'resnet50', 'convnext', 'hrnet', 'convnextv2']:\n model = ResNetPyramid(depth_mode=depth_mode, backbone=name)\n else:\n if depth_mode != 'depthmap':\n raise NotImplementedError\n else:\n raise ValueError\n\n if environment_name in ['realball']:\n model.min_depth = 0.05\n model.max_depth = 3\n elif environment_name in ['parcour_singleenv_singlecam', 'parcour_singleenv_multicam', 'parcour_multienv_singlecam',\n 'parcour_multienv_multicam', 'falling', 'carousel', 'parcour_dualenv_multicam']:\n model.min_depth = 2\n model.max_depth = 12\n else:\n raise ValueError\n\n return model"
},
{
"identifier": "get_PAF",
"path": "general/model.py",
"snippet": "def get_PAF(device, mode='exactNDE', environment_name='parcour'):\n assert mode in ['exactNDE', 'analytic']\n assert environment_name in ['falling', 'carousel', 'parcour_singleenv_singlecam', 'parcour_singleenv_multicam',\n 'parcour_multienv_singlecam', 'parcour_multienv_multicam', 'realball',\n 'parcour_dualenv_multicam'\n ]\n if mode == 'analytic' and environment_name not in ['falling', 'carousel']:\n raise NotImplementedError\n if mode == 'exactNDE':\n return ForecastNetwork(environment_name, device=device, mode=mode)\n elif mode == 'analytic':\n return AnalyticForecastNetwork(environment_name, device=device)\n else:\n raise NotImplementedError"
},
{
"identifier": "get_dataset",
"path": "general/dataset.py",
"snippet": "def get_dataset(dataset_name='parcour', mode='train', transforms=None, video_len=None):\n assert mode in ['train', 'val', 'test']\n assert dataset_name in ['falling', 'carousel', 'parcour_singleenv_singlecam', 'parcour_singleenv_multicam',\n 'parcour_multienv_singlecam', 'parcour_multienv_multicam', 'realball',\n 'parcour_dualenv_multicam'\n ]\n\n global video_length\n if dataset_name == 'realball':\n video_length = 16 if video_len is None else video_len\n else:\n video_length = video_length if video_len is None else video_len\n\n if dataset_name == 'parcour_singleenv_singlecam':\n return ParcourDataset(mode=mode, multi_camera=False, num_env=1, transform=transforms)\n elif dataset_name == 'parcour_singleenv_multicam':\n return ParcourDataset(mode=mode, multi_camera=True, num_env=1, transform=transforms)\n elif dataset_name == 'falling':\n return BallDataset(mode=mode, transform=transforms)\n elif dataset_name == 'carousel':\n return CarouselDataset(mode=mode, transform=transforms)\n elif dataset_name == 'parcour_multienv_multicam':\n return ParcourDataset(mode=mode, multi_camera=True, num_env=3, transform=transforms)\n elif dataset_name == 'parcour_multienv_singlecam':\n return ParcourDataset(mode=mode, multi_camera=False, num_env=3, transform=transforms)\n elif dataset_name == 'realball':\n return RealBallDataset(mode=mode, transform=transforms)\n elif dataset_name == 'parcour_dualenv_multicam':\n return ParcourDataset(mode=mode, multi_camera=True, num_env=2, transform=transforms)\n else:\n print('Dataset not found!!!')\n exit()"
},
{
"identifier": "plot2d",
"path": "general/evaluate.py",
"snippet": "def plot2d(coords, groundtruth, limits, title=''):\n min_lim, max_lim, x_lim = limits[0], limits[1], limits[2]\n fig = plt.figure()\n plt.title(title, fontsize=20)\n plt.ylabel(r'camera depth $z^{({C})}$', fontsize=18)\n plt.xlabel(r'frame index $n$', fontsize=18)\n plt.ylim(min_lim - 0.1*np.abs(max_lim - min_lim), max_lim + 0.1*np.abs(max_lim - min_lim))\n plt.xlim(0, x_lim)\n plt.plot(groundtruth[:, 2], color='red', label='ground truth')\n plt.plot(coords[:, 2], color='blue', label='estimated depth')\n plt.legend(fontsize=16)\n with io.BytesIO() as buff:\n fig.savefig(buff, format='raw')\n buff.seek(0)\n data = np.frombuffer(buff.getvalue(), dtype=np.uint8)\n w, h = fig.canvas.get_width_height()\n im = data.reshape((int(h), int(w), -1))\n plt.close()\n return im"
},
{
"identifier": "plot3d",
"path": "general/evaluate.py",
"snippet": "def plot3d(coords, groundtruth, limits, title=''):\n min_lim, max_lim = limits[0], limits[1]\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n ax.scatter(coords[:, 0], coords[:, 1], coords[:, 2], marker='o', label='estimated')\n ax.scatter(groundtruth[:, 0], groundtruth[:, 1], groundtruth[:, 2], marker='^', label='groundtruth')\n ax.set_xlabel('x', fontsize=18)\n ax.set_ylabel('y', fontsize=18)\n ax.set_zlabel('z', fontsize=18)\n ax.set_xlim3d(min_lim[0].item(), max_lim[0].item())\n ax.set_ylim3d(min_lim[1].item(), max_lim[1].item())\n ax.set_zlim3d(min_lim[2].item(), max_lim[2].item())\n ax.set_title(title, fontsize=20)\n plt.legend(bbox_to_anchor=(0.7, 1.02), loc=\"upper left\", fontsize=16)\n with io.BytesIO() as buff:\n fig.savefig(buff, format='raw')\n buff.seek(0)\n data = np.frombuffer(buff.getvalue(), dtype=np.uint8)\n w, h = fig.canvas.get_width_height()\n im = data.reshape((int(h), int(w), -1))\n fig.clear()\n plt.close()\n\n return im"
},
{
"identifier": "MyConfig",
"path": "general/config.py",
"snippet": "class MyConfig(BaseConfig):\n def __init__(self, lr_coord, timesteps, loss_coord_title, forec_title, sin_title, environment_name, folder, lossmode):\n super(MyConfig, self).__init__()\n self.lr_coord = lr_coord\n self.timesteps = timesteps\n self.loss_coord_title = loss_coord_title\n self.forec_title = forec_title\n self.environment_name = map_environment_name(environment_name)\n self.folder = folder\n self.sin_title = sin_title\n self.lossmode = lossmode"
},
{
"identifier": "train_transform",
"path": "general/transforms.py",
"snippet": "class Normalize(object):\nclass Compose(object):\nclass DeNormalize(object):\n def __init__(self, mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]):\n def __call__(self, x):\n def __init__(self, transforms):\n def __call__(self, x):\n def __init__(self, mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]):\n def __call__(self, x):"
}
] | import os
import argparse
import torch
import random
import einops as eo
import matplotlib.pyplot as plt
import numpy as np
import scipy
import cv2
import os
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from helper import create_heatmaps, seed_worker, save_models, get_reprojectionloss
from helper import img_to_cam, cam_to_img, cam_to_world, update_ema
from general.model import get_PEN, get_PAF
from general.dataset import get_dataset
from general.evaluate import plot2d, plot3d
from general.config import MyConfig
from general.transforms import train_transform, val_transform, denorm | 8,594 | for i, data_dict in enumerate(tqdm(valloader)):
for xml_num, stuff in data_dict.items():
video, vidlabel, vidheatmap, eM, iM, d3label, cam_num, timestamps = stuff
t = 1
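# Validation uses a fixed anchor frame t = 1: the three frames around t are the PEN input,
# the frames at t + timesteps serve as forecast targets, and d3label holds the 3D ground truth.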
t_steps = [t + ts for ts in timesteps]
images_0 = video[:, t - 1:t + 2]
images_T = video[:, t_steps]
timestamps_T = timestamps[:, t-1:t+forecast_length+1]
images_all = video[:, t:t+forecast_length]
d3labels_all = d3label[:, t:t+forecast_length]
imlabels = vidlabel[:, [t] + t_steps]
imheatmaps = vidheatmap[:, t_steps]
images_0, images_T, imlabels, imheatmaps = images_0.to(device), images_T.to(device), imlabels.to(device), imheatmaps.to(device)
images_all, d3labels_all = images_all.to(device), d3labels_all.to(device)
eM, iM = eM.to(device), iM.to(device)
timestamps_T = timestamps_T.to(device)
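# PEN forward pass on the 3-frame window: a 2D heatmap and a depth map are predicted,
# from which a per-frame (y, x, depth) image-space coordinate is read out.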
images_0 = eo.rearrange(images_0, 'b t c h w -> (b t) c h w')
heatmap, depthmap = coord_model(images_0)
coords = coord_model.get_coords3D(heatmap, depthmap)
coords = eo.rearrange(coords, '(b t) d -> b t d', t=3)
#heatmap_0 = eo.rearrange(heatmap, '(b t) c h w -> b t c h w', t=3)[:, 1]
coords_0 = coords[:, 1]
images_T = eo.rearrange(images_T, 'b t c h w -> (b t) c h w')
heatmap_T, depthmap_T = coord_model(images_T)
coords_T = coord_model.get_coords3D(heatmap_T, depthmap_T)
coords_T = eo.rearrange(coords_T, '(b t) d -> b t d', t=len(t_steps))
images_T = eo.rearrange(images_T, '(b t) c h w -> b t c h w', t=len(t_steps))
heatmap_T = eo.rearrange(heatmap_T, '(b t) c h w -> b t c h w', t=len(t_steps))
#f = iM[:, 1, 1]
__, __, H, W = images_0.shape
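# Back-project the image-space estimates into camera coordinates (img_to_cam) and let the
# PAF forecast the trajectory over the next forecast_length frames.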
coords_c = img_to_cam(coords, iM, original_size, (H, W))
#pred_coords = forec_model(coords_c, forecast_length=forecast_length)
pred_coords = forec_model(coords_c, timestamps=timestamps_T, forecast_length=forecast_length, extrinsic_matrix = eM.to(device), xml_num=xml_num)
pred_coords_T = pred_coords[:, timesteps]
images_all = eo.rearrange(images_all, 'b t c h w -> (b t) c h w')
heatmap_all, depthmap_all = coord_model(images_all)
coords_all = coord_model.get_coords3D(heatmap_all, depthmap_all)
coords_all = eo.rearrange(coords_all, '(b t) d -> b t d', t=forecast_length)
f = iM[:, 1, 1]
__, __, H, W = images_all.shape
coords_all = img_to_cam(coords_all, iM, original_size, (H, W))
distance = (coords_all - d3labels_all).double().pow(2).sum(2).sqrt()
rel_distance = distance / d3labels_all.double().pow(2).sum(2).sqrt()
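# Reprojection loss: project the PAF forecast back onto the image plane (cam_to_img) and
# compare its 2D location with the PEN estimates at the same frames.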
pred_coords_T_B = cam_to_img(pred_coords_T, iM, original_size, (H, W))[..., :2]
loss_reproject_tmp = loss_fn_coord(pred_coords_T_B, coords_T[..., :2])
loss_2Dcoord_tmp = loss_fn_coord(coords_0[:, :2], imlabels[:, 0]) * 1e-3
# heatmap_T = eo.rearrange(heatmap_T, 'b t c h w -> b t c (h w)').softmax(dim=-1) * imheatmaps.sum(dim=[-1, -2]).unsqueeze(-1)
# heatmap_T = eo.rearrange(heatmap_T, 'b t c (h w) -> b t c h w', h=H, w=W)
loss_2Dheatmap_tmp = loss_fn(heatmap_T, imheatmaps)
for cn_ind in range(cam_num.shape[0]):
cn = cam_num[cn_ind].item()
number_metrics[xml_num][cn] += 1
losses_metrics[xml_num]['distance_to_gt'][cn] += distance[cn_ind].mean().item()
losses_metrics[xml_num]['rel_distance_to_gt'][cn] += rel_distance[cn_ind].mean().item()
losses_metrics[xml_num]['loss_reproject'][cn] += loss_reproject_tmp[cn_ind].mean().cpu().item()
# maybe also add at time t
losses_metrics[xml_num]['loss_2Dcoord'][cn] += loss_2Dcoord_tmp[cn_ind].mean().cpu().item()
losses_metrics[xml_num]['loss_2Dheatmap'][cn] += loss_2Dheatmap_tmp[cn_ind].mean().cpu().item()
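# Average the per-camera, per-environment metrics into the scalar validation summaries.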
distance_to_gt = np.mean([losses_metrics[xml_num]['distance_to_gt'][cn] / number_metrics[xml_num][cn] for cn in range(num_cameras) for xml_num in range(num_environments)])
loss_2Dheatmap = np.mean([losses_metrics[xml_num]['loss_2Dheatmap'][cn] / number_metrics[xml_num][cn] for cn in range(num_cameras) for xml_num in range(num_environments)])
loss_2Dcoord = np.mean([losses_metrics[xml_num]['loss_2Dcoord'][cn] / number_metrics[xml_num][cn] for cn in range(num_cameras) for xml_num in range(num_environments)])
loss_reproject = np.mean([losses_metrics[xml_num]['loss_reproject'][cn] / number_metrics[xml_num][cn] for cn in range(num_cameras) for xml_num in range(num_environments)])
rel_distance_to_gt = np.mean([losses_metrics[xml_num]['rel_distance_to_gt'][cn] / number_metrics[xml_num][cn] for cn in range(num_cameras) for xml_num in range(num_environments)])
for xml_num in range(num_environments):
for metric in losses_metrics[xml_num].keys():
for cn in range(num_cameras):
losses_metrics[xml_num][metric][cn] /= number_metrics[xml_num][cn]
for xmlnum in range(num_environments):
for camnum in range(num_cameras):
writer.add_scalar(f'Validation Loss/env{xmlnum}/camera{camnum}/reprojection loss', losses_metrics[xmlnum]['loss_reproject'][camnum], epoch)
writer.add_scalar(f'Validation Loss/env{xmlnum}/camera{camnum}/2D coordinate loss', losses_metrics[xmlnum]['loss_2Dcoord'][camnum], epoch)
writer.add_scalar(f'Validation Loss/env{xmlnum}/camera{camnum}/2D heatmap loss', losses_metrics[xmlnum]['loss_2Dheatmap'][camnum], epoch)
writer.add_scalar(f'Validation Metric/env{xmlnum}/camera{camnum}/Distance to groundtruth', losses_metrics[xmlnum]['distance_to_gt'][camnum], epoch)
writer.add_scalar(f'Validation relative Metric/env{xmlnum}/camera{camnum}/Relative distance to groundtruth', losses_metrics[xmlnum]['rel_distance_to_gt'][camnum], epoch)
writer.add_text(f'Validation/env{0}/camera{camnum}/coord_model t=0:', ''.join((str(coords_c[0, 1, 0].item()), ', ', str(coords_c[0, 1, 1].item()), ', ', str(coords_c[0, 1, 2].item()))), epoch)
coords_T_c = img_to_cam(coords_T, iM, original_size, (H, W))[:, -1]
writer.add_text(f'Validation/env{0}/camera{camnum}/coord_model t=T:', ''.join((str(coords_T_c[0, 1].item()), ', ', str(coords_T_c[0, 0].item()), ', ', str(coords_T_c[0, 2].item()))), epoch)
writer.add_text(f'Validation/env{0}/camera{camnum}/forec_model t=T:', ''.join((str(pred_coords_T[0, -1, 0].item()), ', ', str(pred_coords_T[0, -1, 1].item()), ', ', str(pred_coords_T[0, -1, 2].item()))), epoch)
writer.add_text(f'Validation/env{0}/camera{camnum}/ground truth t=0:', ''.join((str(d3label[0, t, 1].item()), ', ', str(d3label[0, t, 0].item()), ', ', str(d3label[0, t, 2].item()))), epoch)
writer.add_text(f'Validation/env{0}/camera{camnum}/ground truth t=T:', ''.join((str(d3label[0, t + forecast_length - 1, 1].item()), ', ', str(d3label[0, t + forecast_length - 1, 0].item()), ', ', str(d3label[0, t + forecast_length - 1, 2].item()))), epoch)
if hasattr(forec_model, 'analyticPhysics') and hasattr(forec_model.analyticPhysics, 'g'):
writer.add_text('model parameters:', f'{forec_model.analyticPhysics.g.item()}', epoch)
elif hasattr(forec_model, 'torchdynnets') and hasattr(forec_model.torchdynnets[0].vf.vf, 'g'):
writer.add_text('model parameters:', f'{forec_model.torchdynnets[0].vf.vf.g.item()}', epoch)
if hasattr(forec_model, 'analyticPhysics') and hasattr(forec_model.analyticPhysics, 'k'):
writer.add_text('model parameters:', f'{forec_model.analyticPhysics.k.item()}', epoch)
elif hasattr(forec_model, 'dynnet') and hasattr(forec_model.torchdynnets[0].vf.vf, 'k'):
writer.add_text('model parameters:', f'{forec_model.torchdynnets[0].vf.vf.k.item()}', epoch)
vmin = 0
vmax = imheatmaps.max().item()
fig = plt.figure()
img = eo.rearrange(images_0, '(b t) c h w -> b t c h w', t=3)
img = denorm({'image': img[0, 1]})['image'].cpu().numpy()
img = eo.rearrange(img, 'c h w -> h w c')
heatmap = eo.rearrange(heatmap, '(b t) c h w -> b t c h w', t=3)[0, 1].squeeze(0)
heatmap = heatmap.cpu().numpy()
plt.title(''.join((str(coords_0[0, 0].cpu().numpy().item()), ', ', str(coords_0[0, 1].cpu().numpy().item()))))
plt.imshow(img.astype(np.uint8))
plt.imshow(heatmap, cmap=plt.cm.viridis, alpha=0.65, vmin=heatmap.min(), vmax=heatmap.max())
writer.add_figure('validation heatmap coord_model t=0', fig, epoch)
plt.close()
fig = plt.figure()
img = eo.rearrange(images_0, '(b t) c h w -> b t c h w', t=3)
img = denorm({'image': img[0, 1]})['image'].cpu().numpy()
img = eo.rearrange(img, 'c h w -> h w c')
gt_coord = imlabels[0, 0]
H, W, __ = img.shape
joint = (gt_coord[0].cpu().numpy().item(), gt_coord[1].cpu().numpy().item(), 1.0)
| if __name__ == '__main__':
#os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--environment_name', type=str, default='parcour')
parser.add_argument('--lossmode', type=str, default='2Dpred_nograd')
parser.add_argument('--folder', type=str, default='tmp')
parser.add_argument('--sin_title', type=str, default='convnext')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--loss_coord_title', type=str, default='L1')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
# if __name__ == '__main__':
# torch.autograd.set_detect_anomaly(True)
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
debug = False
def train(config):
torch.manual_seed(config.seed)
random.seed(config.seed)
np.random.seed(config.seed)
g = torch.Generator()
g.manual_seed(config.seed)
forecast_length = max(config.timesteps)
logs_path = config.get_logs_path(debug)
writer = SummaryWriter(logs_path)
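# Build the PEN (heatmap + depth estimator) together with an EMA copy used for validation,
# and the PAF that forecasts the object trajectory for the chosen environment.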
coord_model = get_PEN(config.sin_title, config.depth_mode, config.environment_name).to(device)
coord_model_ema = get_PEN(config.sin_title, config.depth_mode, config.environment_name).to(device).eval()
coord_model_ema.load_state_dict(coord_model.state_dict())
coord_model_ema = update_ema(coord_model, coord_model_ema, 0)
if hasattr(coord_model, 'temperature'):
coord_model.temperature = config.temperature
coord_model_ema.temperature = config.temperature
forec_model = get_PAF(device, config.forec_title, config.environment_name).to(device)
trainset = get_dataset(config.environment_name, mode='train', transforms=train_transform)
valset = get_dataset(config.environment_name, mode='val', transforms=val_transform)
original_size = trainset.original_size
num_workers = 0 if debug else 8
trainloader = torch.utils.data.DataLoader(trainset, batch_size=config.BATCH_SIZE, shuffle=True,
num_workers=num_workers, worker_init_fn=seed_worker, generator=g)
valloader = torch.utils.data.DataLoader(valset, batch_size=config.BATCH_SIZE//4, shuffle=False,
num_workers=num_workers, worker_init_fn=seed_worker, generator=g)
loss_fn = torch.nn.MSELoss(reduction='none')
loss_fn_coord = get_reprojectionloss(config.loss_coord_title)
optim1 = torch.optim.Adam(coord_model.parameters(), lr=config.lr_coord)
optim2 = torch.optim.Adam(forec_model.parameters(), lr=config.lr_forec)
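# Weight of the reprojection term: kept at zero during warm-up, then increased linearly
# up to config.reprojection_weight so the PEN first trains on heatmap supervision alone.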
loss_weight = lambda i: config.reprojection_weight * \
max(0, min(i - config.warmup_iterations, 400 + config.warmup_iterations)) \
/ (400 + config.warmup_iterations)
min_losses = [(1e7, -1), (1e7, -1), (1e7, -1), (1e7, -1), (1e7, -1)]
iteration = 0
for epoch in range(config.NUM_EPOCHS):
print('Epoch', epoch)
for i, data_dict in enumerate(tqdm(trainloader)):
for xml_num, stuff in data_dict.items():
optim1.zero_grad()
optim2.zero_grad()
video, vidlabel, vidheatmap, eM, iM, d3label, cam_num, timestamps = stuff
t = random.randint(1, video.shape[1] - 2 - forecast_length)
timestamps_T = timestamps[:, t-1:t+forecast_length+1]
images_0 = video[:, t - 1:t + 2]
t_steps = [t + ts for ts in config.timesteps]
images_t = video[:, t_steps]
imlabels = vidlabel[:, [t] + t_steps]
imheatmaps = vidheatmap[:, t_steps]
images_0, images_t, imlabels, imheatmaps = images_0.to(device), images_t.to(device), imlabels.to(device), imheatmaps.to(device)
eM, iM = eM.to(device), iM.to(device)
timestamps_T = timestamps_T.to(device)
images_0 = eo.rearrange(images_0, 'b t c h w -> (b t) c h w')
heatmap, depthmap = coord_model(images_0)
coords = coord_model.get_coords3D(heatmap, depthmap)
coords = eo.rearrange(coords, '(b t) d -> b t d', t=3)
#heatmap_0 = eo.rearrange(heatmap, '(b t) c h w -> b t c h w', t=3)[:, 1:2]
#f = iM[:, 1, 1]
__, __, H, W = images_0.shape
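# img_to_cam lifts the predicted image-space coordinates (pixel position plus depth)
# into 3D camera coordinates using the intrinsic matrix iM, accounting for the resize
# from original_size to the network input resolution (H, W).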
coords = img_to_cam(coords, iM, original_size, (H, W))
pred_coords = forec_model(coords, timestamps=timestamps_T, forecast_length=forecast_length, extrinsic_matrix = eM.to(device), xml_num=xml_num)
pred_coords_t = pred_coords[:, config.timesteps]
if config.lossmode in ['2Dgt', '2Dpred', '2Dpred_nograd']:
pred_coords_t = cam_to_img(pred_coords_t, iM, original_size, (H, W))[..., :2]
images_t = eo.rearrange(images_t, 'b t c h w -> (b t) c h w')
heatmap_t, depthmap_t = coord_model(images_t)
coords_t = coord_model.get_coords3D(heatmap_t, depthmap_t)
coords_t = eo.rearrange(coords_t, '(b t) d -> b t d', t=len(config.timesteps))
coords_c_t = img_to_cam(coords_t, iM, original_size, (H, W))
heatmap_t = eo.rearrange(heatmap_t, '(b t) c h w -> b t c h w', t=len(config.timesteps))
if config.lossmode == '2Dgt':
loss_reproject = loss_fn_coord(pred_coords_t, imlabels[:, 1:]).mean()
elif config.lossmode == '2Dpred':
loss_reproject = loss_fn_coord(pred_coords_t, coords_t[..., :2]).mean()
elif config.lossmode == '2Dpred_nograd':
loss_reproject = loss_fn_coord(pred_coords_t, coords_t[..., :2].detach()).mean()
elif config.lossmode == '3Dgt':
d3gt = d3label[:, t_steps].to(device)
loss_reproject = loss_fn_coord(pred_coords_t, d3gt).mean()
elif config.lossmode == '3Dpred':
loss_reproject = loss_fn_coord(pred_coords_t, coords_c_t).mean()
elif config.lossmode == '3Dpred_nograd':
loss_reproject = loss_fn_coord(pred_coords_t, coords_c_t.detach()).mean()
else:
raise ValueError('Unknown lossmode')
# heatmap_t = eo.rearrange(heatmap_t, 'b t c h w -> b t c (h w)').softmax(dim=-1) * imheatmaps.sum(dim=[-1, -2]).unsqueeze(-1)
# heatmap_t = eo.rearrange(heatmap_t, 'b t c (h w) -> b t c h w', h=H, w=W)
loss_2Dheatmap = loss_fn(heatmap_t, imheatmaps).mean()
loss = (loss_weight(iteration) * loss_reproject + loss_2Dheatmap) / len(data_dict.keys())
loss.backward(retain_graph=False)
torch.nn.utils.clip_grad_norm_(coord_model.parameters(), 5)
torch.nn.utils.clip_grad_norm_(forec_model.parameters(), 5)
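# Alternating optimizer schedule: optim1 (coordinate model) is stepped on even
# iterations and on every iteration during warmup; on odd iterations only optim1 is
# stepped as well, because the forecasting-model update (optim2.step()) is currently
# commented out below.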
if iteration % 2 == 0 or iteration < config.warmup_iterations:
optim1.step()
if iteration % 2 == 1:# and iteration > config.warmup_iterations:
#TODO: remove comment, if forec_model should be trained, too
#optim2.step()
optim1.step()
coord_model_ema = update_ema(coord_model, coord_model_ema, config.ema_decay)
writer.add_scalar('Training/reprojection loss', loss_reproject, epoch*len(trainloader)+i)
writer.add_scalar('Training/2D heatmap loss', loss_2Dheatmap, epoch * len(trainloader) + i)
#__, loss_2Dcoord, loss_reproject, distance_to_gt, rel_distance_to_gt = validation((coord_model, forec_model), valloader, writer, epoch, device, config)
__, loss_2Dcoord, loss_reproject, distance_to_gt, rel_distance_to_gt = validation((coord_model_ema, forec_model), valloader, writer, epoch, device, config)
coord_model_ema.eval()
# save model, if it is one of the five best ones
min_loss5, epoch5 = min_losses[0]
if rel_distance_to_gt < min_loss5 and not debug:
min_losses[0] = (rel_distance_to_gt, epoch)
min_losses = sorted(min_losses, reverse=True)
lossesandmetrics = {
'loss_2Dcoord': loss_2Dcoord,
'loss_reproject': loss_reproject,
'distance_to_gt': distance_to_gt,
'rel_distance_to_gt': rel_distance_to_gt
}
save_path = save_models(coord_model, forec_model, optim1, optim2, lossesandmetrics, epoch, config=config)
save_path = os.path.dirname(save_path)
if epoch > 4: os.remove(os.path.join(save_path, f'{epoch5}.pth'))
iteration += 1
def validation(models, valloader, writer, epoch, device, config):
coord_model, forec_model = models
coord_model.eval()
forec_model.eval()
original_size = valloader.dataset.original_size
forecast_length = 15
timesteps = [i for i in range(1, forecast_length)]
num_cameras = valloader.sampler.data_source.num_cameras
num_environments = valloader.sampler.data_source.num_environments
loss_fn = torch.nn.MSELoss(reduction='none')
loss_fn_coord = get_reprojectionloss(config.loss_coord_title)
with torch.no_grad():
losses_metrics = {}
number_metrics = {}
for i in range(num_environments):
losses_metrics[i] = {
'loss_reproject': {},
'loss_2Dheatmap': {},
'loss_2Dcoord': {},
'loss_z_c': {},
'distance_to_gt': {},
'rel_distance_to_gt': {},
}
number_metrics[i] = {}
for j in range(num_cameras):
for v in losses_metrics[i].values():
v[j] = 0
number_metrics[i][j] = 0
for i, data_dict in enumerate(tqdm(valloader)):
for xml_num, stuff in data_dict.items():
video, vidlabel, vidheatmap, eM, iM, d3label, cam_num, timestamps = stuff
t = 1
t_steps = [t + ts for ts in timesteps]
images_0 = video[:, t - 1:t + 2]
images_T = video[:, t_steps]
timestamps_T = timestamps[:, t-1:t+forecast_length+1]
images_all = video[:, t:t+forecast_length]
d3labels_all = d3label[:, t:t+forecast_length]
imlabels = vidlabel[:, [t] + t_steps]
imheatmaps = vidheatmap[:, t_steps]
images_0, images_T, imlabels, imheatmaps = images_0.to(device), images_T.to(device), imlabels.to(device), imheatmaps.to(device)
images_all, d3labels_all = images_all.to(device), d3labels_all.to(device)
eM, iM = eM.to(device), iM.to(device)
timestamps_T = timestamps_T.to(device)
images_0 = eo.rearrange(images_0, 'b t c h w -> (b t) c h w')
heatmap, depthmap = coord_model(images_0)
coords = coord_model.get_coords3D(heatmap, depthmap)
coords = eo.rearrange(coords, '(b t) d -> b t d', t=3)
#heatmap_0 = eo.rearrange(heatmap, '(b t) c h w -> b t c h w', t=3)[:, 1]
coords_0 = coords[:, 1]
images_T = eo.rearrange(images_T, 'b t c h w -> (b t) c h w')
heatmap_T, depthmap_T = coord_model(images_T)
coords_T = coord_model.get_coords3D(heatmap_T, depthmap_T)
coords_T = eo.rearrange(coords_T, '(b t) d -> b t d', t=len(t_steps))
images_T = eo.rearrange(images_T, '(b t) c h w -> b t c h w', t=len(t_steps))
heatmap_T = eo.rearrange(heatmap_T, '(b t) c h w -> b t c h w', t=len(t_steps))
#f = iM[:, 1, 1]
__, __, H, W = images_0.shape
coords_c = img_to_cam(coords, iM, original_size, (H, W))
#pred_coords = forec_model(coords_c, forecast_length=forecast_length)
pred_coords = forec_model(coords_c, timestamps=timestamps_T, forecast_length=forecast_length, extrinsic_matrix = eM.to(device), xml_num=xml_num)
pred_coords_T = pred_coords[:, timesteps]
images_all = eo.rearrange(images_all, 'b t c h w -> (b t) c h w')
heatmap_all, depthmap_all = coord_model(images_all)
coords_all = coord_model.get_coords3D(heatmap_all, depthmap_all)
coords_all = eo.rearrange(coords_all, '(b t) d -> b t d', t=forecast_length)
f = iM[:, 1, 1]
__, __, H, W = images_all.shape
coords_all = img_to_cam(coords_all, iM, original_size, (H, W))
distance = (coords_all - d3labels_all).double().pow(2).sum(2).sqrt()
rel_distance = distance / d3labels_all.double().pow(2).sum(2).sqrt()
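# 'distance' is the per-frame Euclidean error between the predicted and ground-truth
# 3D positions in camera coordinates; 'rel_distance' divides it by the norm of the
# ground-truth position, giving the scale-relative metric reported as rel_distance_to_gt.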
pred_coords_T_B = cam_to_img(pred_coords_T, iM, original_size, (H, W))[..., :2]
loss_reproject_tmp = loss_fn_coord(pred_coords_T_B, coords_T[..., :2])
loss_2Dcoord_tmp = loss_fn_coord(coords_0[:, :2], imlabels[:, 0]) * 1e-3
# heatmap_T = eo.rearrange(heatmap_T, 'b t c h w -> b t c (h w)').softmax(dim=-1) * imheatmaps.sum(dim=[-1, -2]).unsqueeze(-1)
# heatmap_T = eo.rearrange(heatmap_T, 'b t c (h w) -> b t c h w', h=H, w=W)
loss_2Dheatmap_tmp = loss_fn(heatmap_T, imheatmaps)
for cn_ind in range(cam_num.shape[0]):
cn = cam_num[cn_ind].item()
number_metrics[xml_num][cn] += 1
losses_metrics[xml_num]['distance_to_gt'][cn] += distance[cn_ind].mean().item()
losses_metrics[xml_num]['rel_distance_to_gt'][cn] += rel_distance[cn_ind].mean().item()
losses_metrics[xml_num]['loss_reproject'][cn] += loss_reproject_tmp[cn_ind].mean().cpu().item()
# maybe also add at time t
losses_metrics[xml_num]['loss_2Dcoord'][cn] += loss_2Dcoord_tmp[cn_ind].mean().cpu().item()
losses_metrics[xml_num]['loss_2Dheatmap'][cn] += loss_2Dheatmap_tmp[cn_ind].mean().cpu().item()
distance_to_gt = np.mean([losses_metrics[xml_num]['distance_to_gt'][cn] / number_metrics[xml_num][cn] for cn in range(num_cameras) for xml_num in range(num_environments)])
loss_2Dheatmap = np.mean([losses_metrics[xml_num]['loss_2Dheatmap'][cn] / number_metrics[xml_num][cn] for cn in range(num_cameras) for xml_num in range(num_environments)])
loss_2Dcoord = np.mean([losses_metrics[xml_num]['loss_2Dcoord'][cn] / number_metrics[xml_num][cn] for cn in range(num_cameras) for xml_num in range(num_environments)])
loss_reproject = np.mean([losses_metrics[xml_num]['loss_reproject'][cn] / number_metrics[xml_num][cn] for cn in range(num_cameras) for xml_num in range(num_environments)])
rel_distance_to_gt = np.mean([losses_metrics[xml_num]['rel_distance_to_gt'][cn] / number_metrics[xml_num][cn] for cn in range(num_cameras) for xml_num in range(num_environments)])
for xml_num in range(num_environments):
for metric in losses_metrics[xml_num].keys():
for cn in range(num_cameras):
losses_metrics[xml_num][metric][cn] /= number_metrics[xml_num][cn]
for xmlnum in range(num_environments):
for camnum in range(num_cameras):
writer.add_scalar(f'Validation Loss/env{xmlnum}/camera{camnum}/reprojection loss', losses_metrics[xmlnum]['loss_reproject'][camnum], epoch)
writer.add_scalar(f'Validation Loss/env{xmlnum}/camera{camnum}/2D coordinate loss', losses_metrics[xmlnum]['loss_2Dcoord'][camnum], epoch)
writer.add_scalar(f'Validation Loss/env{xmlnum}/camera{camnum}/2D heatmap loss', losses_metrics[xmlnum]['loss_2Dheatmap'][camnum], epoch)
writer.add_scalar(f'Validation Metric/env{xmlnum}/camera{camnum}/Distance to groundtruth', losses_metrics[xmlnum]['distance_to_gt'][camnum], epoch)
writer.add_scalar(f'Validation relative Metric/env{xmlnum}/camera{camnum}/Relative distance to groundtruth', losses_metrics[xmlnum]['rel_distance_to_gt'][camnum], epoch)
writer.add_text(f'Validation/env{0}/camera{camnum}/coord_model t=0:', ''.join((str(coords_c[0, 1, 0].item()), ', ', str(coords_c[0, 1, 1].item()), ', ', str(coords_c[0, 1, 2].item()))), epoch)
coords_T_c = img_to_cam(coords_T, iM, original_size, (H, W))[:, -1]
writer.add_text(f'Validation/env{0}/camera{camnum}/coord_model t=T:', ''.join((str(coords_T_c[0, 1].item()), ', ', str(coords_T_c[0, 0].item()), ', ', str(coords_T_c[0, 2].item()))), epoch)
writer.add_text(f'Validation/env{0}/camera{camnum}/forec_model t=T:', ''.join((str(pred_coords_T[0, -1, 0].item()), ', ', str(pred_coords_T[0, -1, 1].item()), ', ', str(pred_coords_T[0, -1, 2].item()))), epoch)
writer.add_text(f'Validation/env{0}/camera{camnum}/ground truth t=0:', ''.join((str(d3label[0, t, 1].item()), ', ', str(d3label[0, t, 0].item()), ', ', str(d3label[0, t, 2].item()))), epoch)
writer.add_text(f'Validation/env{0}/camera{camnum}/ground truth t=T:', ''.join((str(d3label[0, t + forecast_length - 1, 1].item()), ', ', str(d3label[0, t + forecast_length - 1, 0].item()), ', ', str(d3label[0, t + forecast_length - 1, 2].item()))), epoch)
if hasattr(forec_model, 'analyticPhysics') and hasattr(forec_model.analyticPhysics, 'g'):
writer.add_text('model parameters:', f'{forec_model.analyticPhysics.g.item()}', epoch)
elif hasattr(forec_model, 'torchdynnets') and hasattr(forec_model.torchdynnets[0].vf.vf, 'g'):
writer.add_text('model parameters:', f'{forec_model.torchdynnets[0].vf.vf.g.item()}', epoch)
if hasattr(forec_model, 'analyticPhysics') and hasattr(forec_model.analyticPhysics, 'k'):
writer.add_text('model parameters:', f'{forec_model.analyticPhysics.k.item()}', epoch)
elif hasattr(forec_model, 'torchdynnets') and hasattr(forec_model.torchdynnets[0].vf.vf, 'k'):
writer.add_text('model parameters:', f'{forec_model.torchdynnets[0].vf.vf.k.item()}', epoch)
vmin = 0
vmax = imheatmaps.max().item()
fig = plt.figure()
img = eo.rearrange(images_0, '(b t) c h w -> b t c h w', t=3)
img = denorm({'image': img[0, 1]})['image'].cpu().numpy()
img = eo.rearrange(img, 'c h w -> h w c')
heatmap = eo.rearrange(heatmap, '(b t) c h w -> b t c h w', t=3)[0, 1].squeeze(0)
heatmap = heatmap.cpu().numpy()
plt.title(''.join((str(coords_0[0, 0].cpu().numpy().item()), ', ', str(coords_0[0, 1].cpu().numpy().item()))))
plt.imshow(img.astype(np.uint8))
plt.imshow(heatmap, cmap=plt.cm.viridis, alpha=0.65, vmin=heatmap.min(), vmax=heatmap.max())
writer.add_figure('validation heatmap coord_model t=0', fig, epoch)
plt.close()
fig = plt.figure()
img = eo.rearrange(images_0, '(b t) c h w -> b t c h w', t=3)
img = denorm({'image': img[0, 1]})['image'].cpu().numpy()
img = eo.rearrange(img, 'c h w -> h w c')
gt_coord = imlabels[0, 0]
H, W, __ = img.shape
joint = (gt_coord[0].cpu().numpy().item(), gt_coord[1].cpu().numpy().item(), 1.0) | heatmap = create_heatmaps(joint, (H, W)).squeeze(0) | 0 | 2023-11-30 12:08:47+00:00 | 12k |
aliyun/pai-python-sdk | pai/predictor.py | [
{
"identifier": "FrameworkTypes",
"path": "pai/common/consts.py",
"snippet": "class FrameworkTypes(object):\n PyTorch = \"PyTorch\"\n TFLite = \"TFLite\"\n Keras = \"Keras\"\n Caffe = \"Caffe\"\n Blade = \"Blade\"\n Alink = \"Alink\"\n TensorFlow = \"TensorFlow\""
},
{
"identifier": "ContainerRun",
"path": "pai/common/docker_utils.py",
"snippet": "class ContainerRun(object):\n \"\"\"A class represent a container run in local.\"\"\"\n\n CONTAINER_STATUS_RUNNING = \"running\"\n CONTAINER_STATUS_EXITED = \"exited\"\n CONTAINER_STATUS_PAUSED = \"paused\"\n\n def __init__(self, container, port: Optional[int] = None):\n \"\"\"Initialize a container run.\n\n Args:\n container: A docker container object.\n port (int): The host port that container is exposed to.\n\n \"\"\"\n self.container = container\n self.port = port\n\n @property\n def status(self):\n self.container.reload()\n return self.container.status\n\n def is_running(self):\n \"\"\"Return True if container is running, otherwise False.\"\"\"\n return self.status == self.CONTAINER_STATUS_RUNNING\n\n def is_terminated(self):\n \"\"\"Return True if container is terminated, otherwise False.\"\"\"\n return self.status in [\n self.CONTAINER_STATUS_EXITED,\n self.CONTAINER_STATUS_PAUSED,\n ]\n\n def is_succeeded(self):\n \"\"\"Return True if container is succeeded, otherwise False.\"\"\"\n return (\n self.status == \"exited\" and self.container.attrs[\"State\"][\"ExitCode\"] == 0\n )\n\n def wait_for_ready(self, interval=5):\n \"\"\"Wait until container enter running state or terminated state.\"\"\"\n while True:\n status = self.status\n if status == self.CONTAINER_STATUS_RUNNING:\n break\n elif status in [self.CONTAINER_STATUS_EXITED, self.CONTAINER_STATUS_PAUSED]:\n raise RuntimeError(\n \"Container is terminated : id={} status={}\".format(\n self.container.id, self.container.status\n )\n )\n time.sleep(interval)\n\n def stop(self):\n if self.is_running():\n self.container.stop()\n\n def start(self):\n if not self.is_running():\n self.container.start()\n\n def delete(self):\n if self.is_running():\n self.container.stop()\n self.container.remove()\n\n def watch(self, show_logs: bool = True):\n \"\"\"Watch container log and wait for container to exit.\"\"\"\n if not show_logs:\n self.container.wait()\n else:\n log_iter = self.container.logs(\n stream=True,\n follow=True,\n )\n for log in log_iter:\n print(log.decode())\n\n self.container.reload()\n exit_code = self.container.attrs[\"State\"][\"ExitCode\"]\n if exit_code != 0:\n raise RuntimeError(\n \"Container run exited failed: exit_code={}\".format(exit_code)\n )"
},
{
"identifier": "http_user_agent",
"path": "pai/common/utils.py",
"snippet": "def http_user_agent(user_agent: Optional[Union[Dict, str]] = None) -> str:\n \"\"\"Generate HTTP User-Agent that represents current client.\"\"\"\n ua = f\"pai-python-sdk/{VERSION}; python/{sys.version.split()[0]}\"\n if isinstance(user_agent, dict):\n ua += \"; \" + \"; \".join(f\"{k}/{v}\" for k, v in user_agent.items())\n elif isinstance(user_agent, str):\n ua += \"; \" + user_agent\n return ua"
},
{
"identifier": "PredictionException",
"path": "pai/exception.py",
"snippet": "class PredictionException(PAIException):\n def __init__(self, code, message):\n super(PredictionException, self).__init__(message)\n self.code = code\n self.message = message\n\n def __repr__(self):\n return \"{}: code={}, {}\".format(type(self).__name__, self.code, self.message)"
},
{
"identifier": "JsonSerializer",
"path": "pai/serializers.py",
"snippet": "class JsonSerializer(SerializerBase):\n \"\"\"A Serializer object that serialize input data into JSON format and deserialize\n JSON formatted data into python object.\"\"\"\n\n def serialize(self, data) -> bytes:\n if isinstance(data, six.string_types):\n return data\n\n if _is_pandas_dataframe(data):\n data = data.to_numpy().tolist()\n elif _is_numpy_ndarray(data):\n data = data.tolist()\n return json.dumps(data).encode()\n\n def deserialize(self, data):\n return json.loads(data)"
},
{
"identifier": "PyTorchSerializer",
"path": "pai/serializers.py",
"snippet": "class PyTorchSerializer(SerializerBase):\n \"\"\"A serializer responsible for transforming input/output data for PyTorch\n processor service.\n\n \"\"\"\n\n NUMPY_DATA_TYPE_MAPPING = {\n \"DT_FLOAT\": np.float32,\n \"DT_DOUBLE\": np.float64,\n \"DT_INT8\": np.int8,\n \"DT_INT16\": np.int16,\n \"DT_INT32\": np.int32,\n \"DT_INT64\": np.int64,\n \"DT_UINT8\": np.uint8,\n \"DT_UINT16\": np.uint16,\n \"DT_BOOL\": np.bool_,\n \"DT_STRING\": np.str_,\n }\n\n def __init__(\n self,\n ):\n self._output_filter = []\n\n def _np_dtype_to_torch_dtype(self, np_dtype):\n \"\"\"Get PredictRequest data_type from dtype of input np.ndarray.\"\"\"\n rev_map = {value: key for key, value in self.NUMPY_DATA_TYPE_MAPPING.items()}\n if np_dtype not in rev_map:\n raise ValueError(\n f\"Numpy dtype {np_dtype} is not supported in PyTorchSerializer.\"\n )\n return pt_pb.ArrayDataType.Value(rev_map[np_dtype])\n\n def _torch_dtype_to_numpy_dtype(self, data_type):\n data_type_name = pt_pb.ArrayDataType.Name(data_type)\n if data_type_name not in self.NUMPY_DATA_TYPE_MAPPING:\n raise ValueError(\n f\"Data type {data_type_name} is not supported in PyTorchSerializer.\"\n )\n return self.NUMPY_DATA_TYPE_MAPPING.get(data_type_name)\n\n def serialize(self, data: Union[np.ndarray, List, Tuple]) -> bytes:\n request = pt_pb.PredictRequest()\n if _is_pil_image(data):\n data = np.asarray(data)\n elif isinstance(data, (bytes, str)):\n data = np.asarray(data)\n\n if isinstance(data, np.ndarray):\n # if input data type is np.ndarray, we assume there is only one input data\n # for the prediction request.\n self._put_value(\n request,\n index=0,\n shape=data.shape,\n data_type=self._np_dtype_to_torch_dtype(data.dtype.type),\n data=np.ravel(data).tolist(),\n )\n elif isinstance(data, (List, Tuple)):\n # if input data type is List or Tuple, we assume there is multi input data.\n # for the prediction request.\n for idx, item in enumerate(data):\n if not isinstance(item, np.ndarray):\n item = np.asarray(item)\n if not item:\n continue\n self._put_value(\n request,\n index=0,\n shape=item.shape,\n data_type=self._np_dtype_to_torch_dtype(item.dtype.type),\n data=np.ravel(item).tolist(),\n )\n else:\n raise ValueError(\n \"PyTorchSerializer accept List, Tuple as input request data.\"\n )\n return request.SerializeToString()\n\n def deserialize(self, data: bytes):\n resp = pt_pb.PredictResponse()\n resp.ParseFromString(data)\n if len(resp.outputs) > 1:\n results = []\n for idx in range(resp.outputs):\n results.append(self._get_value(resp, idx))\n return results\n elif len(resp.outputs) == 1:\n return self._get_value(resp, index=0)\n\n def _put_value(\n self, request: pt_pb.PredictRequest, index: int, shape, data_type, data\n ):\n while len(request.inputs) < index + 1:\n request.inputs.add()\n request.inputs[index].dtype = data_type\n request.inputs[index].array_shape.dim.extend(shape)\n if data_type == pt_pb.DT_FLOAT:\n request.inputs[index].float_val.extend(data)\n elif data_type == pt_pb.DT_DOUBLE:\n request.inputs[index].double_val.extend(data)\n elif data_type in (\n pt_pb.DT_INT8,\n pt_pb.DT_INT16,\n pt_pb.DT_INT32,\n pt_pb.DT_UINT8,\n ):\n request.inputs[index].int_val.extend(data)\n elif data_type == pt_pb.DT_INT64:\n request.inputs[index].int64_val.extend(data)\n else:\n raise ValueError(f\"Not supported PyTorch request data type: {data_type}\")\n\n def _get_value(self, response: pt_pb.PredictResponse, index: int):\n output = response.outputs[index]\n if output.dtype == pt_pb.DT_INVALID:\n return\n\n np_dtype = 
self._torch_dtype_to_numpy_dtype(output.dtype)\n shape = list(output.array_shape.dim)\n if output.dtype == pt_pb.DT_FLOAT:\n return np.asarray(output.float_val, np_dtype).reshape(shape)\n elif output.dtype in (\n pt_pb.DT_INT8,\n pt_pb.DT_INT16,\n pt_pb.DT_INT32,\n pt_pb.DT_UINT8,\n ):\n return np.asarray(output.int_val, np_dtype).reshape(shape)\n elif output.dtype == pt_pb.DT_INT64:\n return np.asarray(output.int64_val, np_dtype).reshape(shape)\n elif output.dtype == pt_pb.DT_DOUBLE:\n return np.asarray(output.double_val, np_dtype).reshape(shape)\n else:\n raise ValueError(\n f\"Not supported PyTorch response data type: {output.dtype}\"\n )"
},
{
"identifier": "SerializerBase",
"path": "pai/serializers.py",
"snippet": "class SerializerBase(ABC):\n \"\"\"Abstract class for creating a Serializer class for predictor.\"\"\"\n\n @abstractmethod\n def serialize(self, data) -> bytes:\n \"\"\"Serialize the input data to bytes for transmitting.\"\"\"\n\n @abstractmethod\n def deserialize(self, data: bytes):\n \"\"\"Deserialize the data from raw bytes to Python object .\"\"\"\n\n def inspect_from_service(\n self, service_name: str, *, session: Optional[Session] = None\n ):\n \"\"\"Inspect the online prediction service to complete the serializer instance\n initialization.\n\n The implementation of the `inspect_from_service` method is optional. You only\n need to implement it if your serializer requires additional information from\n service metadata or if it needs to send a request to the service in order to\n be initialized.\n\n \"\"\""
},
{
"identifier": "TensorFlowSerializer",
"path": "pai/serializers.py",
"snippet": "class TensorFlowSerializer(SerializerBase):\n \"\"\"A Serializer class that responsible for transforming input/output data for\n TensorFlow processor service.\"\"\"\n\n NUMPY_DATA_TYPE_MAPPING = {\n \"DT_FLOAT\": np.float32,\n \"DT_DOUBLE\": np.float64,\n \"DT_INT8\": np.int8,\n \"DT_INT16\": np.int16,\n \"DT_INT32\": np.int32,\n \"DT_INT64\": np.int64,\n \"DT_UINT8\": np.uint8,\n \"DT_UINT16\": np.uint16,\n \"DT_BOOL\": np.bool_,\n \"DT_STRING\": np.str_,\n }\n\n def __init__(\n self,\n ):\n \"\"\"TensorflowSerializer initializer.\"\"\"\n\n self._input_specs = []\n self._output_filter = []\n self._signature_name = None\n super(TensorFlowSerializer, self).__init__()\n\n def inspect_from_service(\n self, service_name: str, *, session: Optional[Session] = None\n ):\n \"\"\"Inspect the service to complete serializer instance initialization.\n\n Args:\n service_name (str): Name of the online prediction service.\n session (:class:`pai.session.Session`): A PAI session instance used for\n communicating with PAI services.\n\n \"\"\"\n session = session or get_default_session()\n sig_def = self.inspect_model_signature_def(service_name, session=session)\n self._init_from_signature_def(sig_def)\n\n @classmethod\n def inspect_model_signature_def(\n cls, service_name: str, *, session: Session = None\n ) -> Dict[str, Any]:\n \"\"\"Inspect the TensorFlow serving model signature by sending a request to\n the service.\n\n TensorFlow processor creates a prediction service and exposes an HTTP API for\n model signature definition.\n\n Example API returns::\n\n {\n \"signature_name\": \"serving_default\",\n \"inputs\": [\n {\n \"name\": \"flatten_input\",\n \"shape\": [\n -1,\n 28,\n 28\n ],\n \"type\": \"DT_FLOAT\"\n }\n ],\n \"outputs\": [\n {\n \"name\": \"dense_1\",\n \"shape\": [\n -1,\n 10\n ],\n \"type\": \"DT_FLOAT\"\n }\n ]\n }\n\n Returns:\n A dictionary that represents the model signature definition.\n\n \"\"\"\n from pai.predictor import ServiceStatus\n\n session = session or get_default_session()\n\n service_api_object = session.service_api.get(service_name)\n if service_api_object[\"Status\"] != ServiceStatus.Running:\n raise RuntimeError(\n f\"Service is not ready, cannot send request to the service to inspect \"\n f\"model signature definition: \"\n f\"name={service_api_object['ServiceName']} \"\n f\"status={service_api_object['Status']} \"\n f\"reason={service_api_object['Reason']} \"\n f\"message={service_api_object['Message']}.\"\n )\n\n @backoff.on_exception(\n backoff.expo,\n exception=HTTPError,\n max_tries=3,\n max_time=10,\n )\n def _send_request():\n request = urllib.request.Request(\n url=service_api_object[\"InternetEndpoint\"],\n headers={\n \"Authorization\": service_api_object[\"AccessToken\"],\n },\n )\n resp = urllib.request.urlopen(request)\n return resp\n\n resp = _send_request()\n signature_def = json.load(resp)\n return signature_def\n\n def serialize(self, data: Union[Dict[str, Any], tf_pb.PredictRequest]) -> bytes:\n\n if isinstance(data, tf_pb.PredictRequest):\n return data.SerializeToString()\n\n request = tf_pb.PredictRequest()\n if self._output_filter:\n for output_name in self._output_filter:\n request.output_filter.append(output_name)\n\n if not isinstance(data, dict):\n if not self._input_specs or len(self._input_specs) > 1:\n raise ValueError(\n \"TensorFlowSerializer accepts a dictionary as input data, \"\n \"with each input value having a name.\"\n )\n else:\n # TensorFlow Processor expects key-value pairs for input data. 
However,\n # if the input data is not a dictionary and the deployed model accepts\n # exactly one input (by model signature), the key will be inferred\n # from model signature and the current input data will be taken as\n # value.\n value = numpy.asarray(data)\n input_spec = self._input_specs[0]\n if (\n input_spec.shape\n and len([dim for dim in input_spec.shape if dim == -1]) == 1\n ):\n value = value.reshape(input_spec.shape)\n data_type = (\n input_spec.data_type\n if input_spec and input_spec.data_type is not None\n else self._np_dtype_to_tf_dtype(value.dtype.type)\n )\n self._put_value(\n request=request,\n name=input_spec.name,\n data_type=data_type,\n shape=value.shape,\n data=np.ravel(value).tolist(),\n )\n else:\n input_specs_dict = (\n {input_spec.name: input_spec for input_spec in self._input_specs}\n if self._input_specs\n else {}\n )\n for name, value in data.items():\n input_spec = input_specs_dict.get(name)\n if not isinstance(value, np.ndarray):\n value = np.asarray(value)\n data_type = (\n input_spec.data_type\n if input_spec and input_spec.data_type is not None\n else self._np_dtype_to_tf_dtype(value.dtype.type)\n )\n\n if (\n input_spec\n and input_spec.shape\n and len([dim for dim in input_spec.shape if dim == -1]) == 1\n ):\n value = value.reshape(input_spec.shape)\n self._put_value(\n request=request,\n name=input_spec.name,\n data_type=data_type,\n shape=value.shape,\n data=np.ravel(value).tolist(),\n )\n\n return request.SerializeToString()\n\n def _init_from_signature_def(self, signature_def):\n \"\"\"Build TensorFlowSerializer from signature def.\n\n Args:\n signature_def: Signature def returns from PAI-EAS tensorflow processor.\n\n Returns:\n TensorFlowSerializer:\n \"\"\"\n inputs = signature_def[\"inputs\"]\n signature_def_key = signature_def[\"signature_name\"]\n input_specs = []\n output_specs = []\n for input_def in inputs:\n data_type = tf_pb.ArrayDataType.Value(input_def[\"type\"])\n input_spec = TensorFlowIOSpec(\n name=input_def[\"name\"],\n data_type=data_type,\n # use batch_size=1\n shape=input_def[\"shape\"][1:],\n )\n input_specs.append(input_spec)\n\n for output_def in signature_def[\"outputs\"]:\n data_type = tf_pb.ArrayDataType.Value(output_def[\"type\"])\n output_spec = TensorFlowIOSpec(\n name=output_def[\"name\"],\n data_type=data_type,\n shape=output_def[\"shape\"],\n )\n output_specs.append(output_spec)\n\n if not self._signature_name:\n self._signature_name = signature_def_key\n\n if not self._input_specs:\n self._input_specs = input_specs\n if not self._output_filter:\n self._output_filter = [spec.name for spec in output_specs]\n\n def deserialize(self, data: bytes):\n response = tf_pb.PredictResponse()\n response.ParseFromString(data)\n output_names = response.outputs.keys()\n results = {}\n for name in output_names:\n results[name] = self._get_value(\n response=response,\n name=name,\n )\n return results\n\n def _np_dtype_to_tf_dtype(self, np_dtype):\n rev_map = {value: key for key, value in self.NUMPY_DATA_TYPE_MAPPING.items()}\n if np_dtype not in rev_map:\n raise ValueError(\n f\"Numpy dtype {np_dtype} is not supported in TensorFlowSerializer.\"\n )\n\n return tf_pb.ArrayDataType.Value(rev_map[np_dtype])\n\n def _tf_dtype_to_np_dtype(self, data_type):\n data_type_name = tf_pb.ArrayDataType.Name(data_type)\n if data_type_name not in self.NUMPY_DATA_TYPE_MAPPING:\n raise ValueError(\n f\"Data type {data_type_name} is not supported in TensorFlowSerializer.\"\n )\n return self.NUMPY_DATA_TYPE_MAPPING.get(data_type_name)\n\n def 
_put_value(\n self, request: tf_pb.PredictRequest, name: str, data_type, shape, data\n ):\n request.inputs[name].dtype = data_type\n request.inputs[name].array_shape.dim.extend(shape)\n\n integer_types = [\n tf_pb.DT_INT8,\n tf_pb.DT_INT16,\n tf_pb.DT_INT32,\n tf_pb.DT_UINT8,\n tf_pb.DT_UINT16,\n tf_pb.DT_QINT8,\n tf_pb.DT_QINT16,\n tf_pb.DT_QINT32,\n tf_pb.DT_QUINT8,\n tf_pb.DT_QUINT16,\n ]\n if data_type == tf_pb.DT_FLOAT:\n request.inputs[name].float_val.extend(data)\n elif data_type == tf_pb.DT_DOUBLE:\n request.inputs[name].double_val.extend(data)\n elif data_type in integer_types:\n request.inputs[name].int_val.extend(data)\n elif data_type == tf_pb.DT_INT64:\n request.inputs[name].int64_val.extend(data)\n elif data_type == tf_pb.DT_BOOL:\n request.inputs[name].bool_val.extend(data)\n elif data_type == tf_pb.DT_STRING:\n request.inputs[name].string_val.extend(data)\n else:\n raise ValueError(\n f\"Not supported input data type for TensorFlow PredictRequest: {data_type}\"\n )\n\n def _get_value(self, response: tf_pb.PredictResponse, name):\n output = response.outputs[name]\n\n if (\n name not in response.outputs\n or tf_pb.DT_INVALID == response.outputs[name].dtype\n ):\n return\n np_dtype = self._tf_dtype_to_np_dtype(response.outputs[name].dtype)\n shape = list(output.array_shape.dim)\n\n if output.dtype == tf_pb.DT_FLOAT:\n return np.asarray(output.float_val, np_dtype).reshape(shape)\n elif output.dtype in (tf_pb.DT_INT8, tf_pb.DT_INT16, tf_pb.DT_INT32):\n return np.asarray(output.int_val, np_dtype).reshape(shape)\n elif output.dtype == tf_pb.DT_INT64:\n return np.asarray(output.int64_val, np_dtype).reshape(shape)\n elif output.dtype == tf_pb.DT_DOUBLE:\n return np.asarray(output.double_val, np_dtype).reshape(shape)\n elif output.dtype == tf_pb.DT_STRING:\n return np.asarray(output.string_val, np_dtype).reshape(shape)\n elif output.dtype == tf_pb.DT_BOOL:\n return np.asarray(output.bool_val, np_dtype).reshape(shape)\n else:\n raise ValueError(f\"Not support data_type: {output.dtype}\")"
},
{
"identifier": "Session",
"path": "pai/session.py",
"snippet": "class Session(ResourceAPIsContainerMixin):\n \"\"\"A class responsible for communicating with PAI services.\"\"\"\n\n def __init__(\n self,\n region_id: str,\n workspace_id: Optional[str] = None,\n credential_config: Optional[CredentialConfig] = None,\n oss_bucket_name: Optional[str] = None,\n oss_endpoint: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"PAI Session Initializer.\n\n Args:\n credential_config (:class:`alibabacloud_credentials.models.Config`, optional):\n The credential config used to access the Alibaba Cloud.\n region_id (str): The ID of the Alibaba Cloud region where the service\n is located.\n workspace_id (str, optional): ID of the workspace used in the default\n session.\n oss_bucket_name (str, optional): The name of the OSS bucket used in the\n session.\n oss_endpoint (str, optional): The endpoint for the OSS bucket.\n \"\"\"\n\n if not region_id:\n raise ValueError(\"Region ID must be provided.\")\n\n self._credential_config = credential_config\n self._region_id = region_id\n self._workspace_id = workspace_id\n self._oss_bucket_name = oss_bucket_name\n self._oss_endpoint = oss_endpoint\n\n header = kwargs.pop(\"header\", None)\n super(Session, self).__init__(header=header)\n\n @property\n def region_id(self) -> str:\n return self._region_id\n\n @property\n def is_inner(self) -> bool:\n return self._region_id in INNER_REGION_IDS\n\n @property\n def oss_bucket_name(self) -> str:\n return self._oss_bucket_name\n\n @property\n def oss_endpoint(self) -> str:\n return self._oss_endpoint\n\n @property\n def credential_config(self) -> CredentialConfig:\n return self._credential_config\n\n @property\n def workspace_name(self):\n if hasattr(self, \"_workspace_name\") and self._workspace_name:\n return self._workspace_name\n\n if not self._workspace_id:\n raise ValueError(\"Workspace id is not set.\")\n workspace_api_obj = self.workspace_api.get(workspace_id=self._workspace_id)\n self._workspace_name = workspace_api_obj[\"WorkspaceName\"]\n return self._workspace_name\n\n @property\n def provider(self) -> str:\n caller_identity = self._acs_sts_client.get_caller_identity().body\n return caller_identity.account_id\n\n @property\n def workspace_id(self) -> str:\n \"\"\"ID of the workspace used by the session.\"\"\"\n return self._workspace_id\n\n @property\n def console_uri(self) -> str:\n \"\"\"The web console URI for PAI service.\"\"\"\n if self.is_inner:\n return \"https://pai-next.alibaba-inc.com\"\n else:\n return \"https://pai.console.aliyun.com/console\"\n\n def _init_oss_config(\n self,\n ):\n \"\"\"Initialize a OssConfig instance.\"\"\"\n if not self._oss_bucket_name:\n # If OSS bucket name is not provided, use the default OSS storage URI\n # that is configured for the workspace.\n default_oss_uri = self.workspace_api.get_default_storage_uri(\n self.workspace_id\n )\n if not default_oss_uri:\n raise RuntimeError(\n \"No default OSS URI is configured for the workspace.\"\n )\n oss_uri_obj = OssUriObj(default_oss_uri)\n self._oss_bucket_name = oss_uri_obj.bucket_name\n\n if not self._oss_endpoint:\n self._oss_endpoint = self._get_default_oss_endpoint()\n\n def _get_oss_auth(self):\n auth = oss2.ProviderAuth(\n credentials_provider=CredentialProviderWrapper(\n config=self._credential_config,\n )\n )\n return auth\n\n @property\n def oss_bucket(self):\n \"\"\"A OSS2 bucket instance used by the session.\"\"\"\n if not self._oss_bucket_name or not self._oss_endpoint:\n self._init_oss_config()\n oss_bucket = oss2.Bucket(\n auth=self._get_oss_auth(),\n 
endpoint=self._oss_endpoint,\n bucket_name=self._oss_bucket_name,\n )\n return oss_bucket\n\n def save_config(self, config_path=None):\n \"\"\"Save the configuration of the session to a local file.\"\"\"\n attrs = {key.lstrip(\"_\"): value for key, value in vars(self).items()}\n config = {\n key: value\n for key, value in attrs.items()\n if key in _DEFAULT_CONFIG_KEYS and value is not None\n }\n\n config_path = config_path or DEFAULT_CONFIG_PATH\n os.makedirs(os.path.dirname(config_path), exist_ok=True)\n with open(config_path, \"w\") as f:\n f.write(json.dumps(config, indent=4))\n logger.info(\"Write PAI config succeed: config_path=%s\" % config_path)\n\n def patch_oss_endpoint(self, oss_uri: str):\n oss_uri_obj = OssUriObj(oss_uri)\n if oss_uri_obj.endpoint:\n return oss_uri\n\n # patch endpoint using current OSS bucket endpoint.\n endpoint = self.oss_bucket.endpoint\n if endpoint.startswith(\"http://\"):\n endpoint = endpoint.lstrip(\"http://\")\n elif endpoint.startswith(\"https://\"):\n endpoint = endpoint.lstrip(\"https://\")\n return \"oss://{bucket_name}.{endpoint}/{key}\".format(\n bucket_name=oss_uri_obj.bucket_name,\n endpoint=endpoint,\n key=oss_uri_obj.object_key,\n )\n\n def _get_default_oss_endpoint(self) -> str:\n \"\"\"Returns a default OSS endpoint.\"\"\"\n\n # OSS Endpoint document:\n # https://help.aliyun.com/document_detail/31837.html\n internet_endpoint = \"oss-{}.aliyuncs.com\".format(self.region_id)\n internal_endpoint = \"oss-{}-internal.aliyuncs.com\".format(self.region_id)\n\n return (\n internet_endpoint\n if is_domain_connectable(internal_endpoint)\n else internet_endpoint\n )\n\n def get_oss_bucket(self, bucket_name: str, endpoint: str = None) -> oss2.Bucket:\n \"\"\"Get a OSS bucket using the credentials of the session.\n\n Args:\n bucket_name (str): The name of the bucket.\n endpoint (str): Endpoint of the bucket.\n\n Returns:\n :class:`oss2.Bucket`: A OSS bucket instance.\n\n \"\"\"\n endpoint = endpoint or self._oss_endpoint or self._get_default_oss_endpoint()\n oss_bucket = oss2.Bucket(\n auth=self._get_oss_auth(),\n endpoint=endpoint,\n bucket_name=bucket_name,\n )\n return oss_bucket\n\n @classmethod\n def get_storage_path_by_category(\n cls, category: str, dir_name: Optional[str] = None\n ) -> str:\n \"\"\"Get an OSS storage path for the resource.\n\n Args:\n category (str): The category of the resource.\n dir_name (str, optional): The directory name of the resource.\n\n Returns:\n str: A OSS storage path.\n\n \"\"\"\n dir_name = dir_name or datetime.now().strftime(\"%Y%m%d_%H%M%S_%f\")\n storage_path = posixpath.join(\"pai\", category, dir_name).strip()\n\n if not storage_path.endswith(\"/\"):\n storage_path += \"/\"\n return storage_path\n\n def is_supported_training_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is supported for training.\"\"\"\n instance_generator = make_list_resource_iterator(self.job_api.list_ecs_specs)\n machine_spec = next(\n (\n item\n for item in instance_generator\n if item[\"InstanceType\"] == instance_type\n ),\n None,\n )\n return bool(machine_spec)\n\n def is_gpu_training_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is GPU instance for training.\"\"\"\n instance_generator = make_list_resource_iterator(self.job_api.list_ecs_specs)\n machine_spec = next(\n (\n item\n for item in instance_generator\n if item[\"InstanceType\"] == instance_type\n ),\n None,\n )\n if not machine_spec:\n raise ValueError(\n f\"Instance type {instance_type} is not supported for 
training job. \"\n \"Please provide a supported instance type.\"\n )\n return machine_spec[\"AcceleratorType\"] == \"GPU\"\n\n def is_supported_inference_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is supported for inference.\"\"\"\n res = self.service_api.describe_machine()[\"InstanceMetas\"]\n spec = next(\n (item for item in res if item[\"InstanceType\"] == instance_type), None\n )\n return bool(spec)\n\n def is_gpu_inference_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is GPU instance for inference.\"\"\"\n res = self.service_api.describe_machine()[\"InstanceMetas\"]\n spec = next(\n (item for item in res if item[\"InstanceType\"] == instance_type), None\n )\n\n if not spec:\n raise ValueError(\n f\"Instance type {instance_type} is not supported for deploying. \"\n \"Please provide a supported instance type.\"\n )\n return bool(spec[\"GPU\"])"
},
{
"identifier": "get_default_session",
"path": "pai/session.py",
"snippet": "def get_default_session() -> \"Session\":\n \"\"\"Get the default session used by the program.\n\n If the global default session is set, the function will try to initialize\n a session from config file.\n\n Returns:\n :class:`pai.session.Session`: The default session.\n\n \"\"\"\n global _default_session\n if not _default_session:\n config = load_default_config_file()\n if not config:\n return\n _default_session = Session(**config)\n return _default_session"
}
] | import asyncio
import base64
import functools
import json
import logging
import posixpath
import time
import aiohttp
import requests
import docker
from abc import ABC, abstractmethod
from concurrent.futures import Future, ThreadPoolExecutor
from io import IOBase
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from urllib.parse import urlencode
from .common.consts import FrameworkTypes
from .common.docker_utils import ContainerRun
from .common.utils import http_user_agent
from .exception import PredictionException
from .serializers import (
JsonSerializer,
PyTorchSerializer,
SerializerBase,
TensorFlowSerializer,
)
from .session import Session, get_default_session
from pai.model import _BuiltinProcessor | 10,011 | **kwargs,
) -> AsyncTask:
"""Make a prediction with the online prediction service.
Args:
data (Any): Input data to be sent to the prediction service. If it is a
file-like object, bytes, or string, it will be sent as the request body.
Otherwise, it will be treated as a JSON serializable object and sent as
JSON.
callback (Union[Callable, List[Callable]], optional): A Callback function,
or a list of callback functions used to process the prediction result.
path (str, optional): Path for the request to be sent to. If it is provided,
it will be appended to the endpoint URL (Default None).
headers (dict, optional): Request headers.
method (str, optional): Request method, default to 'POST'.
**kwargs: Additional keyword arguments for the request.
Returns:
AsyncTask: The task object that can be used to retrieve the prediction
result.
Examples:
from pai.predictor import AsyncPredictor, AsyncTask
predictor = AsyncPredictor()
task: AsyncTask = predictor.raw_predict(data="YourPredictionData")
print(task.result())
"""
future = self.executor.submit(
self._raw_predict_fn, data, method, path, headers, **kwargs
)
cbs = [callback] if isinstance(callback, Callable) else callback
if cbs:
for cb in cbs:
future.add_done_callback(self._wrap_callback_fn(cb))
return AsyncTask(future=future)
async def raw_predict_async(
self,
data,
wait_config: WaitConfig = WaitConfig(),
method: str = "POST",
headers: Optional[Dict[str, str]] = None,
path: Optional[str] = None,
**kwargs,
) -> RawResponse:
"""Make a prediction with the online prediction service.
Args:
data (Any): Input data to be sent to the prediction service. If it is a
file-like object, bytes, or string, it will be sent as the request body.
Otherwise, it will be treated as a JSON serializable object and sent as
JSON.
wait_config (WaitConfig): A config object that controls the behavior of
polling the prediction result.
path (str, optional): Path for the request to be sent to. If it is provided,
it will be appended to the endpoint URL (Default None).
headers (dict, optional): Request headers.
method (str, optional): Request method, default to 'POST'.
**kwargs: Additional keyword arguments for the request.
Returns:
RawResponse: Prediction result.
"""
if self.service_status not in ServiceStatus.completed_status():
self.wait_for_ready()
json_data, data = self._handle_raw_input(data)
resp = await self._send_request_async(
data=data,
method=method,
json=json_data,
path=path,
headers=headers,
**kwargs,
)
request_id = await self._get_request_id_async(resp)
# Polling the prediction result.
status_code, headers, content = await self._poll_result_async(
request_id=request_id, wait_config=wait_config
)
return self._handle_raw_output(status_code, headers, content)
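# Illustrative usage sketch for raw_predict_async; it assumes an already deployed
# asynchronous service, and "example_async_service" is only a placeholder name:
#
#   import asyncio
#
#   async def main():
#       predictor = AsyncPredictor(service_name="example_async_service")
#       resp = await predictor.raw_predict_async(data='{"prompt": "hello"}')
#       print(resp.status_code, resp.json())
#
#   asyncio.run(main())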
class LocalPredictor(PredictorBase):
"""Perform prediction to a local service running with docker."""
def __init__(
self,
port: int,
container_id: Optional[str] = None,
serializer: Optional[SerializerBase] = None,
):
"""LocalPredictor initializer.
Args:
port (int): The port of the local service.
container_id (str, optional): The container id of the local service.
serializer (SerializerBase, optional): A serializer object that transforms the input Python object for data transmission and deserializes the response data into a Python object.
"""
self.container_id = container_id
self.port = port
self.serializer = serializer or JsonSerializer()
self._container_run = (
self._build_container_run(container_id, port=port)
if self.container_id
else None
)
@classmethod
def _build_container_run(cls, container_id, port):
try:
except ImportError:
raise ImportError("Please install docker first: pip install docker")
client = docker.from_env()
container = client.containers.get(container_id)
| # Copyright 2023 Alibaba, Inc. or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.getLogger(__name__)
_PAI_SERVICE_CONSOLE_URI_PATTERN = (
"https://pai.console.aliyun.com/?regionId={region_id}#"
"/eas/serviceDetail/{service_name}/detail"
)
_QUEUE_SERVICE_REQUEST_ID_HEADER = "X-Eas-Queueservice-Request-Id"
_QUEUE_SERVICE_SINK_PATH = "sink"
_DEFAULT_ASYNC_WORKER_COUNT = 30
class ServiceStatus(object):
"""All EAS inference service status."""
Running = "Running"
Waiting = "Waiting"
Scaling = "Scaling"
Stopped = "Stopped"
Failed = "Failed"
DeleteFailed = "DeleteFailed"
@classmethod
def completed_status(cls):
return [
cls.Running,
cls.Stopped,
cls.Failed,
cls.DeleteFailed,
]
class EndpointType(object):
# Public Internet Endpoint
INTERNET = "INTERNET"
# VPC Endpoint
INTRANET = "INTRANET"
class ServiceType(object):
Standard = "Standard"
Async = "Async"
class PredictorBase(ABC):
@abstractmethod
def predict(self, *args, **kwargs) -> Any:
"""Perform inference on the provided data and return prediction result."""
@abstractmethod
def raw_predict(
self,
data: Any = None,
path: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
method: str = "POST",
timeout: Optional[Union[float, Tuple[float, float]]] = None,
**kwargs,
):
pass
class RawResponse(object):
"""Response object returned by the predictor.raw_predict."""
def __init__(self, status_code: int, headers: Dict[str, str], content: bytes):
"""Initialize a RawResponse object.
Args:
status_code (int): The HTTP status code of the response.
headers (dict): The HTTP headers of the response.
content (bytes): The raw response body.
"""
self.status_code = status_code
self.headers = headers
self.content = content
def json(self):
"""Returns the json-encoded content of a response
Returns:
Dict[str, Any]: The json-encoded content of a response.
"""
return json.loads(self.content)
class _ServicePredictorMixin(object):
def __init__(
self,
service_name: str,
session: Optional[Session] = None,
endpoint_type: str = EndpointType.INTERNET,
serializer: Optional[SerializerBase] = None,
):
self.service_name = service_name
self.session = session or get_default_session()
self._service_api_object = self.describe_service()
self.endpoint_type = endpoint_type
self.serializer = serializer or self._get_default_serializer()
self._request_session = requests.Session()
def __repr__(self):
return "{}(service_name={}, endpoint_type={})".format(
type(self).__name__,
self.service_name,
self.endpoint_type,
)
def __del__(self):
self._request_session.close()
def refresh(self):
self._service_api_object = self.describe_service()
@property
def endpoint(self):
if self.endpoint_type == EndpointType.INTRANET:
return self._service_api_object["IntranetEndpoint"]
else:
return self._service_api_object["InternetEndpoint"]
@property
def intranet_endpoint(self):
return self._service_api_object["IntranetEndpoint"]
@property
def internet_endpoint(self):
return self._service_api_object["InternetEndpoint"]
@property
def service_status(self):
"""Returns the status of the service."""
return self._service_api_object["Status"]
@property
def access_token(self):
"""Access token of the service."""
return self._service_api_object["AccessToken"]
@property
def console_uri(self):
"""Returns the console URI of the service."""
return _PAI_SERVICE_CONSOLE_URI_PATTERN.format(
region_id=self.session.region_id,
service_name=self.service_name,
)
def _get_default_serializer(self):
"""Get default serializer for the predictor by inspecting the service config."""
service_config = json.loads(self._service_api_object["ServiceConfig"])
processor_code = service_config.get("processor")
# If the prediction service is served with a custom processor or a custom
# container, use JsonSerializer as the default serializer.
if not processor_code:
return JsonSerializer()
if processor_code in (
_BuiltinProcessor.PMML,
_BuiltinProcessor.XGBoost,
):
return JsonSerializer()
elif processor_code.startswith(FrameworkTypes.TensorFlow.lower()):
serializer = TensorFlowSerializer()
return serializer
elif processor_code.startswith(FrameworkTypes.PyTorch.lower()):
return PyTorchSerializer()
else:
return JsonSerializer()
def _post_init_serializer(self):
"""Post-initialize the serializer by invoking serializer.inspect_from_service"""
if not hasattr(self.serializer, "__post_init_serializer_flag") and hasattr(
self.serializer, "inspect_from_service"
):
self.serializer.inspect_from_service(
self.service_name, session=self.session
)
setattr(self.serializer, "__post_init_serializer_flag", 1)
def inspect_model_signature_def(self):
"""Get SignatureDef of the serving model.
.. note::
Only the service using the TensorFlow processor supports getting the
model signature_definition.
Returns:
Dict[str, Any]: A dictionary representing the signature definition of the
serving model.
"""
service_config = json.loads(self._service_api_object["ServiceConfig"])
processor_code = service_config.get("processor")
if processor_code and processor_code.startswith("tensorflow"):
return TensorFlowSerializer.inspect_model_signature_def(
self.service_name, session=self.session
)
raise RuntimeError(
"Only the online prediction service using the TensorFlow processor supports"
" getting the signature_definition"
)
def describe_service(self) -> Dict[str, Any]:
"""Describe the service that referred by the predictor.
Returns:
Dict[str, Any]: Response from PAI API service.
"""
return self.session.service_api.get(self.service_name)
def start_service(self, wait=True):
"""Start the stopped service."""
self.session.service_api.start(name=self.service_name)
if wait:
status = ServiceStatus.Running
unexpected_status = ServiceStatus.completed_status()
unexpected_status.remove(status)
type(self)._wait_for_status(
service_name=self.service_name,
status=status,
unexpected_status=unexpected_status,
session=self.session,
)
self.refresh()
def stop_service(self, wait=True):
"""Stop the running service."""
self.session.service_api.stop(name=self.service_name)
if wait:
status = ServiceStatus.Stopped
unexpected_status = ServiceStatus.completed_status()
unexpected_status.remove(status)
unexpected_status.remove(ServiceStatus.Running)
type(self)._wait_for_status(
service_name=self.service_name,
status=status,
unexpected_status=unexpected_status,
session=self.session,
)
self.refresh()
def delete_service(self):
"""Delete the service."""
self.session.service_api.delete(name=self.service_name)
def wait_for_ready(self, force: bool = False):
"""Wait until the service enter running status.
Args:
force (bool): Whether to force wait for ready.
Raises:
RuntimeError: Raise if the service terminated unexpectedly.
"""
if self.service_status == ServiceStatus.Running and not force:
return
logger.info(
"Service waiting for ready: service_name={}".format(self.service_name)
)
unexpected_status = ServiceStatus.completed_status()
unexpected_status.remove(ServiceStatus.Running)
type(self)._wait_for_status(
service_name=self.service_name,
status=ServiceStatus.Running,
unexpected_status=unexpected_status,
session=self.session,
)
# hack: PAI-EAS gateway may not be ready when the service is ready.
self._wait_for_gateway_ready()
self.refresh()
def _wait_for_gateway_ready(self, attempts: int = 30, interval: int = 2):
"""Hacky way to wait for the service gateway to be ready.
Args:
attempts (int): Number of attempts to wait for the service gateway to be
ready.
interval (int): Interval between each attempt.
"""
def _is_gateway_not_ready(resp: requests.Response):
return resp.status_code == 503 and resp.content == b"no healthy upstream"
err_count_threshold = 3
err_count = 0
while attempts > 0:
attempts -= 1
try:
# Send a probe request to the service.
resp = self._send_request(method="GET")
if not _is_gateway_not_ready(resp):
logger.info("Gateway for the service is ready.")
break
except requests.exceptions.RequestException as e:
err_count += 1
if err_count >= err_count_threshold:
logger.warning("Failed to check gateway status: %s", e)
break
time.sleep(interval)
else:
logger.warning("Timeout waiting for gateway to be ready.")
@classmethod
def _wait_for_status(
cls,
service_name: str,
status: str,
unexpected_status: List[str],
interval: int = 3,
session: Optional[Session] = None,
):
session = session or get_default_session()
service_api_object = session.service_api.get(service_name)
last_status = service_api_object["Status"]
last_msg = service_api_object["Message"]
time.sleep(interval)
while True:
service_api_object = session.service_api.get(service_name)
# Check the service status
cur_status = service_api_object["Status"]
if cur_status == status:
return status
elif unexpected_status and cur_status in unexpected_status:
# Unexpected terminated status
raise RuntimeError(
f"The Service terminated unexpectedly: "
f"name={service_api_object['ServiceName']} "
f"status={service_api_object['Status']} "
f"reason={service_api_object['Reason']} "
f"message={service_api_object['Message']}."
)
elif (
last_status == cur_status and service_api_object["Message"] == last_msg
) and cur_status != ServiceStatus.Waiting:
# If service.status and service.message have not changed and
# service.status is not 'Waiting', do not print the service
# status/message.
pass
else:
logger.info(
f"Refresh Service status: "
f"name={service_api_object['ServiceName']} "
f"id={service_api_object['ServiceId']} "
f"status={service_api_object['Status']} "
f"reason={service_api_object['Reason']} "
f"message={service_api_object['Message']}."
)
last_status = service_api_object["Status"]
last_msg = service_api_object["Message"]
time.sleep(interval)
def switch_version(self, version: int):
"""Switch service to target version.
Args:
version (int): Target version
"""
service_api_object = self.describe_service()
current_version = service_api_object["CurrentVersion"]
latest_version = service_api_object["LatestVersion"]
if current_version == version:
raise ValueError("Target version equals to current version.")
if version > latest_version:
raise ValueError("Target version greater than latest version.")
self.session.service_api.update_version(self.service_name, version=version)
@classmethod
def deploy(
cls,
config: Dict[str, Any],
session: Optional[Session] = None,
endpoint_type: str = EndpointType.INTERNET,
serializer: Optional[SerializerBase] = None,
wait: bool = True,
) -> PredictorBase:
"""Deploy an online prediction service using given configuration.
Args:
config (Dict[str, Any]): A dictionary of service configuration.
session (:class:`pai.session.Session`, optional): An optional
session object. If not provided, a default session will be used.
serializer: An optional serializer object. If not provided, a
default serializer will be used.
endpoint_type: The type of endpoint to use.
wait: Whether to wait for the service to be ready before returning.
Returns:
:class:`pai.predictor.PredictorBase`: A Predictor object for the deployed
online prediction service.
"""
session = session or get_default_session()
name = session.service_api.create(config=config)
if wait:
# Wait until the service is ready
unexpected_status = ServiceStatus.completed_status()
unexpected_status.remove(ServiceStatus.Running)
Predictor._wait_for_status(
service_name=name,
status=ServiceStatus.Running,
unexpected_status=unexpected_status,
session=session,
)
service_api_obj = session.service_api.get(name)
if service_api_obj["ServiceType"] == ServiceType.Async:
p = AsyncPredictor(
service_name=name,
endpoint_type=endpoint_type,
serializer=serializer,
)
else:
p = Predictor(
service_name=name,
endpoint_type=endpoint_type,
serializer=serializer,
)
return p
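# Illustrative sketch of calling deploy(); the config fields shown here are a
# hypothetical minimal example rather than a complete EAS service configuration:
#
#   predictor = Predictor.deploy(
#       config={
#           "name": "example_service",
#           "containers": [{"image": "registry.example.com/my-image:latest", "port": 8000}],
#           "metadata": {"instance": 1},
#       },
#   )
#   resp = predictor.raw_predict(data=b'{"inputs": [1, 2, 3]}')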
def _build_url(
self, path: Optional[str] = None, params: Dict[str, str] = None
) -> str:
url = self.endpoint
if path:
if path.startswith("/"):
path = path[1:]
url = posixpath.join(url, path)
# Add params to URL
url = url + "?" + urlencode(params) if params else url
return url
def _build_headers(self, headers: Dict[str, str] = None) -> Dict[str, str]:
headers = headers or dict()
headers["Authorization"] = self.access_token
headers["User-Agent"] = http_user_agent(headers.get("User-Agent"))
return headers
def _handle_input(self, data):
return self.serializer.serialize(data) if self.serializer else data
def _handle_output(self, content: bytes):
return self.serializer.deserialize(content) if self.serializer else content
def _handle_raw_input(self, data):
if isinstance(data, (IOBase, bytes, str)):
# if data is a file-like object, bytes, or string, it will be sent as
# request body
json_data, data = None, data
else:
# otherwise, it will be treated as a JSON serializable object and sent as
# JSON.
json_data, data = data, None
return json_data, data
def _handle_raw_output(self, status_code: int, headers: dict, content: bytes):
return RawResponse(status_code, headers, content)
def _send_request(
self,
data=None,
path=None,
method="POST",
json=None,
headers=None,
params=None,
**kwargs,
):
url = self._build_url(path)
resp = self._request_session.request(
url=url,
json=json,
data=data,
headers=self._build_headers(headers),
method=method,
params=params,
**kwargs,
)
return resp
async def _send_request_async(
self,
data=None,
path=None,
method="POST",
json=None,
headers=None,
params=None,
**kwargs,
):
url = self._build_url(path=path, params=params)
headers = self._build_headers(headers)
async with aiohttp.ClientSession() as session:
return await session.request(
method=method,
url=url,
headers=headers,
data=data,
json=json,
**kwargs,
)
class Predictor(PredictorBase, _ServicePredictorMixin):
"""Predictor is responsible for making prediction to an online service.
The `predictor.predict` method sends the input data to the online prediction service
and returns the prediction result. The serializer object of the predictor is
responsible for data transformation when the `predict` method is invoked. The input
data is serialized using the `serializer.serialize` method before it is sent, and
the response is deserialized using the `serializer.deserialize` method before the
prediction result returns.
Examples::
# Initialize a predictor object from an existing service using PyTorch
# processor.
torch_predictor = Predictor(service_name="example_torch_service")
result = torch_predictor.predict(numpy.asarray([[22,33,44], [19,22,33]]))
assert isinstance(result, numpy.ndarray)
"""
def __init__(
self,
service_name: str,
endpoint_type: str = EndpointType.INTERNET,
serializer: Optional[SerializerBase] = None,
session: Optional[Session] = None,
):
"""Construct a `Predictor` object using an existing prediction service.
Args:
service_name (str): Name of the existing prediction service.
endpoint_type (str): Selects the endpoint used by the predictor, which
should be one of `INTERNET` or `INTRANET`. The `INTERNET` endpoint type
means that the predictor calls the service over a public endpoint, while
the `INTRANET` endpoint type is over a VPC endpoint.
            serializer (SerializerBase, optional): A serializer object that transforms
                the input Python object for data transmission and deserializes the
                response data back into a Python object.
session (Session, optional): A PAI session object used for communicating
with PAI service.
"""
super(Predictor, self).__init__(
service_name=service_name,
session=session or get_default_session(),
endpoint_type=endpoint_type,
serializer=serializer,
)
self._check()
def _check(self):
config = json.loads(self._service_api_object["ServiceConfig"])
if config.get("metadata", {}).get("type") == ServiceType.Async:
logger.warning(
"Predictor is not recommended to make prediction to a async"
" prediction service."
)
def predict(self, data):
"""Make a prediction with the online prediction service.
The serializer object for the predictor is responsible for data transformation
when the 'predict' method is invoked. The input data is serialized using the
`serializer.serialize` method before it is sent, and the response is
deserialized using the `serializer.deserialize` method before the prediction
result returns.
Args:
data: The input data for the prediction. It will be serialized using the
serializer of the predictor before transmitted to the prediction
service.
Returns:
object: Prediction result.
Raises:
            PredictionException: Raised if the status code of the prediction response
                is not 2xx.
"""
self._post_init_serializer()
data = self._handle_input(data)
resp = self._send_request(
data,
)
if resp.status_code // 100 != 2:
raise PredictionException(resp.status_code, resp.content)
return self._handle_output(
resp.content,
)
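    # Illustrative sketch (not part of the original source): calling `predict`
    # with an explicit serializer. "example_json_service" is a hypothetical
    # service name; JsonSerializer is the same serializer class used by
    # LocalPredictor further below.
    #
    #     predictor = Predictor(
    #         service_name="example_json_service",
    #         serializer=JsonSerializer(),
    #     )
    #     result = predictor.predict({"features": [1.0, 2.0, 3.0]})
    #     # `result` is the deserialized response body.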
def raw_predict(
self,
data: Any = None,
path: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
method: str = "POST",
timeout: Optional[Union[float, Tuple[float, float]]] = None,
**kwargs,
) -> RawResponse:
"""Make a prediction with the online prediction service.
Args:
data (Any): Input data to be sent to the prediction service. If it is a
file-like object, bytes, or string, it will be sent as the request body.
Otherwise, it will be treated as a JSON serializable object and sent as
JSON.
path (str, optional): Path for the request to be sent to. If it is provided,
it will be appended to the endpoint URL (Default None).
headers (dict, optional): Request headers.
method (str, optional): Request method, default to 'POST'.
            timeout (float, tuple(float, float), optional): Timeout setting for the
request (Default 10).
**kwargs: Additional keyword arguments for the request.
Returns:
RawResponse: Prediction response from the service.
Raises:
            PredictionException: Raised if the status code of the prediction response
                is not 2xx.
"""
json_data, data = self._handle_raw_input(data)
resp = self._send_request(
data=data,
json=json_data,
method=method,
path=path,
headers=headers,
timeout=timeout,
**kwargs,
)
if resp.status_code // 100 != 2:
raise PredictionException(resp.status_code, resp.content)
resp = RawResponse(
status_code=resp.status_code,
content=resp.content,
headers=dict(resp.headers),
)
return resp
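    # Illustrative sketch (not part of the original source): `raw_predict` with a
    # JSON body and a sub-path. The path and payload are hypothetical and depend
    # entirely on the deployed service.
    #
    #     resp = predictor.raw_predict(
    #         data={"prompt": "hello"},   # non-bytes/str input is sent as JSON
    #         path="generate",            # appended to the service endpoint URL
    #         timeout=30,
    #     )
    #     print(resp.status_code, resp.content)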
class WaitConfig(object):
"""WaitConfig is used to set polling configurations for waiting for asynchronous
requests to complete."""
def __init__(self, max_attempts: int = 0, interval: int = 5):
if interval <= 0:
            raise ValueError("interval must be a positive integer.")
self.max_attempts = max_attempts
self.interval = interval
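    # Illustrative note (not part of the original source): in the polling loops
    # below, `max_attempts * interval` bounds the total wait time, and
    # `max_attempts <= 0` means "poll until a result arrives". For example:
    #
    #     WaitConfig()                             # poll every 5s, no attempt limit
    #     WaitConfig(max_attempts=60, interval=2)  # give up after ~120 seconds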
class AsyncTask(object):
"""AsyncTask is a wrapper class for `concurrent.futures.Future` object that represents
a prediction call submitted to an async prediction service.
"""
def __init__(
self,
future: Future,
):
self.future = future
super(AsyncTask, self).__init__()
def result(self, timeout: Optional[float] = None):
"""
Returns the prediction result of the call.
Args:
timeout (float, optional): Timeout setting (Default None).
Returns:
The result of the prediction call.
"""
return self.future.result(timeout=timeout)
def done(self):
return self.future.done()
def exception(self, timeout: Optional[float] = None) -> Optional[Exception]:
return self.future.exception()
def running(self):
return self.future.running()
def cancel(self):
return self.future.cancel()
def cancelled(self):
return self.future.cancelled()
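    # Illustrative sketch (not part of the original source): AsyncTask simply
    # proxies the wrapped Future, so the usual Future-style checks apply.
    #
    #     task = async_predictor.predict(data="YourPredictionData")
    #     if task.done():
    #         print(task.result())
    #     else:
    #         result = task.result(timeout=60)   # block for up to 60 seconds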
class AsyncPredictor(PredictorBase, _ServicePredictorMixin):
"""A class that facilitates making predictions to asynchronous prediction service.
Examples::
# Initialize an AsyncPredictor object using the name of a running service.
async_predictor = AsyncPredictor(service_name="example_service")
# Make a prediction with the service and get the prediction result.
        task = async_predictor.predict(data="YourPredictionData")
        result = task.result()
# Make a prediction with async API.
import asyncio
result = asyncio.run(async_predictor.predict_async(data="YourPredictionData"))
"""
def __init__(
self,
service_name: str,
max_workers: Optional[int] = None,
endpoint_type: str = EndpointType.INTERNET,
serializer: Optional[SerializerBase] = None,
session: Optional[Session] = None,
):
"""Construct a `AsyncPredictor` object using an existing async prediction service.
Args:
service_name (str): Name of the existing prediction service.
max_workers (int): The maximum number of threads that can be used to
execute the given prediction calls.
endpoint_type (str): Selects the endpoint used by the predictor, which
should be one of `INTERNET` or `INTRANET`. The `INTERNET` endpoint type
means that the predictor calls the service over a public endpoint, while
the `INTRANET` endpoint type is over a VPC endpoint.
            serializer (SerializerBase, optional): A serializer object that transforms
                the input Python object for data transmission and deserializes the
                response data back into a Python object.
session (Session, optional): A PAI session object used for communicating
with PAI service.
"""
super(AsyncPredictor, self).__init__(
service_name=service_name,
session=session or get_default_session(),
endpoint_type=endpoint_type,
serializer=serializer,
)
self._max_workers = max_workers
self.executor = ThreadPoolExecutor(max_workers=self._max_workers)
self._check()
@property
def max_workers(self):
return self._max_workers
@max_workers.setter
def max_workers(self, n: int):
if hasattr(self, "executor"):
logger.info("Waiting for all submitted tasks in the queue to complete...")
self.executor.shutdown()
self._max_workers = n
self.executor = ThreadPoolExecutor(max_workers=self._max_workers)
def __del__(self):
"""wait for all pending tasks to complete before exit."""
if hasattr(self, "executor"):
logger.info("Waiting for all pending tasks to complete...")
self.executor.shutdown()
super(AsyncPredictor, self).__del__()
def _check(self):
config = json.loads(self._service_api_object["ServiceConfig"])
if config.get("metadata", {}).get("type") != ServiceType.Async:
logger.warning(
"AsyncPredictor is not recommended to make prediction to a standard "
" prediction service."
)
def _get_result(
self, request_id: str
) -> Optional[Tuple[int, Dict[str, str], bytes]]:
resp = self._send_request(
method="GET",
path=_QUEUE_SERVICE_SINK_PATH,
params={
"requestId": request_id,
# _raw_ is false because we want to get the encapsulated prediction
# result in response body.
"_raw_": "false",
},
)
logger.debug(
"Poll prediction result: request_id=%s status_code=%s, content=%s",
request_id,
resp.status_code,
resp.content,
)
if resp.status_code == 204:
# Status code 204 means could not find prediction response for the specific
# request id.
return
# Raise exception if status code is not 2xx.
if resp.status_code // 100 != 2:
raise RuntimeError(
"Pulling prediction result failed: status_code={} content={}".format(
resp.status_code, resp.content.decode("utf-8")
)
)
return self._parse_encapsulated_response(resp.json()[0])
def _parse_encapsulated_response(self, data) -> Tuple[int, Dict[str, str], bytes]:
tags = data["tags"]
# If the status code from prediction service is not 200, a tag with
# key 'lastCode' will be added to the tags in response.
status_code = int(tags.get("lastCode", 200))
data = base64.b64decode(data["data"])
# currently, headers are not supported in async prediction service.
headers = dict()
return status_code, headers, data
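    # Illustrative note (not part of the original source): the queue service is
    # assumed to return items shaped roughly like
    #
    #     {"tags": {"lastCode": "200", ...}, "data": "<base64-encoded body>"}
    #
    # so the method above yields (status_code, headers, body_bytes), with headers
    # always empty because the async service does not propagate them.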
async def _get_result_async(
self, request_id: str
) -> Optional[Tuple[int, Dict[str, str], bytes]]:
resp = await self._send_request_async(
method="GET",
path=_QUEUE_SERVICE_SINK_PATH,
params={
"requestId": request_id,
# _raw_ is false because we want to get the encapsulated prediction
# result in response body.
"_raw_": "false",
},
)
status_code = resp.status
content = await resp.read()
logger.debug(
"Get prediction result: request_id=%s status_code=%s, content=%s",
request_id,
status_code,
content,
)
if status_code == 204:
# Status code 204 means could not find prediction response for the specific
# request id.
return
if status_code // 100 != 2:
raise RuntimeError(
"Pulling prediction result failed: status_code={} content={}".format(
status_code, content.decode("utf-8")
)
)
data = (await resp.json())[0]
return self._parse_encapsulated_response(data)
def _poll_result(
self, request_id: str, wait_config: WaitConfig
) -> Tuple[int, Dict[str, str], bytes]:
# if max_attempts is negative or zero, then wait forever
attempts = -1 if wait_config.max_attempts <= 0 else wait_config.max_attempts
while attempts != 0:
attempts -= 1
result = self._get_result(request_id=request_id)
if not result:
time.sleep(wait_config.interval)
continue
status_code, headers, content = result
# check real prediction response
if status_code // 100 != 2:
raise PredictionException(
code=status_code,
message=f"Prediction failed: status_code={status_code}"
f" content={content.decode()}",
)
return status_code, headers, content
# Polling prediction result timeout.
raise RuntimeError(
f"Polling prediction result timeout: request_id={request_id}, "
f"total_time={wait_config.max_attempts * wait_config.interval}"
)
async def _poll_result_async(
self, request_id, wait_config: WaitConfig
) -> Tuple[int, Dict[str, str], bytes]:
# if max_attempts is negative or zero, then wait forever
attempts = -1 if wait_config.max_attempts <= 0 else wait_config.max_attempts
while attempts != 0:
attempts -= 1
result = await self._get_result_async(request_id)
if not result:
await asyncio.sleep(wait_config.interval)
continue
status_code, headers, content = result
# check real prediction response
if status_code // 100 != 2:
                raise PredictionException(
                    code=status_code,
                    message=f"Prediction failed: status_code={status_code}"
                    f" content={content.decode()}",
                )
return status_code, headers, content
# Polling prediction result timeout.
raise RuntimeError(
f"Polling prediction result timeout: request_id={request_id}, "
f"total_time={wait_config.max_attempts * wait_config.interval}"
)
def _get_request_id(self, resp: requests.models.Response) -> str:
if resp.status_code // 100 != 2:
raise RuntimeError(
f"Send prediction request failed. status_code={resp.status_code} "
f"message={resp.text}"
)
if _QUEUE_SERVICE_REQUEST_ID_HEADER not in resp.headers:
logger.error(
f"Send prediction request failed. Missing request id."
f" status_code={resp.status_code} content={resp.text}"
)
raise RuntimeError("Missing request id in response header.")
request_id = resp.headers[_QUEUE_SERVICE_REQUEST_ID_HEADER]
logger.debug(
f"Send prediction request successfully. request_id={request_id}"
f" status_code={resp.status_code}",
)
return request_id
async def _get_request_id_async(self, resp: aiohttp.ClientResponse) -> str:
content = await resp.read()
if resp.status != 200:
raise RuntimeError(
"Send request to async prediction service failed: status_code={} "
"content={}".format(resp.status, content.decode("utf-8"))
)
if _QUEUE_SERVICE_REQUEST_ID_HEADER not in resp.headers:
logger.error(
f"Send prediction request failed. Missing request id."
f" status_code={resp.status} content={content.decode()}"
)
raise RuntimeError("Missing request id in response header.")
request_id = resp.headers[_QUEUE_SERVICE_REQUEST_ID_HEADER]
logger.debug(
f"Send prediction request successfully. request_id={request_id}"
f" status_code={resp.status}",
)
return request_id
def _predict_fn(
self,
data,
):
"""Make a prediction with the async prediction service."""
# serialize input data
data = self._handle_input(data)
resp = self._send_request(data=data)
request_id = self._get_request_id(resp)
logger.debug("Async prediction RequestId: ", request_id)
# poll prediction result
status, headers, content = self._poll_result(
request_id=request_id, wait_config=WaitConfig()
)
return self._handle_output(content)
def _wrap_callback_fn(self, cb: Callable):
"""Wrap the callback function to handle the prediction result."""
@functools.wraps(cb)
def _(future: Future):
return cb(future.result())
return _
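    # Illustrative note (not part of the original source): because the wrapper
    # above calls `cb(future.result())`, a callback receives the already
    # deserialized prediction result rather than the Future itself, e.g.:
    #
    #     def on_done(prediction_result):
    #         print("prediction:", prediction_result)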
def predict(
self,
data,
callback: Optional[Union[Callable, List[Callable]]] = None,
):
"""Make a prediction with the async prediction service.
        The input data is serialized using the `serializer.serialize` method before it
        is sent, and the response body is deserialized using the
        `serializer.deserialize` method before the prediction result returns.
Args:
data: The input data for the prediction. It will be serialized using the
serializer of the predictor before transmitted to the prediction
service.
callback (Union[Callable, List[Callable]], optional): A Callback function,
or a list of callback functions used to process the prediction result.
Returns:
AsyncTask: The task object that can be used to retrieve the prediction
result.
"""
self._post_init_serializer()
future = self.executor.submit(self._predict_fn, data)
if isinstance(callback, Callable):
callback = [callback]
if callback:
for cb in callback:
future.add_done_callback(self._wrap_callback_fn(cb))
return AsyncTask(future=future)
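    # Illustrative sketch (not part of the original source): submitting a
    # prediction together with a callback; the input mirrors the hypothetical
    # data used in the class docstring above.
    #
    #     task = async_predictor.predict(
    #         data="YourPredictionData",
    #         callback=[lambda result: print("done:", result)],
    #     )
    #     result = task.result()   # the same value is also passed to the callback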
async def predict_async(self, data, wait_config: WaitConfig = WaitConfig()):
"""Make a prediction with the async prediction service.
The serializer object for the predictor is responsible for data transformation
when the 'predict' method is invoked. The input data is serialized using the
`serializer.serialize` method before it is sent, and the response is
deserialized using the `serializer.deserialize` method before the prediction
result returns.
Args:
data: The input data for the prediction. It will be serialized using the
serializer of the predictor before transmitted to the prediction
service.
wait_config (WaitConfig): A config object that controls the behavior of
polling the prediction result.
Returns:
Prediction result.
"""
self._post_init_serializer()
data = self._handle_input(data)
resp = await self._send_request_async(data=data)
request_id = await self._get_request_id_async(resp)
status_code, headers, content = await self._poll_result_async(
request_id=request_id, wait_config=wait_config
)
return self._handle_output(content)
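    # Illustrative sketch (not part of the original source): issuing several
    # predictions concurrently with asyncio; the input items are hypothetical
    # placeholders.
    #
    #     async def batch_predict(predictor, items):
    #         return await asyncio.gather(
    #             *(predictor.predict_async(data=item) for item in items)
    #         )
    #
    #     results = asyncio.run(batch_predict(async_predictor, ["a", "b", "c"]))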
def _raw_predict_fn(self, data, method, path, headers, **kwargs):
json_data, data = self._handle_raw_input(data)
resp = self._send_request(
path=path,
json=json_data,
data=data,
headers=self._build_headers(headers),
method=method,
**kwargs,
)
request_id = self._get_request_id(resp)
status, headers, content = self._poll_result(
request_id, wait_config=WaitConfig()
)
return RawResponse(status, headers, content)
def raw_predict(
self,
data: Any = None,
        callback: Optional[Union[Callable, List[Callable]]] = None,
method: str = "POST",
path: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
**kwargs,
) -> AsyncTask:
"""Make a prediction with the online prediction service.
Args:
data (Any): Input data to be sent to the prediction service. If it is a
file-like object, bytes, or string, it will be sent as the request body.
Otherwise, it will be treated as a JSON serializable object and sent as
JSON.
callback (Union[Callable, List[Callable]], optional): A Callback function,
or a list of callback functions used to process the prediction result.
path (str, optional): Path for the request to be sent to. If it is provided,
it will be appended to the endpoint URL (Default None).
headers (dict, optional): Request headers.
method (str, optional): Request method, default to 'POST'.
**kwargs: Additional keyword arguments for the request.
Returns:
AsyncTask: The task object that can be used to retrieve the prediction
result.
Examples:
from pai.predictor import AsyncPredictor, AsyncTask
predictor = AsyncPredictor()
task: AsyncTask = predictor.raw_predict(data="YourPredictionData")
print(task.result())
"""
future = self.executor.submit(
self._raw_predict_fn, data, method, path, headers, **kwargs
)
cbs = [callback] if isinstance(callback, Callable) else callback
if cbs:
for cb in cbs:
future.add_done_callback(self._wrap_callback_fn(cb))
return AsyncTask(future=future)
async def raw_predict_async(
self,
data,
wait_config: WaitConfig = WaitConfig(),
method: str = "POST",
headers: Optional[Dict[str, str]] = None,
path: Optional[str] = None,
**kwargs,
) -> RawResponse:
"""Make a prediction with the online prediction service.
Args:
data (Any): Input data to be sent to the prediction service. If it is a
file-like object, bytes, or string, it will be sent as the request body.
Otherwise, it will be treated as a JSON serializable object and sent as
JSON.
wait_config (WaitConfig): A config object that controls the behavior of
polling the prediction result.
path (str, optional): Path for the request to be sent to. If it is provided,
it will be appended to the endpoint URL (Default None).
headers (dict, optional): Request headers.
method (str, optional): Request method, default to 'POST'.
**kwargs: Additional keyword arguments for the request.
Returns:
RawResponse: Prediction result.
"""
if self.service_status not in ServiceStatus.completed_status():
self.wait_for_ready()
json_data, data = self._handle_raw_input(data)
resp = await self._send_request_async(
data=data,
method=method,
json=json_data,
path=path,
headers=headers,
**kwargs,
)
request_id = await self._get_request_id_async(resp)
# Polling the prediction result.
status_code, headers, content = await self._poll_result_async(
request_id=request_id, wait_config=wait_config
)
return self._handle_raw_output(status_code, headers, content)
class LocalPredictor(PredictorBase):
"""Perform prediction to a local service running with docker."""
def __init__(
self,
port: int,
container_id: Optional[str] = None,
serializer: Optional[SerializerBase] = None,
):
"""LocalPredictor initializer.
Args:
port (int): The port of the local service.
container_id (str, optional): The container id of the local service.
            serializer (SerializerBase, optional): A serializer object that transforms
                the input data for transmission and deserializes the response data.
"""
self.container_id = container_id
self.port = port
self.serializer = serializer or JsonSerializer()
self._container_run = (
self._build_container_run(container_id, port=port)
if self.container_id
else None
)
@classmethod
def _build_container_run(cls, container_id, port):
        try:
            import docker
        except ImportError:
            raise ImportError("Please install docker first: pip install docker")
client = docker.from_env()
container = client.containers.get(container_id)
| return ContainerRun(container=container, port=port) | 1 | 2023-12-01 01:40:12+00:00 | 12k |
xraychen/OFA-Wav2Vec2 | fairseq/models/fairseq_model.py | [
{
"identifier": "utils",
"path": "fairseq/utils.py",
"snippet": "MANIFOLD_PATH_SEP = \"|\"\n HOTRELOAD_PAUSE = bool(os.environ.get(\"HOTRELOAD_PAUSE\", 0))\nclass FileContentsAction(argparse.Action):\nclass set_torch_seed(object):\nclass CudaEnvironment(object):\n def __init__(self, option_strings, dest, nargs=None, **kwargs):\n def __call__(self, parser, namespace, values, option_string=None):\ndef split_paths(paths: str, separator=os.pathsep) -> List[str]:\ndef load_ensemble_for_inference(filenames, task, model_arg_overrides=None):\ndef apply_to_sample(f, sample):\n def _apply(x):\ndef move_to_cuda(sample, device=None):\n def _move_to_cuda(tensor):\ndef move_to_cpu(sample):\n def _move_to_cpu(tensor):\ndef move_to_tpu(sample):\n def _move_to_tpu(tensor):\ndef get_incremental_state(\n module: \"MultiheadAttention\",\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],\n key: str,\n) -> Optional[Dict[str, Optional[Tensor]]]:\ndef set_incremental_state(\n module: \"MultiheadAttention\",\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],\n key: str,\n value: Dict[str, Optional[Tensor]],\n) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:\ndef load_align_dict(replace_unk):\ndef print_embed_overlap(embed_dict, vocab_dict):\ndef parse_embedding(embed_path):\ndef load_embedding(embed_dict, vocab, embedding):\ndef replace_unk(hypo_str, src_str, alignment, align_dict, unk):\ndef post_process_prediction(\n hypo_tokens,\n src_str,\n alignment,\n align_dict,\n tgt_dict,\n remove_bpe=None,\n extra_symbols_to_ignore=None,\n):\ndef make_positions(tensor, padding_idx: int, onnx_trace: bool = False):\ndef strip_pad(tensor, pad):\ndef buffered_arange(max):\ndef convert_padding_direction(\n src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False\n):\ndef item(tensor):\ndef multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor:\ndef clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:\n def grad_exists(p):\ndef fill_with_neg_inf(t):\ndef _match_types(arg1, arg2):\n def upgrade(arg_number, arg_structure):\ndef resolve_max_positions(*args):\n def map_value_update(d1, d2):\n def nullsafe_min(l):\ndef import_user_module(args):\ndef softmax(x, dim: int, onnx_trace: bool = False):\ndef log_softmax(x, dim: int, onnx_trace: bool = False):\ndef get_perplexity(loss, round=2, base=2):\ndef deprecation_warning(message, stacklevel=3):\ndef relu_squared(x: torch.Tensor):\ndef get_activation_fn(activation: str) -> Callable:\ndef get_available_activation_fns() -> List:\ndef model_eval(model):\ndef has_parameters(module):\ndef get_rng_state():\ndef set_rng_state(state):\n def __init__(self, seed):\n def __enter__(self):\n def __exit__(self, *exc):\ndef parse_alignment(line):\ndef get_token_to_word_mapping(tokens, exclude_list):\ndef extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):\ndef extract_soft_alignment(attn, src_sent, tgt_sent, pad, eos):\ndef new_arange(x, *size):\ndef get_tpu_device():\ndef tpu_data_loader(itr):\ndef is_xla_tensor(tensor):\ndef index_put(tensor, indices, value):\ndef xla_device_to_cpu(dat):\n def __init__(self):\n def pretty_print_cuda_env_list(cuda_env_list):\ndef csv_str_list(x):\ndef eval_str_list(x, type=float):\ndef eval_str_dict(x, type=dict):\ndef eval_bool(x, default=False):\ndef reset_logging():\ndef safe_getattr(obj, k, default=None):\ndef safe_hasattr(obj, k):\ndef hotreload_function(name=None):\n def hotreload_decorator(func):\n def func_wrapper(*args, **kwargs):"
},
{
"identifier": "Dictionary",
"path": "fairseq/data/dictionary.py",
"snippet": "class Dictionary:\n \"\"\"A mapping from symbols to consecutive integers\"\"\"\n\n def __init__(\n self,\n *, # begin keyword-only arguments\n bos=\"<s>\",\n pad=\"<pad>\",\n eos=\"</s>\",\n unk=\"<unk>\",\n extra_special_symbols=None,\n ):\n self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos\n self.symbols = []\n self.count = []\n self.indices = {}\n self.bos_index = self.add_symbol(bos)\n self.pad_index = self.add_symbol(pad)\n self.eos_index = self.add_symbol(eos)\n self.unk_index = self.add_symbol(unk)\n if extra_special_symbols:\n for s in extra_special_symbols:\n self.add_symbol(s)\n self.nspecial = len(self.symbols)\n\n def __eq__(self, other):\n return self.indices == other.indices\n\n def __getitem__(self, idx):\n if idx < len(self.symbols):\n return self.symbols[idx]\n return self.unk_word\n\n def get_count(self, idx):\n return self.count[idx]\n\n def __len__(self):\n \"\"\"Returns the number of symbols in the dictionary\"\"\"\n return len(self.symbols)\n\n def __contains__(self, sym):\n return sym in self.indices\n\n def index(self, sym):\n \"\"\"Returns the index of the specified symbol\"\"\"\n assert isinstance(sym, str)\n if sym in self.indices:\n return self.indices[sym]\n return self.unk_index\n\n def string(\n self,\n tensor,\n bpe_symbol=None,\n escape_unk=False,\n extra_symbols_to_ignore=None,\n unk_string=None,\n include_eos=False,\n separator=\" \",\n ):\n \"\"\"Helper for converting a tensor of token indices to a string.\n\n Can optionally remove BPE symbols or escape <unk> words.\n \"\"\"\n if torch.is_tensor(tensor) and tensor.dim() == 2:\n return \"\\n\".join(\n self.string(\n t,\n bpe_symbol,\n escape_unk,\n extra_symbols_to_ignore,\n include_eos=include_eos,\n )\n for t in tensor\n )\n\n extra_symbols_to_ignore = set(extra_symbols_to_ignore or [])\n if not include_eos:\n extra_symbols_to_ignore.add(self.eos())\n\n def token_string(i):\n if i == self.unk():\n if unk_string is not None:\n return unk_string\n else:\n return self.unk_string(escape_unk)\n else:\n return self[i]\n\n if hasattr(self, \"bos_index\"):\n extra_symbols_to_ignore.add(self.bos())\n\n sent = separator.join(\n token_string(i)\n for i in tensor\n if utils.item(i) not in extra_symbols_to_ignore\n )\n\n return data_utils.post_process(sent, bpe_symbol)\n\n def unk_string(self, escape=False):\n \"\"\"Return unknown string, optionally escaped as: <<unk>>\"\"\"\n if escape:\n return \"<{}>\".format(self.unk_word)\n else:\n return self.unk_word\n\n def add_symbol(self, word, n=1, overwrite=False):\n \"\"\"Adds a word to the dictionary\"\"\"\n if word in self.indices and not overwrite:\n idx = self.indices[word]\n self.count[idx] = self.count[idx] + n\n return idx\n else:\n idx = len(self.symbols)\n self.indices[word] = idx\n self.symbols.append(word)\n self.count.append(n)\n return idx\n\n def update(self, new_dict):\n \"\"\"Updates counts from new dictionary.\"\"\"\n for word in new_dict.symbols:\n idx2 = new_dict.indices[word]\n if word in self.indices:\n idx = self.indices[word]\n self.count[idx] = self.count[idx] + new_dict.count[idx2]\n else:\n idx = len(self.symbols)\n self.indices[word] = idx\n self.symbols.append(word)\n self.count.append(new_dict.count[idx2])\n\n def finalize(self, threshold=-1, nwords=-1, padding_factor=8):\n \"\"\"Sort symbols by frequency in descending order, ignoring special ones.\n\n Args:\n - threshold defines the minimum word count\n - nwords defines the total number of words in the final dictionary,\n including special symbols\n 
- padding_factor can be used to pad the dictionary size to be a\n multiple of 8, which is important on some hardware (e.g., Nvidia\n Tensor Cores).\n \"\"\"\n if nwords <= 0:\n nwords = len(self)\n\n new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial)))\n new_symbols = self.symbols[: self.nspecial]\n new_count = self.count[: self.nspecial]\n\n c = Counter(\n dict(\n sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :]))\n )\n )\n for symbol, count in c.most_common(nwords - self.nspecial):\n if count >= threshold:\n new_indices[symbol] = len(new_symbols)\n new_symbols.append(symbol)\n new_count.append(count)\n else:\n break\n\n assert len(new_symbols) == len(new_indices)\n\n self.count = list(new_count)\n self.symbols = list(new_symbols)\n self.indices = new_indices\n\n self.pad_to_multiple_(padding_factor)\n\n def pad_to_multiple_(self, padding_factor):\n \"\"\"Pad Dictionary size to be a multiple of *padding_factor*.\"\"\"\n if padding_factor > 1:\n i = 0\n while len(self) % padding_factor != 0:\n symbol = \"madeupword{:04d}\".format(i)\n self.add_symbol(symbol, n=0)\n i += 1\n\n def bos(self):\n \"\"\"Helper to get index of beginning-of-sentence symbol\"\"\"\n return self.bos_index\n\n def pad(self):\n \"\"\"Helper to get index of pad symbol\"\"\"\n return self.pad_index\n\n def eos(self):\n \"\"\"Helper to get index of end-of-sentence symbol\"\"\"\n return self.eos_index\n\n def unk(self):\n \"\"\"Helper to get index of unk symbol\"\"\"\n return self.unk_index\n\n @classmethod\n def load(cls, f):\n \"\"\"Loads the dictionary from a text file with the format:\n\n ```\n <symbol0> <count0>\n <symbol1> <count1>\n ...\n ```\n \"\"\"\n d = cls()\n d.add_from_file(f)\n return d\n\n def add_from_file(self, f):\n \"\"\"\n Loads a pre-existing dictionary from a text file and adds its symbols\n to this instance.\n \"\"\"\n if isinstance(f, str):\n try:\n with open(PathManager.get_local_path(f), \"r\", encoding=\"utf-8\") as fd:\n self.add_from_file(fd)\n except FileNotFoundError as fnfe:\n raise fnfe\n except UnicodeError:\n raise Exception(\n \"Incorrect encoding detected in {}, please \"\n \"rebuild the dataset\".format(f)\n )\n return\n\n lines = f.readlines()\n indices_start_line = self._load_meta(lines)\n\n for line in lines[indices_start_line:]:\n try:\n line, field = line.rstrip().rsplit(\" \", 1)\n if field == \"#fairseq:overwrite\":\n overwrite = True\n line, field = line.rsplit(\" \", 1)\n else:\n overwrite = False\n count = int(field)\n word = line\n if word in self and not overwrite:\n raise RuntimeError(\n \"Duplicate word found when loading Dictionary: '{}'. \"\n \"Duplicate words can overwrite earlier ones by adding the \"\n \"#fairseq:overwrite flag at the end of the corresponding row \"\n \"in the dictionary file. 
If using the Camembert model, please \"\n \"download an updated copy of the model file.\".format(word)\n )\n self.add_symbol(word, n=count, overwrite=overwrite)\n except ValueError:\n raise ValueError(\n f\"Incorrect dictionary format, expected '<token> <cnt> [flags]': \\\"{line}\\\"\"\n )\n\n def _save(self, f, kv_iterator):\n if isinstance(f, str):\n PathManager.mkdirs(os.path.dirname(f))\n with PathManager.open(f, \"w\", encoding=\"utf-8\") as fd:\n return self.save(fd)\n for k, v in kv_iterator:\n print(\"{} {}\".format(k, v), file=f)\n\n def _get_meta(self):\n return [], []\n\n def _load_meta(self, lines):\n return 0\n\n def save(self, f):\n \"\"\"Stores dictionary into a text file\"\"\"\n ex_keys, ex_vals = self._get_meta()\n self._save(\n f,\n zip(\n ex_keys + self.symbols[self.nspecial :],\n ex_vals + self.count[self.nspecial :],\n ),\n )\n\n def dummy_sentence(self, length):\n t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()\n t[-1] = self.eos()\n return t\n\n def encode_line(\n self,\n line,\n line_tokenizer=tokenize_line,\n add_if_not_exist=True,\n consumer=None,\n append_eos=True,\n reverse_order=False,\n ) -> torch.IntTensor:\n words = line_tokenizer(line)\n if reverse_order:\n words = list(reversed(words))\n nwords = len(words)\n ids = torch.IntTensor(nwords + 1 if append_eos else nwords)\n\n for i, word in enumerate(words):\n if add_if_not_exist:\n idx = self.add_symbol(word)\n else:\n idx = self.index(word)\n if consumer is not None:\n consumer(word, idx)\n ids[i] = idx\n if append_eos:\n ids[nwords] = self.eos_index\n return ids\n\n @staticmethod\n def _add_file_to_dictionary_single_worker(\n filename,\n tokenize,\n eos_word,\n start_offset,\n end_offset,\n ):\n counter = Counter()\n with Chunker(filename, start_offset, end_offset) as line_iterator:\n for line in line_iterator:\n for word in tokenize(line):\n counter.update([word])\n counter.update([eos_word])\n return counter\n\n @staticmethod\n def add_file_to_dictionary(filename, dict, tokenize, num_workers):\n def merge_result(counter):\n for w, c in sorted(counter.items()):\n dict.add_symbol(w, c)\n\n local_file = PathManager.get_local_path(filename)\n offsets = find_offsets(local_file, num_workers)\n if num_workers > 1:\n chunks = zip(offsets, offsets[1:])\n pool = Pool(processes=num_workers)\n results = []\n for (start_offset, end_offset) in chunks:\n results.append(\n pool.apply_async(\n Dictionary._add_file_to_dictionary_single_worker,\n (\n local_file,\n tokenize,\n dict.eos_word,\n start_offset,\n end_offset,\n ),\n )\n )\n pool.close()\n pool.join()\n for r in results:\n merge_result(r.get())\n else:\n merge_result(\n Dictionary._add_file_to_dictionary_single_worker(\n local_file, tokenize, dict.eos_word, offsets[0], offsets[1]\n )\n )"
},
{
"identifier": "convert_namespace_to_omegaconf",
"path": "fairseq/dataclass/utils.py",
"snippet": "def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig:\n \"\"\"Convert a flat argparse.Namespace to a structured DictConfig.\"\"\"\n\n # Here we are using field values provided in args to override counterparts inside config object\n overrides, deletes = override_module_args(args)\n\n # configs will be in fairseq/config after installation\n config_path = os.path.join(\"..\", \"config\")\n\n GlobalHydra.instance().clear()\n\n with initialize(config_path=config_path):\n try:\n composed_cfg = compose(\"config\", overrides=overrides, strict=False)\n except:\n logger.error(\"Error when composing. Overrides: \" + str(overrides))\n raise\n\n for k in deletes:\n composed_cfg[k] = None\n\n cfg = OmegaConf.create(\n OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True)\n )\n\n # hack to be able to set Namespace in dict config. this should be removed when we update to newer\n # omegaconf version that supports object flags, or when we migrate all existing models\n from omegaconf import _utils\n\n with omegaconf_no_object_check():\n if cfg.task is None and getattr(args, \"task\", None):\n cfg.task = Namespace(**vars(args))\n from fairseq.tasks import TASK_REGISTRY\n\n _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task])\n cfg.task._name = args.task\n if cfg.model is None and getattr(args, \"arch\", None):\n cfg.model = Namespace(**vars(args))\n from fairseq.models import ARCH_MODEL_REGISTRY\n\n _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch])\n cfg.model._name = args.arch\n if cfg.optimizer is None and getattr(args, \"optimizer\", None):\n cfg.optimizer = Namespace(**vars(args))\n from fairseq.optim import OPTIMIZER_REGISTRY\n\n _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer])\n cfg.optimizer._name = args.optimizer\n if cfg.lr_scheduler is None and getattr(args, \"lr_scheduler\", None):\n cfg.lr_scheduler = Namespace(**vars(args))\n from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY\n\n _set_legacy_defaults(\n cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler]\n )\n cfg.lr_scheduler._name = args.lr_scheduler\n if cfg.criterion is None and getattr(args, \"criterion\", None):\n cfg.criterion = Namespace(**vars(args))\n from fairseq.criterions import CRITERION_REGISTRY\n\n _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion])\n cfg.criterion._name = args.criterion\n\n OmegaConf.set_struct(cfg, True)\n return cfg"
},
{
"identifier": "gen_parser_from_dataclass",
"path": "fairseq/dataclass/utils.py",
"snippet": "def gen_parser_from_dataclass(\n parser: ArgumentParser,\n dataclass_instance: FairseqDataclass,\n delete_default: bool = False,\n with_prefix: Optional[str] = None,\n) -> None:\n \"\"\"\n convert a dataclass instance to tailing parser arguments.\n\n If `with_prefix` is provided, prefix all the keys in the resulting parser with it. It means that we are\n building a flat namespace from a structured dataclass (see transformer_config.py for example).\n \"\"\"\n\n def argparse_name(name: str):\n if name == \"data\" and (with_prefix is None or with_prefix == \"\"):\n # normally data is positional args, so we don't add the -- nor the prefix\n return name\n if name == \"_name\":\n # private member, skip\n return None\n full_name = \"--\" + name.replace(\"_\", \"-\")\n if with_prefix is not None and with_prefix != \"\":\n # if a prefix is specified, construct the prefixed arg name\n full_name = with_prefix + \"-\" + full_name[2:] # strip -- when composing\n return full_name\n\n def get_kwargs_from_dc(\n dataclass_instance: FairseqDataclass, k: str\n ) -> Dict[str, Any]:\n \"\"\"k: dataclass attributes\"\"\"\n\n kwargs = {}\n\n field_type = dataclass_instance._get_type(k)\n inter_type = interpret_dc_type(field_type)\n\n field_default = dataclass_instance._get_default(k)\n\n if isinstance(inter_type, type) and issubclass(inter_type, Enum):\n field_choices = [t.value for t in list(inter_type)]\n else:\n field_choices = None\n\n field_help = dataclass_instance._get_help(k)\n field_const = dataclass_instance._get_argparse_const(k)\n\n if isinstance(field_default, str) and field_default.startswith(\"${\"):\n kwargs[\"default\"] = field_default\n else:\n if field_default is MISSING:\n kwargs[\"required\"] = True\n if field_choices is not None:\n kwargs[\"choices\"] = field_choices\n if (\n isinstance(inter_type, type)\n and (issubclass(inter_type, List) or issubclass(inter_type, Tuple))\n ) or (\"List\" in str(inter_type) or \"Tuple\" in str(inter_type)):\n if \"int\" in str(inter_type):\n kwargs[\"type\"] = lambda x: eval_str_list(x, int)\n elif \"float\" in str(inter_type):\n kwargs[\"type\"] = lambda x: eval_str_list(x, float)\n elif \"str\" in str(inter_type):\n kwargs[\"type\"] = lambda x: eval_str_list(x, str)\n else:\n raise NotImplementedError(\n \"parsing of type \" + str(inter_type) + \" is not implemented\"\n )\n if field_default is not MISSING:\n kwargs[\"default\"] = (\n \",\".join(map(str, field_default))\n if field_default is not None\n else None\n )\n elif (\n isinstance(inter_type, type) and issubclass(inter_type, Enum)\n ) or \"Enum\" in str(inter_type):\n kwargs[\"type\"] = str\n if field_default is not MISSING:\n if isinstance(field_default, Enum):\n kwargs[\"default\"] = field_default.value\n else:\n kwargs[\"default\"] = field_default\n elif inter_type is bool:\n kwargs[\"action\"] = (\n \"store_false\" if field_default is True else \"store_true\"\n )\n kwargs[\"default\"] = field_default\n else:\n kwargs[\"type\"] = inter_type\n if field_default is not MISSING:\n kwargs[\"default\"] = field_default\n\n # build the help with the hierarchical prefix\n if with_prefix is not None and with_prefix != \"\" and field_help is not None:\n field_help = with_prefix[2:] + \": \" + field_help\n\n kwargs[\"help\"] = field_help\n if field_const is not None:\n kwargs[\"const\"] = field_const\n kwargs[\"nargs\"] = \"?\"\n\n return kwargs\n\n for k in dataclass_instance._get_all_attributes():\n field_name = argparse_name(dataclass_instance._get_name(k))\n field_type = 
dataclass_instance._get_type(k)\n if field_name is None:\n continue\n elif inspect.isclass(field_type) and issubclass(field_type, FairseqDataclass):\n # for fields that are of type FairseqDataclass, we can recursively\n # add their fields to the namespace (so we add the args from model, task, etc. to the root namespace)\n prefix = None\n if with_prefix is not None:\n # if a prefix is specified, then we don't want to copy the subfields directly to the root namespace\n # but we prefix them with the name of the current field.\n prefix = field_name\n gen_parser_from_dataclass(parser, field_type(), delete_default, prefix)\n continue\n\n kwargs = get_kwargs_from_dc(dataclass_instance, k)\n\n field_args = [field_name]\n alias = dataclass_instance._get_argparse_alias(k)\n if alias is not None:\n field_args.append(alias)\n\n if \"default\" in kwargs:\n if isinstance(kwargs[\"default\"], str) and kwargs[\"default\"].startswith(\n \"${\"\n ):\n if kwargs[\"help\"] is None:\n # this is a field with a name that will be added elsewhere\n continue\n else:\n del kwargs[\"default\"]\n if delete_default and \"default\" in kwargs:\n del kwargs[\"default\"]\n try:\n parser.add_argument(*field_args, **kwargs)\n except ArgumentError:\n pass"
},
{
"identifier": "FairseqDecoder",
"path": "fairseq/models/fairseq_decoder.py",
"snippet": "class FairseqDecoder(nn.Module):\n \"\"\"Base class for decoders.\"\"\"\n\n def __init__(self, dictionary):\n super().__init__()\n self.dictionary = dictionary\n self.onnx_trace = False\n self.adaptive_softmax = None\n\n def forward(self, prev_output_tokens, encoder_out=None, **kwargs):\n \"\"\"\n Args:\n prev_output_tokens (LongTensor): shifted output tokens of shape\n `(batch, tgt_len)`, for teacher forcing\n encoder_out (dict, optional): output from the encoder, used for\n encoder-side attention\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n \"\"\"\n x, extra = self.extract_features(\n prev_output_tokens, encoder_out=encoder_out, **kwargs\n )\n x = self.output_layer(x)\n return x, extra\n\n def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):\n \"\"\"\n Returns:\n tuple:\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - a dictionary with any model-specific outputs\n \"\"\"\n raise NotImplementedError\n\n def output_layer(self, features, **kwargs):\n \"\"\"\n Project features to the default output size, e.g., vocabulary size.\n\n Args:\n features (Tensor): features returned by *extract_features*.\n \"\"\"\n raise NotImplementedError\n\n def get_normalized_probs(\n self,\n net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],\n log_probs: bool,\n sample: Optional[Dict[str, Tensor]] = None,\n ):\n \"\"\"Get normalized probabilities (or log probs) from a net's output.\"\"\"\n return self.get_normalized_probs_scriptable(net_output, log_probs, sample)\n\n # TorchScript doesn't support super() method so that the scriptable Subclass\n # can't access the base class model in Torchscript.\n # Current workaround is to add a helper function with different name and\n # call the helper function from scriptable Subclass.\n def get_normalized_probs_scriptable(\n self,\n net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],\n log_probs: bool,\n sample: Optional[Dict[str, Tensor]] = None,\n ):\n \"\"\"Get normalized probabilities (or log probs) from a net's output.\"\"\"\n\n if hasattr(self, \"adaptive_softmax\") and self.adaptive_softmax is not None:\n if sample is not None:\n assert \"target\" in sample\n target = sample[\"target\"]\n else:\n target = None\n out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)\n return out.exp_() if not log_probs else out\n\n logits = net_output[0]\n if log_probs:\n return utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace)\n else:\n return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace)\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the decoder.\"\"\"\n return 1e6 # an arbitrary large number\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade old state dicts to work with newer code.\"\"\"\n return state_dict\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True"
},
{
"identifier": "FairseqEncoder",
"path": "fairseq/models/fairseq_encoder.py",
"snippet": "class FairseqEncoder(nn.Module):\n \"\"\"Base class for encoders.\"\"\"\n\n def __init__(self, dictionary):\n super().__init__()\n self.dictionary = dictionary\n\n def forward(self, src_tokens, src_lengths=None, **kwargs):\n \"\"\"\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (LongTensor): lengths of each source sentence of shape\n `(batch)`\n \"\"\"\n raise NotImplementedError\n\n def forward_torchscript(self, net_input: Dict[str, Tensor]):\n \"\"\"A TorchScript-compatible version of forward.\n\n Encoders which use additional arguments may want to override\n this method for TorchScript compatibility.\n \"\"\"\n if torch.jit.is_scripting():\n return self.forward(\n src_tokens=net_input[\"src_tokens\"],\n src_lengths=net_input[\"src_lengths\"],\n )\n else:\n return self.forward_non_torchscript(net_input)\n\n @torch.jit.unused\n def forward_non_torchscript(self, net_input: Dict[str, Tensor]):\n encoder_input = {\n k: v for k, v in net_input.items() if k != \"prev_output_tokens\"\n }\n return self.forward(**encoder_input)\n\n def reorder_encoder_out(self, encoder_out, new_order):\n \"\"\"\n Reorder encoder output according to `new_order`.\n\n Args:\n encoder_out: output from the ``forward()`` method\n new_order (LongTensor): desired order\n\n Returns:\n `encoder_out` rearranged according to `new_order`\n \"\"\"\n raise NotImplementedError\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n return 1e6 # an arbitrary large number\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade old state dicts to work with newer code.\"\"\"\n return state_dict\n\n def set_num_updates(self, num_updates):\n \"\"\"State from trainer to pass along to model at every update.\"\"\"\n\n def _apply(m):\n if hasattr(m, \"set_num_updates\") and m != self:\n m.set_num_updates(num_updates)\n\n self.apply(_apply)"
}
] | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from argparse import Namespace
from typing import Dict, List, Optional, Tuple
from fairseq import utils
from fairseq.data import Dictionary
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
gen_parser_from_dataclass,
)
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig
from torch import Tensor
from fairseq.checkpoint_utils import prune_state_dict
from fairseq import hub_utils
from fairseq.checkpoint_utils import prune_state_dict | 9,531 | checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
**kwargs,
)
logger.info(x["args"])
return hub_utils.GeneratorHubInterface(x["args"], x["task"], x["models"])
@classmethod
def hub_models(cls):
return {}
class FairseqEncoderDecoderModel(BaseFairseqModel):
"""Base class for encoder-decoder models.
Args:
encoder (FairseqEncoder): the encoder
decoder (FairseqDecoder): the decoder
"""
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = encoder
self.decoder = decoder
check_type(self.encoder, FairseqEncoder)
check_type(self.decoder, FairseqDecoder)
def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
"""
Run the forward pass for an encoder-decoder model.
First feed a batch of source tokens through the encoder. Then, feed the
encoder output and previous decoder outputs (i.e., teacher forcing) to
the decoder to produce the next outputs::
encoder_out = self.encoder(src_tokens, src_lengths)
return self.decoder(prev_output_tokens, encoder_out)
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
decoder_out = self.decoder(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return decoder_out
def forward_decoder(self, prev_output_tokens, **kwargs):
return self.decoder(prev_output_tokens, **kwargs)
def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
features = self.decoder.extract_features(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return features
def output_layer(self, features, **kwargs):
"""Project features to the default output size (typically vocabulary size)."""
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return (self.encoder.max_positions(), self.decoder.max_positions())
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return self.decoder.max_positions()
class FairseqModel(FairseqEncoderDecoderModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
utils.deprecation_warning(
"FairseqModel is deprecated, please use FairseqEncoderDecoderModel "
"or BaseFairseqModel instead",
stacklevel=4,
)
class FairseqMultiModel(BaseFairseqModel):
"""Base class for combining multiple encoder-decoder models."""
def __init__(self, encoders, decoders):
super().__init__()
assert encoders.keys() == decoders.keys()
self.keys = list(encoders.keys())
for key in self.keys:
check_type(encoders[key], FairseqEncoder)
check_type(decoders[key], FairseqDecoder)
self.models = nn.ModuleDict(
{
key: FairseqEncoderDecoderModel(encoders[key], decoders[key])
for key in self.keys
}
)
@staticmethod
def build_shared_embeddings(
| # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Base classes for various fairseq models.
"""
logger = logging.getLogger(__name__)
def check_type(module, expected_type):
if hasattr(module, "unwrapped_module"):
assert isinstance(
module.unwrapped_module, expected_type
), f"{type(module.unwrapped_module)} != {expected_type}"
else:
assert isinstance(module, expected_type), f"{type(module)} != {expected_type}"
class BaseFairseqModel(nn.Module):
"""Base class for fairseq models."""
def __init__(self):
super().__init__()
self._is_generation_fast = False
@classmethod
def add_args(cls, parser):
"""Add model-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
# do not set defaults so that settings defaults from various architectures still works
gen_parser_from_dataclass(parser, dc(), delete_default=True)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
raise NotImplementedError("Model must implement the build_model method")
def get_targets(self, sample, net_output):
"""Get targets from either the sample or the net's output."""
return sample["target"]
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
# TorchScript doesn't support super() method so that the scriptable Subclass
# can't access the base class model in Torchscript.
# Current workaround is to add a helper function with different name and
# call the helper function from scriptable Subclass.
def get_normalized_probs_scriptable(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Scriptable helper function for get_normalized_probs in ~BaseFairseqModel"""
if hasattr(self, "decoder"):
return self.decoder.get_normalized_probs(net_output, log_probs, sample)
elif torch.is_tensor(net_output):
# syntactic sugar for simple models which don't have a decoder
# (e.g., the classification tutorial)
logits = net_output.float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
raise NotImplementedError
def extract_features(self, *args, **kwargs):
"""Similar to *forward* but only return features."""
return self(*args, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return None
def load_state_dict(
self,
state_dict,
strict=True,
model_cfg: Optional[DictConfig] = None,
args: Optional[Namespace] = None,
):
"""Copies parameters and buffers from *state_dict* into this module and
its descendants.
Overrides the method in :class:`nn.Module`. Compared with that method
this additionally "upgrades" *state_dicts* from old checkpoints.
"""
if model_cfg is None and args is not None:
logger.warn(
"using 'args' is deprecated, please update your code to use dataclass config"
)
model_cfg = convert_namespace_to_omegaconf(args).model
self.upgrade_state_dict(state_dict)
new_state_dict = prune_state_dict(state_dict, model_cfg)
# FIXME: hot fix for loading hubert model with different cluster num
# new_state_dict = {k: v for k, v in new_state_dict.items() if k not in ["label_embs_concat"]}
return super().load_state_dict(new_state_dict, strict)
def upgrade_state_dict(self, state_dict):
"""Upgrade old state dicts to work with newer code."""
self.upgrade_state_dict_named(state_dict, "")
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade old state dicts to work with newer code.
Args:
state_dict (dict): state dictionary to upgrade, in place
name (str): the state dict key corresponding to the current module
"""
assert state_dict is not None
def do_upgrade(m, prefix):
if len(prefix) > 0:
prefix += "."
for n, c in m.named_children():
name = prefix + n
if hasattr(c, "upgrade_state_dict_named"):
c.upgrade_state_dict_named(state_dict, name)
elif hasattr(c, "upgrade_state_dict"):
c.upgrade_state_dict(state_dict)
do_upgrade(c, name)
do_upgrade(self, name)
def set_num_updates(self, num_updates):
"""State from trainer to pass along to model at every update."""
for m in self.modules():
if hasattr(m, "set_num_updates") and m != self:
m.set_num_updates(num_updates)
def prepare_for_inference_(self, cfg: DictConfig):
"""Prepare model for inference."""
kwargs = {}
kwargs["beamable_mm_beam_size"] = (
None
if getattr(cfg.generation, "no_beamable_mm", False)
else getattr(cfg.generation, "beam", 5)
)
kwargs["need_attn"] = getattr(cfg.generation, "print_alignment", False)
if getattr(cfg.generation, "retain_dropout", False):
kwargs["retain_dropout"] = cfg.generation.retain_dropout
kwargs["retain_dropout_modules"] = cfg.generation.retain_dropout_modules
self.make_generation_fast_(**kwargs)
def make_generation_fast_(self, **kwargs):
"""
Legacy entry point to optimize model for faster generation.
Prefer prepare_for_inference_.
"""
if self._is_generation_fast:
return # only apply once
self._is_generation_fast = True
# remove weight norm from all modules in the network
def apply_remove_weight_norm(module):
try:
nn.utils.remove_weight_norm(module)
except (AttributeError, ValueError): # this module didn't have weight norm
return
self.apply(apply_remove_weight_norm)
def apply_make_generation_fast_(module, prefix):
if len(prefix) > 0:
prefix += "."
base_func = BaseFairseqModel.make_generation_fast_
for n, m in module.named_modules():
if (
m != self
and hasattr(m, "make_generation_fast_")
# don't call this implementation again, e.g., if
# children modules also inherit from BaseFairseqModel
and m.make_generation_fast_.__func__ is not base_func
):
name = prefix + n
m.make_generation_fast_(name=name, **kwargs)
apply_make_generation_fast_(self, "")
def train(mode=True):
if mode:
raise RuntimeError("cannot train after make_generation_fast")
# this model should no longer be used for training
self.eval()
self.train = train
def prepare_for_onnx_export_(self, **kwargs):
"""Make model exportable via ONNX trace."""
seen = set()
def apply_prepare_for_onnx_export_(module):
if (
module != self
and hasattr(module, "prepare_for_onnx_export_")
and module not in seen
):
seen.add(module)
module.prepare_for_onnx_export_(**kwargs)
self.apply(apply_prepare_for_onnx_export_)
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
**kwargs,
):
"""
Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model
file. Downloads and caches the pre-trained model file if needed.
The base implementation returns a
:class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to
generate translations or sample from language models. The underlying
:class:`~fairseq.models.FairseqModel` can be accessed via the
*generator.models* attribute.
Other models may override this to implement custom hub interfaces.
Args:
model_name_or_path (str): either the name of a pre-trained model to
load or a path/URL to a pre-trained model state dict
checkpoint_file (str, optional): colon-separated list of checkpoint
files in the model archive to ensemble (default: 'model.pt')
data_name_or_path (str, optional): point args.data to the archive
at the given path/URL. Can start with '.' or './' to reuse the
model archive path.
"""
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
**kwargs,
)
logger.info(x["args"])
return hub_utils.GeneratorHubInterface(x["args"], x["task"], x["models"])
@classmethod
def hub_models(cls):
return {}
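# Illustrative sketch (paths and the model class name are hypothetical
# placeholders, not from the original source): typical use of the
# from_pretrained hub entry point defined above.
#
#   hub = SomeFairseqModelSubclass.from_pretrained(
#       "/path/to/model_archive",        # hypothetical local archive directory
#       checkpoint_file="model.pt",
#       data_name_or_path=".",
#   )
#   # `hub` is a GeneratorHubInterface; the underlying FairseqModel ensemble
#   # is available as hub.models.
#   print(hub.sample(["an example input sentence"]))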
class FairseqEncoderDecoderModel(BaseFairseqModel):
"""Base class for encoder-decoder models.
Args:
encoder (FairseqEncoder): the encoder
decoder (FairseqDecoder): the decoder
"""
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = encoder
self.decoder = decoder
check_type(self.encoder, FairseqEncoder)
check_type(self.decoder, FairseqDecoder)
def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
"""
Run the forward pass for an encoder-decoder model.
First feed a batch of source tokens through the encoder. Then, feed the
encoder output and previous decoder outputs (i.e., teacher forcing) to
the decoder to produce the next outputs::
encoder_out = self.encoder(src_tokens, src_lengths)
return self.decoder(prev_output_tokens, encoder_out)
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
decoder_out = self.decoder(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return decoder_out
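# Illustrative note (shapes as documented above, not extra functionality):
# calling the teacher-forced forward pass of an encoder-decoder model.
#
#   src_tokens:         LongTensor (batch, src_len)
#   src_lengths:        LongTensor (batch,)
#   prev_output_tokens: LongTensor (batch, tgt_len), the target shifted right
#
#   logits, extra = model(src_tokens, src_lengths, prev_output_tokens)
#   # logits: (batch, tgt_len, vocab); extra: dict of model-specific outputs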
def forward_decoder(self, prev_output_tokens, **kwargs):
return self.decoder(prev_output_tokens, **kwargs)
def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
features = self.decoder.extract_features(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return features
def output_layer(self, features, **kwargs):
"""Project features to the default output size (typically vocabulary size)."""
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return (self.encoder.max_positions(), self.decoder.max_positions())
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return self.decoder.max_positions()
class FairseqModel(FairseqEncoderDecoderModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
utils.deprecation_warning(
"FairseqModel is deprecated, please use FairseqEncoderDecoderModel "
"or BaseFairseqModel instead",
stacklevel=4,
)
class FairseqMultiModel(BaseFairseqModel):
"""Base class for combining multiple encoder-decoder models."""
def __init__(self, encoders, decoders):
super().__init__()
assert encoders.keys() == decoders.keys()
self.keys = list(encoders.keys())
for key in self.keys:
check_type(encoders[key], FairseqEncoder)
check_type(decoders[key], FairseqDecoder)
self.models = nn.ModuleDict(
{
key: FairseqEncoderDecoderModel(encoders[key], decoders[key])
for key in self.keys
}
)
@staticmethod
def build_shared_embeddings( | dicts: Dict[str, Dictionary], | 1 | 2023-11-27 09:53:58+00:00 | 12k |
zerolink-io/zerolink-python | zerolink/api.py | [
{
"identifier": "read_docx",
"path": "zerolink/extract.py",
"snippet": "def read_docx(path: str) -> str:\n \"\"\"\n Turn the Microsoft Word document into a raw bytestring\n \"\"\"\n if NO_DOCX:\n print(\"Microsoft Word document support is not available\")\n sys.exit(1)\n\n document = Document(path)\n target_stream = io.StringIO()\n for paragraph in document.paragraphs:\n target_stream.write(paragraph.text + \"\\n\")\n return target_stream.getvalue()"
},
{
"identifier": "api_key",
"path": "zerolink/settings.py",
"snippet": " CONFIG_FILE = os.path.join(os.environ[\"APPDATA\"], \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(os.environ[\"HOME\"], \".config\", \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(\n os.environ[\"HOME\"], \"Library\", \"Application Support\", \"zerolink\", \"config\"\n )\ndef create_config() -> None:\ndef get_config() -> configparser.ConfigParser:\ndef get_config_path() -> str:\ndef get_config_var(var: str) -> str:\ndef write_config_var(var: str, value: str):\ndef write_api_key(api_key: str):\ndef read_api_key() -> Optional[str]:"
},
{
"identifier": "AttributeType",
"path": "zerolink_client/models/attribute_type.py",
"snippet": "class AttributeType(str, Enum):\n DATE = \"DATE\"\n DATETIME = \"DATETIME\"\n DIMENSIONAL_QUANTITY = \"DIMENSIONAL_QUANTITY\"\n DIMENSIONLESS_QUANTITY = \"DIMENSIONLESS_QUANTITY\"\n GPS_COORDINATES = \"GPS_COORDINATES\"\n MONOLINGUAL_TEXT = \"MONOLINGUAL_TEXT\"\n URL = \"URL\"\n\n def __str__(self) -> str:\n return str(self.value)"
},
{
"identifier": "ContextAssumption",
"path": "zerolink_client/models/context_assumption.py",
"snippet": "class ContextAssumption(str, Enum):\n GLOBAL = \"global\"\n LOCAL = \"local\"\n NONE = \"none\"\n\n def __str__(self) -> str:\n return str(self.value)"
},
{
"identifier": "CreateAttribute",
"path": "zerolink_client/models/create_attribute.py",
"snippet": "class CreateAttribute:\n \"\"\"\n Attributes:\n subject (str): EID of a builtin entity\n predicate (str): Name of attribute\n attribute (Attribute):\n \"\"\"\n\n subject: str\n predicate: str\n attribute: \"Attribute\"\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.attribute import Attribute\n\n subject = self.subject\n\n predicate = self.predicate\n\n attribute = self.attribute.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"subject\": subject,\n \"predicate\": predicate,\n \"attribute\": attribute,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.attribute import Attribute\n\n d = src_dict.copy()\n subject = d.pop(\"subject\")\n\n predicate = d.pop(\"predicate\")\n\n attribute = Attribute.from_dict(d.pop(\"attribute\"))\n\n create_attribute = cls(\n subject=subject,\n predicate=predicate,\n attribute=attribute,\n )\n\n create_attribute.additional_properties = d\n return create_attribute\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties"
},
{
"identifier": "CreateEntity",
"path": "zerolink_client/models/create_entity.py",
"snippet": "class CreateEntity:\n \"\"\"\n Attributes:\n entity (str): Name of entity\n entity_type (Union[Unset, EntityType]): Entity types are entities that map to base ontological entities in\n Foundation.\n entity_str (Union[Unset, str]): User specified type\n is_class (Union[Unset, bool]): Whether the entity is a class or instance Default: False.\n \"\"\"\n\n entity: str\n entity_type: Union[Unset, EntityType] = UNSET\n entity_str: Union[Unset, str] = UNSET\n is_class: Union[Unset, bool] = False\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n entity = self.entity\n\n entity_type: Union[Unset, str] = UNSET\n if not isinstance(self.entity_type, Unset):\n entity_type = self.entity_type.value\n\n entity_str = self.entity_str\n\n is_class = self.is_class\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"entity\": entity,\n }\n )\n if entity_type is not UNSET:\n field_dict[\"entity_type\"] = entity_type\n if entity_str is not UNSET:\n field_dict[\"entity_str\"] = entity_str\n if is_class is not UNSET:\n field_dict[\"is_class\"] = is_class\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n entity = d.pop(\"entity\")\n\n _entity_type = d.pop(\"entity_type\", UNSET)\n entity_type: Union[Unset, EntityType]\n if isinstance(_entity_type, Unset):\n entity_type = UNSET\n else:\n entity_type = EntityType(_entity_type)\n\n entity_str = d.pop(\"entity_str\", UNSET)\n\n is_class = d.pop(\"is_class\", UNSET)\n\n create_entity = cls(\n entity=entity,\n entity_type=entity_type,\n entity_str=entity_str,\n is_class=is_class,\n )\n\n create_entity.additional_properties = d\n return create_entity\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties"
},
{
"identifier": "CreateRule",
"path": "zerolink_client/models/create_rule.py",
"snippet": "class CreateRule:\n \"\"\"\n Attributes:\n rule (str): Textual representation of the rule to parse\n context (Union[Unset, CreateRuleContext]): Context of entities to use for parsing the rule\n \"\"\"\n\n rule: str\n context: Union[Unset, \"CreateRuleContext\"] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.create_rule_context import CreateRuleContext\n\n rule = self.rule\n\n context: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.context, Unset):\n context = self.context.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"rule\": rule,\n }\n )\n if context is not UNSET:\n field_dict[\"context\"] = context\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.create_rule_context import CreateRuleContext\n\n d = src_dict.copy()\n rule = d.pop(\"rule\")\n\n _context = d.pop(\"context\", UNSET)\n context: Union[Unset, CreateRuleContext]\n if isinstance(_context, Unset):\n context = UNSET\n else:\n context = CreateRuleContext.from_dict(_context)\n\n create_rule = cls(\n rule=rule,\n context=context,\n )\n\n create_rule.additional_properties = d\n return create_rule\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties"
},
{
"identifier": "CreateRuleContext",
"path": "zerolink_client/models/create_rule_context.py",
"snippet": "class CreateRuleContext:\n \"\"\"Context of entities to use for parsing the rule\"\"\"\n\n additional_properties: Dict[str, str] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({})\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n create_rule_context = cls()\n\n create_rule_context.additional_properties = d\n return create_rule_context\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> str:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: str) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties"
},
{
"identifier": "CreateTriple",
"path": "zerolink_client/models/create_triple.py",
"snippet": "class CreateTriple:\n \"\"\"\n Attributes:\n predicate (str): Name of predicate relation\n user_subject (Union[Unset, str]): EID of a user entity\n subject (Union[Unset, str]): EID of a builtin entity\n user_object (Union[Unset, str]): EID of a user entity\n object_ (Union[Unset, str]): EID of a builtin entity\n \"\"\"\n\n predicate: str\n user_subject: Union[Unset, str] = UNSET\n subject: Union[Unset, str] = UNSET\n user_object: Union[Unset, str] = UNSET\n object_: Union[Unset, str] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n predicate = self.predicate\n\n user_subject = self.user_subject\n\n subject = self.subject\n\n user_object = self.user_object\n\n object_ = self.object_\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"predicate\": predicate,\n }\n )\n if user_subject is not UNSET:\n field_dict[\"user_subject\"] = user_subject\n if subject is not UNSET:\n field_dict[\"subject\"] = subject\n if user_object is not UNSET:\n field_dict[\"user_object\"] = user_object\n if object_ is not UNSET:\n field_dict[\"object\"] = object_\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n predicate = d.pop(\"predicate\")\n\n user_subject = d.pop(\"user_subject\", UNSET)\n\n subject = d.pop(\"subject\", UNSET)\n\n user_object = d.pop(\"user_object\", UNSET)\n\n object_ = d.pop(\"object\", UNSET)\n\n create_triple = cls(\n predicate=predicate,\n user_subject=user_subject,\n subject=subject,\n user_object=user_object,\n object_=object_,\n )\n\n create_triple.additional_properties = d\n return create_triple\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties"
},
{
"identifier": "EntityType",
"path": "zerolink_client/models/entity_type.py",
"snippet": "class EntityType(str, Enum):\n ABSTRACT_ENTITY = \"Abstract Entity\"\n AIRCRAFT = \"Aircraft\"\n ALBUM = \"Album\"\n ANATOMY = \"Anatomy\"\n ANIMAL = \"Animal\"\n ARTISTIC_WORK = \"Artistic Work\"\n ARTWORK = \"Artwork\"\n ASTRONOMICAL_OBJECT = \"Astronomical Object\"\n ATTRIBUTE = \"Attribute\"\n AWARD = \"Award\"\n BEING = \"Being\"\n BIOLOGICAL_PROCESS = \"Biological Process\"\n BODY_OF_WATER = \"Body of Water\"\n BOOK = \"Book\"\n CELLULAR_COMPONENT = \"Cellular Component\"\n CHEMICAL_COMPOUND = \"Chemical Compound\"\n CITY = \"City\"\n CLASS = \"Class\"\n COMPANY = \"Company\"\n CONCEPT = \"Concept\"\n CONTINUANT = \"Continuant\"\n COUNTRY = \"Country\"\n CREATIVE_WORK = \"Creative Work\"\n CURRENCY = \"Currency\"\n DISEASE = \"Disease\"\n DRINK = \"Drink\"\n DRUG = \"Drug\"\n ECONOMIC_ENTITY = \"Economic Entity\"\n ELEMENT = \"Element\"\n ENTITY = \"Entity\"\n EXPOSURE = \"Exposure\"\n FIELD_OF_WORK = \"Field of Work\"\n FILM = \"Film\"\n FINANCIAL_INSTRUMENT = \"Financial Instrument\"\n FOOD = \"Food\"\n GENDER_IDENTITY = \"Gender Identity\"\n GENE = \"Gene\"\n GENRE = \"Genre\"\n GOVERNMENT = \"Government\"\n HISTORICAL_ENTITY = \"Historical Entity\"\n IMMATERIAL_ENTITY = \"Immaterial Entity\"\n INDUSTRY = \"Industry\"\n INFORMATION = \"Information\"\n LANGUAGE = \"Language\"\n LAW = \"Law\"\n LOCATION = \"Location\"\n MATERIAL_ENTITY = \"Material Entity\"\n MATHEMATICAL_OBJECT = \"Mathematical Object\"\n MEDIA = \"Media\"\n METACLASS = \"Metaclass\"\n MOLECULAR_FUNCTION = \"Molecular Function\"\n MUSIC = \"Music\"\n OCCURRENT = \"Occurrent\"\n ORGANISM = \"Organism\"\n ORGANIZATION = \"Organization\"\n PATHWAY = \"Pathway\"\n PERSON = \"Person\"\n PHENOTYPE = \"Phenotype\"\n PHYSICAL_ENTITY = \"Physical Entity\"\n PLANT = \"Plant\"\n PROCESS = \"Process\"\n PRODUCT = \"Product\"\n PROFESSION = \"Profession\"\n PROPERTY = \"Property\"\n PROTEIN = \"Protein\"\n QUALIA = \"Qualia\"\n QUALITY = \"Quality\"\n QUANTITY = \"Quantity\"\n RELATIONSHIP = \"Relationship\"\n RELIGION = \"Religion\"\n ROLE = \"Role\"\n SCHOLARLY_ARTICLE = \"Scholarly Article\"\n SHIP = \"Ship\"\n SOFTWARE = \"Software\"\n SONG = \"Song\"\n SPORT = \"Sport\"\n STAR = \"Star\"\n SUBATOMIC_PARTICLE = \"Subatomic Particle\"\n SUBSTANCE = \"Substance\"\n TAXON = \"Taxon\"\n TV_SERIES = \"TV Series\"\n UNIT = \"Unit\"\n UNIVERSITY = \"University\"\n VEHICLE = \"Vehicle\"\n VIDEO_GAME = \"Video Game\"\n WEBSITE = \"Website\"\n\n def __str__(self) -> str:\n return str(self.value)"
},
{
"identifier": "ExtractModel",
"path": "zerolink_client/models/extract_model.py",
"snippet": "class ExtractModel(str, Enum):\n BASE = \"base\"\n FINANCE = \"finance\"\n GENOMICS = \"genomics\"\n INSURANCE = \"insurance\"\n LEGAL = \"legal\"\n\n def __str__(self) -> str:\n return str(self.value)"
},
{
"identifier": "ResultStatus",
"path": "zerolink_client/models/result_status.py",
"snippet": "class ResultStatus(str, Enum):\n ANSWERS = \"answers\"\n EMPTY = \"empty\"\n ERROR = \"error\"\n FALSE = \"false\"\n TRUE = \"true\"\n\n def __str__(self) -> str:\n return str(self.value)"
},
{
"identifier": "SpatialAssumption",
"path": "zerolink_client/models/spatial_assumption.py",
"snippet": "class SpatialAssumption(str, Enum):\n EARTH = \"earth\"\n UNIVERSE = \"universe\"\n\n def __str__(self) -> str:\n return str(self.value)"
},
{
"identifier": "TemporalAssumption",
"path": "zerolink_client/models/temporal_assumption.py",
"snippet": "class TemporalAssumption(str, Enum):\n ABSTRACT = \"abstract\"\n CURRENT = \"current\"\n HISTORICAL = \"historical\"\n\n def __str__(self) -> str:\n return str(self.value)"
},
{
"identifier": "TextExtract",
"path": "zerolink_client/models/text_extract.py",
"snippet": "class TextExtract:\n \"\"\"\n Attributes:\n text (str): Text to extract from\n extraction_model (Union[Unset, ExtractModel]): An enumeration. Default: ExtractModel.BASE.\n \"\"\"\n\n text: str\n extraction_model: Union[Unset, ExtractModel] = ExtractModel.BASE\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n text = self.text\n\n extraction_model: Union[Unset, str] = UNSET\n if not isinstance(self.extraction_model, Unset):\n extraction_model = self.extraction_model.value\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"text\": text,\n }\n )\n if extraction_model is not UNSET:\n field_dict[\"extraction_model\"] = extraction_model\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n text = d.pop(\"text\")\n\n _extraction_model = d.pop(\"extraction_model\", UNSET)\n extraction_model: Union[Unset, ExtractModel]\n if isinstance(_extraction_model, Unset):\n extraction_model = UNSET\n else:\n extraction_model = ExtractModel(_extraction_model)\n\n text_extract = cls(\n text=text,\n extraction_model=extraction_model,\n )\n\n text_extract.additional_properties = d\n return text_extract\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties"
},
{
"identifier": "WorldAssumption",
"path": "zerolink_client/models/world_assumption.py",
"snippet": "class WorldAssumption(str, Enum):\n CLOSED = \"closed\"\n OPEN = \"open\"\n PARTIAL = \"partial\"\n\n def __str__(self) -> str:\n return str(self.value)"
},
{
"identifier": "File",
"path": "zerolink_client/types.py",
"snippet": "class File:\n \"\"\"Contains information for file uploads\"\"\"\n\n payload: BinaryIO\n file_name: Optional[str] = None\n mime_type: Optional[str] = None\n\n def to_tuple(self) -> FileJsonType:\n \"\"\"Return a tuple representation that httpx will accept for multipart/form-data\"\"\"\n return self.file_name, self.payload, self.mime_type"
}
] | import json
import os
import zerolink.attribute as attr
import zerolink.req as req
from typing import Any, Generator, List, Optional, Union
from zerolink.extract import read_docx
from zerolink.settings import api_key
from zerolink_client.models import (
AttributeType,
ContextAssumption,
CreateAttribute,
CreateEntity,
CreateRule,
CreateRuleContext,
CreateTriple,
EntityType,
ExtractModel,
ResultStatus,
SpatialAssumption,
TemporalAssumption,
TextExtract,
WorldAssumption,
)
from zerolink_client.types import File | 7,234 | The Foundation knowledge graph is a collection of entities and relations
that are common to all knowledge graphs and form the foundation of
commonsense reasoning. The graph is read-only.
"""
def __init__(self) -> None:
self.name = "Foundation"
self.session_id = None
def entity(self, name: str) -> Entity:
"""
Get a foundation entity by name.
"""
ents = list(find_entity(name))
if len(ents) == 0:
raise ValueError(f"Entity '{name}' not found")
else:
return ents[0]
def entity_id(self, id: str) -> Entity:
"""
Get a foundation entity by id.
"""
e = req.get_entity_id(id)
desc = e.description if e.description else None
return Entity(e.id, e.entity, desc)
def property(self, name: str) -> Relation:
"""
Get a foundation property by name.
"""
rels = list(find_relation(name))
if len(rels) == 0:
raise ValueError(f"Relation '{name}' not found")
else:
return rels[0]
def attribute(self, name: str) -> Relation:
"""
Get a foundation attribute by name.
"""
rels = list(find_relation(name))
if len(rels) == 0:
raise ValueError(f"Relation '{name}' not found")
else:
return rels[0]
def reasoner(self, name: str):
"""
Get a foundation reasoner by name.
"""
reasoners = set(req.get_reasoners(name))
if name in reasoners:
return name
else:
raise ValueError(f"Reasoner '{name}' not found")
class UserKnowledgeGraph(KnowledgeGraph):
"""
A knowledge graph is a collection of entities and relations.
"""
session_id: int
def __init__(self, session_id: int, name: str):
self.name = name
self.session_id = session_id
@property
def entities(self) -> List[Entity]:
rep = req.get_session_entities_list(self.session_id)
return [
Entity(id=e.id, name=e.entity, description=e.desc, kg=self) for e in rep
]
@property
def facts(self) -> List[Fact]:
rep = req.get_session_facts_list(self.session_id)
return [
Fact(id=f.id, subject=f.subject, predicate=f.predicate, object=f.object_)
for f in rep
]
def add_entity(
self,
name: str,
type: Optional[Union[EntityType, Entity, str]] = None,
description: Optional[str] = None,
) -> Entity:
if isinstance(type, EntityType):
body = CreateEntity(name, entity_type=type)
elif isinstance(type, Entity):
body = CreateEntity(name, entity_str=type.id)
elif isinstance(type, str):
body = CreateEntity(name, entity_str=type)
elif type is None:
body = CreateEntity(name)
else:
raise ValueError("Invalid type")
rep = req.add_entity(self.session_id, body)
return Entity(id=rep.id, name=rep.entity, description=None, kg=self)
def get_or_add_entity(
self, name: str, type: Union[EntityType, Entity, str]
) -> Entity:
raise NotImplementedError()
def add_relation(self, *args, **kwargs):
"""
Add a relation to a user knowledge graph.
"""
raise NotImplementedError()
def add_attribute(self, e: Entity, a: str, v: Any):
"""
Add an attribute to a user knowledge graph.
"""
value = attr.value(v)
|
# ------------------------------------------------------------------------
# Entities
# ------------------------------------------------------------------------
class Entity(object):
"""
An entity is an object/node in a knowledge graph.
"""
def __init__(
self,
id: str,
name: str,
description: Optional[str] = None,
kg: Optional["KnowledgeGraph"] = None,
):
self.id = id
self.name = name
self.description = description
self.kg = kg
def instance(self, class_: "Entity") -> "Fact":
"""
Add a fact that this entity is an instance of a class.
"""
if self.kg:
return self.kg.add_fact(self, "instance of", class_)
else:
raise ValueError("Not attached to a knowledge graph")
def is_a(self, class_: "Entity") -> "Fact":
"""
Add a fact that this entity is an instance of a class.
"""
return self.instance(class_)
def subclass(self, superclass: "Entity") -> "Fact":
"""
Add a fact that this entity is a subclass of a superclass.
"""
if self.kg:
return self.kg.add_fact(self, "subclass of", superclass)
else:
raise ValueError("Not attached to a knowledge graph")
def is_type(self, type_: "Entity") -> "Fact":
"""
Add a fact that this entity is of the given type.
"""
if self.kg:
return self.kg.add_fact(self, "is subclass", type_)
else:
raise ValueError("Not attached to a knowledge graph")
def quality(self, quality: "Entity") -> "Fact":
"""
Add a fact that this entity has a quality.
"""
if self.kg:
return self.kg.add_fact(self, "has quality", quality)
else:
raise ValueError("Not attached to a knowledge graph")
def characteristic(self, characteristic: "Entity") -> "Fact":
"""
Add a fact that this entity has a characteristic.
"""
if self.kg:
return self.kg.add_fact(self, "has characteristic", characteristic)
else:
raise ValueError("Not attached to a knowledge graph")
def part_of(self, whole: "Entity") -> "Fact":
"""
Add a fact that this entity is part of another entity.
"""
if self.kg:
return self.kg.add_fact(self, "part of", whole)
else:
raise ValueError("Not attached to a knowledge graph")
def add_fact(self, predicate: str, object: "Entity") -> "Fact":
"""
Add an arbitrary fact about this entity.
"""
if self.kg:
return self.kg.add_fact(self, predicate, object)
else:
raise ValueError("Not attached to a knowledge graph")
def add_attribute(self, attribute: str, value: Any) -> None:
"""
Add an attribute to this entity.
"""
if self.kg:
self.kg.add_attribute(self, attribute, value)
else:
raise ValueError("Not attached to a knowledge graph")
@property
def user_entity(self) -> bool:
# If the id starts with EU then it is a user entity, otherwise it is a
# foundation entity
return self.id.startswith("EU")
def ontology(self) -> dict[str, Any]:
return ontology(self.id)
def __str__(self) -> str:
return f"{self.name} : ({self.id}) - {self.description}"
def __repr__(self) -> str:
return f'<Entity id="{self.id}" name="{self.name}" description="{self.description}">'
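# Illustrative sketch (entity names are hypothetical, and it assumes `kg` is a
# writable graph that implements add_fact/add_attribute): using the Entity
# helpers above to assert facts.
#
#   dog = kg.add_entity("Dog", type=EntityType.ANIMAL)
#   rex = kg.add_entity("Rex")
#   rex.is_a(dog)                       # adds an "instance of" fact
#   rex.add_attribute("weight", 30)     # delegates to kg.add_attribute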
class Relation(object):
"""
A relation is an edge in a knowledge graph.
"""
def __init__(self, id: str, name: str):
self.id = id
self.name = name
def __str__(self):
return f"{self.name} : ({self.id})"
def __repr__(self):
return f'<Relation id="{self.id}" name="{self.name}">'
class Fact(object):
def __init__(self, id: str, subject: str, predicate: str, object: str, kg=None):
self.id = id
self.subject = subject
self.predicate = predicate
self.object = object
self.kg = kg
def __str__(self) -> str:
return f"{self.subject} {self.predicate} {self.object}"
def __repr__(self) -> str:
return (
f'<Fact "{self.subject}" "{self.predicate}" "{self.object}" id="{self.id}">'
)
# ------------------------------------------------------------------------
# Knowledge Graph
# ------------------------------------------------------------------------
class KnowledgeGraph(object):
name: str
session_id: Optional[int]
def __repr__(self):
return f'<KnowledgeGraph name="{self.name}" session_id="{self.session_id}">'
def ask(
self,
question: str,
spatial: SpatialAssumption = SpatialAssumption.EARTH,
temporal: TemporalAssumption = TemporalAssumption.CURRENT,
world: WorldAssumption = WorldAssumption.PARTIAL,
context: ContextAssumption = ContextAssumption.GLOBAL,
reasoners: Optional[list[str]] = None,
dump_interpretation: bool = True,
**kwargs,
) -> "Result":
assumps = {
"spatial": spatial,
"temporal": temporal,
"world": world,
"context": context,
}
rep = req.ask_question(self.session_id, question, assumps, **kwargs)
if rep is None:
return Result(data=[], status=ResultStatus.EMPTY)
if rep.query and dump_interpretation:
print("Interpretation:")
print(json.dumps(rep.query.to_dict(), indent=4))
if rep.msg == "Found the answer":
return Result(data=rep.answers, status=ResultStatus.ANSWERS)
else:
return Result(data=[], status=ResultStatus.EMPTY)
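# Illustrative sketch (the question text is a made-up example; assumes Result
# exposes the status/data fields it is constructed with above):
#
#   kg = Foundation()
#   result = kg.ask(
#       "What is the capital of France?",
#       temporal=TemporalAssumption.CURRENT,
#       world=WorldAssumption.PARTIAL,
#   )
#   if result.status == ResultStatus.ANSWERS:
#       print(result.data)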
def add_entity(
self,
name: str,
type: Optional[Union[EntityType, Entity, str]] = None,
description: Optional[str] = None,
) -> Entity:
raise ValueError("Cannot add entity to a read-only knowledge graph")
def add_fact(self, s: Entity, p: str, o: Entity) -> Fact:
raise ValueError("Cannot add fact to a read-only knowledge graph")
def add_rule(self, rule: str, ctx: Optional[dict[str, Any]] = None) -> str:
raise ValueError("Cannot add rule to a read-only knowledge graph")
def add_attribute(self, e: Entity, a: str, v: Any) -> Fact:
raise ValueError("Cannot add attribute to a read-only knowledge graph")
@property
def entities(self) -> List[Entity]:
raise NotImplementedError()
@property
def facts(self) -> List[Fact]:
raise NotImplementedError()
class Foundation(KnowledgeGraph):
"""
The Foundation knowledge graph is a collection of entities and relations
that are common to all knowledge graphs and form the foundation of
commonsense reasoning. The graph is read-only.
"""
def __init__(self) -> None:
self.name = "Foundation"
self.session_id = None
def entity(self, name: str) -> Entity:
"""
Get a foundation entity by name.
"""
ents = list(find_entity(name))
if len(ents) == 0:
raise ValueError(f"Entity '{name}' not found")
else:
return ents[0]
def entity_id(self, id: str) -> Entity:
"""
Get a foundation entity by id.
"""
e = req.get_entity_id(id)
desc = e.description if e.description else None
return Entity(e.id, e.entity, desc)
def property(self, name: str) -> Relation:
"""
Get a foundation property by name.
"""
rels = list(find_relation(name))
if len(rels) == 0:
raise ValueError(f"Relation '{name}' not found")
else:
return rels[0]
def attribute(self, name: str) -> Relation:
"""
Get a foundation attribute by name.
"""
rels = list(find_relation(name))
if len(rels) == 0:
raise ValueError(f"Relation '{name}' not found")
else:
return rels[0]
def reasoner(self, name: str):
"""
Get a foundation reasoner by name.
"""
reasoners = set(req.get_reasoners(name))
if name in reasoners:
return name
else:
raise ValueError(f"Reasoner '{name}' not found")
class UserKnowledgeGraph(KnowledgeGraph):
"""
A knowledge graph is a collection of entities and relations.
"""
session_id: int
def __init__(self, session_id: int, name: str):
self.name = name
self.session_id = session_id
@property
def entities(self) -> List[Entity]:
rep = req.get_session_entities_list(self.session_id)
return [
Entity(id=e.id, name=e.entity, description=e.desc, kg=self) for e in rep
]
@property
def facts(self) -> List[Fact]:
rep = req.get_session_facts_list(self.session_id)
return [
Fact(id=f.id, subject=f.subject, predicate=f.predicate, object=f.object_)
for f in rep
]
def add_entity(
self,
name: str,
type: Optional[Union[EntityType, Entity, str]] = None,
description: Optional[str] = None,
) -> Entity:
if isinstance(type, EntityType):
body = CreateEntity(name, entity_type=type)
elif isinstance(type, Entity):
body = CreateEntity(name, entity_str=type.id)
elif isinstance(type, str):
body = CreateEntity(name, entity_str=type)
elif type is None:
body = CreateEntity(name)
else:
raise ValueError("Invalid type")
rep = req.add_entity(self.session_id, body)
return Entity(id=rep.id, name=rep.entity, description=None, kg=self)
def get_or_add_entity(
self, name: str, type: Union[EntityType, Entity, str]
) -> Entity:
raise NotImplementedError()
def add_relation(self, *args, **kwargs):
"""
Add a relation to a user knowledge graph.
"""
raise NotImplementedError()
def add_attribute(self, e: Entity, a: str, v: Any):
"""
Add an attribute to a user knowledge graph.
"""
value = attr.value(v) | body = CreateAttribute( | 4 | 2023-12-03 07:50:04+00:00 | 12k |
amazon-science/discal | run_summarization_w_calib.py | [
{
"identifier": "CustomSeq2SeqTrainer",
"path": "calibration_utils.py",
"snippet": "class CustomSeq2SeqTrainer(Seq2SeqTrainer):\n '''\n This is a custom seq2seq training, supporting an EMA-based generator for calibaration.\n '''\n def __init__(self, teacher_model=None, calibration_params=None, **kwargs,):\n super().__init__(**kwargs)\n\n # teacher model\n self.teacher_model = teacher_model\n\n # calibration parameters\n self.calibration = calibration_params['calibration']\n self.num_beams = calibration_params['num_beams']\n self.num_candidate_beams = calibration_params['num_candidate_beams']\n self.diverse_penalty= calibration_params['disversity_penalty']\n self.min_length = calibration_params['min_length']\n self.max_length = calibration_params['max_length']\n self.length_penalty = calibration_params['length_penalty']\n self.abstract_weight = calibration_params['abstract_weight']\n self.mle_weight = calibration_params['mle_weight']\n self.calibration_weight = calibration_params['calibration_weight']\n\n\n def compute_loss(self, model, inputs):\n\n ## pseudo and goldlen labels\n gt_labels = inputs.pop('labels')\n gt_label_ids = inputs.pop('decoder_input_ids')\n decoded_gt_labels = self.tokenizer.batch_decode(gt_label_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)\n ####\n\n # encoding first.\n attention_mask = inputs[\"input_ids\"] != self.model.config.pad_token_id\n encoder_outputs = model.module.get_encoder()(\n input_ids = inputs[\"input_ids\"],\n attention_mask = attention_mask\n )\n \n decoded_gen_summaries = None\n if self.calibration and self.teacher_model is not None:\n # if calibration is turned on.\n # candidate summary generation using a teacher as generator \n gen_summaries = self.teacher_model.generate(\n input_ids=inputs['input_ids'], \n attention_mask=attention_mask,\n num_return_sequences=self.num_candidate_beams, \n num_beams=self.num_candidate_beams,\n num_beam_groups=self.num_candidate_beams, \n diversity_penalty=self.diverse_penalty,\n max_length=self.max_length, \n min_length=self.min_length,\n no_repeat_ngram_size=3,\n length_penalty=self.length_penalty,\n early_stopping=True\n ) \n \n # decoding to strings\n decoded_gen_summaries = [self.tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in gen_summaries]\n\n # decoder inference with {gold ref, candidate ref}\n calibration_inputs = {\n 'inputs': inputs,\n 'attention_mask': attention_mask,\n 'encoder_outputs': encoder_outputs,\n 'decoded_gold': decoded_gt_labels,\n 'decoded_candidates': decoded_gen_summaries,\n }\n output, labels = self.calibrated_inference(model, **calibration_inputs)\n\n # loss computation\n # 1/ target mle loss\n # index '0' is the logit for gt labels, other indices are for the logits of candidiate references\n _target_index = 1 # best label\n logits = output[\"raw_outputs\"][:, _target_index]\n gold = labels[:, _target_index, :]\n mle_loss = self.target_loss(logits, gold)\n\n # 2/ ranking loss\n # we refer to BRIO papers \n similarity, gold_similarity = output['score'], output['summary_score']\n ranking_loss = RankingLoss(similarity, gold_similarity)\n\n # combined loss\n loss = self.mle_weight * mle_loss + self.calibration_weight * ranking_loss \n #print('mle:', mle_loss, ', rank:', ranking_loss, self.mle_weight, self.calibration_weight)\n \n else:\n # if calibration is not turned on\n calibration_inputs = {\n 'inputs': inputs,\n 'attention_mask': attention_mask,\n 'encoder_outputs': encoder_outputs,\n 'decoded_gold': decoded_gt_labels\n }\n\n # decoder inference with only gold reference\n output, labels = 
self.uncalibrated_inference(model, **calibration_inputs)\n\n # get mle loss\n logits = output[\"raw_outputs\"][:, 0]\n gold = labels[:, 0, :]\n mle_loss = self.target_loss(logits, gold)\n loss = mle_loss\n\n return loss\n \n \n def calibrated_inference(self, model, inputs, attention_mask, encoder_outputs, decoded_gold, decoded_candidates\n , require_gold=True):\n '''\n Performs decoding inference with calibration.\n model: training model\n inputs: source input (doc)\n attention mask: encoder attention mask as input to decoder\n encoder_outputs: encoder outputs for the input\n decoded_gold: decoded golden reference, which is used to compute rouge score with psuedo references\n decoded_candidates: decoded candidate (pusedo) references from the teacher model, which is used to compute rouge and abstractivenss scores\n require gold: whether to return the logit of gold reference from the BART decoder, which can be used to compute final loss for optimization\n '''\n\n # train mode: we feed candidate references together with gold reference\n # non-train mode: no need for providing candidate references.\n if decoded_gold is not None:\n train_mode = True\n else:\n train_mode = False\n\n batch_size = inputs['input_ids'].shape[0]\n decoded_src = self.tokenizer.batch_decode(inputs['input_ids'], skip_special_tokens=True)\n if train_mode:\n gen_summaries = []\n\n # sorting the candidate reference based on the specified scores (e.g., rouge)\n for s_idx in range(batch_size):\n src_doc = decoded_src[s_idx]\n gt_label = decoded_gold[s_idx].strip() \n scored_summaries = []\n for ref_idx in range(self.num_candidate_beams):\n ref_label = decoded_candidates[s_idx * self.num_candidate_beams + ref_idx].strip()\n score = compute_score(src_doc, gt_label, ref_label)\n scored_summaries.append((score, ref_label))\n \n # normalize & merge scores\n agg_scores = {}\n for (score, summary) in scored_summaries:\n for key, value in score.items():\n if key not in agg_scores:\n agg_scores[key] = []\n agg_scores[key].append(value)\n \n for key, scores in agg_scores.items():\n _sum = float(sum(scores))\n if _sum != 0:\n scores = [score/_sum for score in scores]\n agg_scores[key] = np.array(scores)\n \n final_scores = None\n for key, scores in agg_scores.items():\n if key == 'rouge':\n type_weight = 1.0 - self.abstract_weight\n else:\n type_weight = self.abstract_weight\n if final_scores is None:\n final_scores = (scores * type_weight)\n else:\n final_scores += (scores * type_weight)\n scored_summaries = [(final_score, summary) for final_score, (_, summary) in zip(final_scores, scored_summaries)]\n scored_summaries = sorted(scored_summaries, key=lambda tup: tup[0], reverse=True)\n\n scored_summaries = [summary for (score, summary) in scored_summaries]\n merged_summaries = [gt_label]\n merged_summaries.extend(scored_summaries)\n\n # gt label first, and then sorted candidate summaries (in desencding order, high -> low)\n gen_summaries.extend(merged_summaries)\n decoded_candidates = gen_summaries\n\n else:\n # for only gold reference (we turn off calibration during evaluation)\n gen_summaries = []\n for s_idx in range(batch_size):\n scored_summaries = []\n for ref_idx in range(self.num_beams):\n ref_label = decoded_candidates[s_idx * self.num_beams + ref_idx].strip()\n gen_summaries.append(ref_label)\n decoded_candidates = gen_summaries\n\n # tokenizing the candidates and golden reference strings -> decoder inputs\n with self.tokenizer.as_target_tokenizer(): \n encoded_gen_summaries = self.tokenizer(decoded_candidates, 
max_length=self.max_length, padding=\"max_length\", truncation=True)\n gen_decoder_input_labels = torch.tensor(encoded_gen_summaries[\"input_ids\"]).to(inputs['input_ids'])\n gen_decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=gen_decoder_input_labels)\n\n # reshape to inference with all the references in a batch\n cand_num = self.num_candidate_beams + 1 if train_mode else self.num_beams\n gen_decoder_input_ids = gen_decoder_input_ids.view(batch_size, cand_num, -1)\n gen_decoder_input_labels = gen_decoder_input_labels.view(batch_size, cand_num, -1)\n cand_mask = gen_decoder_input_labels != self.model.config.pad_token_id\n\n # interleaving the encoding outputs\n encoder_hidden_states = encoder_outputs[0]\n encoder_hidden_states = torch.repeat_interleave(encoder_hidden_states, cand_num, dim=0)\n attention_mask = torch.repeat_interleave(attention_mask, cand_num, dim=0)\n decoder_input_ids = gen_decoder_input_ids.view(-1, gen_decoder_input_ids.size(-1))\n decoder_attention_mask = cand_mask.view(-1, cand_mask.size(-1))\n\n # with label smoothing.\n new_inputs = {k: v for k, v in inputs.items()}\n if \"labels\" in new_inputs:\n new_inputs.pop(\"labels\")\n new_inputs[\"encoder_outputs\"] = [encoder_hidden_states]\n new_inputs[\"attention_mask\"] = attention_mask\n new_inputs[\"decoder_input_ids\"] = decoder_input_ids\n # fine-tuning and calibration, enable this \"bi-directional attention - we see the next tokens as well\"\n new_inputs[\"decoder_attention_mask\"] = decoder_attention_mask\n\n outputs = model(**new_inputs)\n\n # outputs consisting of \"logits\" and \"scores for ranking loss\"\n output = self.model.score_forward(outputs,\n batch_size=batch_size,\n decoder_labels=gen_decoder_input_labels,\n length_penalty=self.length_penalty,\n require_gold=require_gold,\n adding=0)\n\n return output, gen_decoder_input_labels\n\n\n def uncalibrated_inference(self, model, inputs, attention_mask, encoder_outputs, decoded_gold):\n '''\n Decoder inference for only golden reference.\n '''\n\n batch_size = inputs['input_ids'].shape[0]\n gen_summaries = []\n for s_idx in range(batch_size):\n gt_label = decoded_gold[s_idx].strip()\n gen_summaries.append(gt_label)\n decoded_gen_summaries = gen_summaries\n\n with self.tokenizer.as_target_tokenizer(): \n encoded_gen_summaries = self.tokenizer(decoded_gen_summaries, max_length=self.max_length, padding=\"max_length\", truncation=True)\n gen_decoder_input_labels = torch.tensor(encoded_gen_summaries[\"input_ids\"]).to(inputs['input_ids'])\n gen_decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=gen_decoder_input_labels)\n\n cand_num = 1\n gen_decoder_input_ids = gen_decoder_input_ids.view(batch_size, cand_num, -1)\n gen_decoder_input_labels = gen_decoder_input_labels.view(batch_size, cand_num, -1)\n cand_mask = gen_decoder_input_labels != self.model.config.pad_token_id\n\n encoder_hidden_states = encoder_outputs[0]\n encoder_hidden_states = torch.repeat_interleave(encoder_hidden_states, cand_num, dim=0)\n attention_mask = torch.repeat_interleave(attention_mask, cand_num, dim=0)\n decoder_input_ids = gen_decoder_input_ids.view(-1, gen_decoder_input_ids.size(-1))\n decoder_attention_mask = cand_mask.view(-1, cand_mask.size(-1))\n\n new_inputs = {k: v for k, v in inputs.items()}\n if \"labels\" in new_inputs:\n new_inputs.pop(\"labels\")\n new_inputs[\"encoder_outputs\"] = [encoder_hidden_states]\n new_inputs[\"attention_mask\"] = attention_mask\n new_inputs[\"decoder_input_ids\"] = decoder_input_ids\n # for scrach 
training, disable this.\n #inputs[\"decoder_attention_mask\"] = decoder_attention_mask\n \n outputs = model(**new_inputs)\n output = self.model.score_forward(outputs,\n batch_size=batch_size,\n decoder_labels=gen_decoder_input_labels,\n length_penalty=self.length_penalty,\n require_gold=True,\n adding=0)\n\n return output, gen_decoder_input_labels\n\n\n def prediction_step(\n self,\n model: torch.nn.Module,\n inputs: Dict[str, Union[torch.Tensor, Any]],\n prediction_loss_only: bool,\n ignore_keys: Optional[List[str]] = None,\n ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n \"\"\"\n Perform an evaluation step on `model` using `inputs`.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (`nn.Module`):\n The model to evaluate.\n inputs (`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument `labels`. Check your model's documentation for all accepted arguments.\n prediction_loss_only (`bool`):\n Whether or not to return the loss only.\n\n Return:\n Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and\n labels (each being optional).\n \"\"\"\n\n if not self.args.predict_with_generate or prediction_loss_only:\n return super().prediction_step(\n model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys\n )\n\n has_labels = \"labels\" in inputs\n inputs = self._prepare_inputs(inputs)\n\n # XXX: adapt synced_gpus for fairscale as well\n # Priority (handled in generate):\n # gen_kwargs > model.generation_config > default GenerationConfig()\n gen_kwargs = self._gen_kwargs.copy()\n if gen_kwargs.get(\"max_length\") is None and gen_kwargs.get(\"max_new_tokens\") is None:\n gen_kwargs[\"max_length\"] = self.model.config.max_length\n gen_kwargs[\"num_beams\"] = (\n gen_kwargs[\"num_beams\"] if gen_kwargs.get(\"num_beams\") is not None else self.model.config.num_beams\n )\n default_synced_gpus = True if is_deepspeed_zero3_enabled() else False\n gen_kwargs[\"synced_gpus\"] = (\n gen_kwargs[\"synced_gpus\"] if gen_kwargs.get(\"synced_gpus\") is not None else default_synced_gpus\n )\n\n # TODO (Joao): the following line is needed to keep a consistent result on SQUAD. 
Ideally, we should not block\n # users from preparing a dataset with `decoder_input_ids`.\n inputs = {k: v for k, v in inputs.items() if k != \"decoder_input_ids\"}\n\n generated_tokens = self.model.generate(\n input_ids=inputs['input_ids'], \n attention_mask=inputs['attention_mask'],\n num_beams=self.num_beams,\n max_length=self.max_length,\n min_length=self.min_length,\n no_repeat_ngram_size=3,\n length_penalty=self.length_penalty,\n early_stopping=True\n ) \n\n # Temporary hack to ensure the generation config is not initialized for each iteration of the evaluation loop\n # TODO: remove this hack when the legacy code that initializes generation_config from a model config is\n # removed in https://github.com/huggingface/transformers/blob/98d88b23f54e5a23e741833f1e973fdf600cc2c5/src/transformers/generation/utils.py#L1183\n if self.model.generation_config._from_model_config:\n self.model.generation_config._from_model_config = False\n\n # Retrieves GenerationConfig from model.generation_config\n gen_config = self.model.generation_config\n # in case the batch is shorter than max length, the output should be padded\n if generated_tokens.shape[-1] < gen_config.max_length:\n generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_length)\n elif gen_config.max_new_tokens is not None and generated_tokens.shape[-1] < gen_config.max_new_tokens + 1:\n generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_new_tokens + 1)\n\n with torch.no_grad():\n if has_labels:\n with self.compute_loss_context_manager():\n outputs = model(**inputs)\n if self.label_smoother is not None:\n loss = self.label_smoother(outputs, inputs[\"labels\"]).mean().detach()\n else:\n loss = (outputs[\"loss\"] if isinstance(outputs, dict) else outputs[0]).mean().detach()\n else:\n loss = None\n\n if self.args.prediction_loss_only:\n return loss, None, None\n\n if has_labels:\n labels = inputs[\"labels\"]\n if labels.shape[-1] < gen_config.max_length:\n labels = self._pad_tensors_to_max_len(labels, gen_config.max_length)\n elif gen_config.max_new_tokens is not None and labels.shape[-1] < gen_config.max_new_tokens + 1:\n labels = self._pad_tensors_to_max_len(labels, gen_config.max_new_tokens + 1)\n else:\n labels = None\n \n return loss, generated_tokens, labels\n \n\n def target_loss(self, model_output, labels, shift_labels=False):\n # this is orignal loss function for seq2seq training with label smoothing.\n\n epsilon = self.args.label_smoothing_factor # 0.1 default\n ignore_index = self.model.config.pad_token_id\n\n logits = model_output\n if shift_labels:\n logits = logits[..., :-1, :].contiguous()\n labels = labels[..., 1:].contiguous()\n\n log_probs = -torch.nn.functional.log_softmax(logits, dim=-1)\n if labels.dim() == log_probs.dim() - 1:\n labels = labels.unsqueeze(-1)\n\n padding_mask = labels.eq(ignore_index)\n\n # In case the ignore_index is -100, the gather will fail, so we replace labels by 0. The padding_mask\n # will ignore them in any case.\n labels = torch.clamp(labels, min=0)\n nll_loss = log_probs.gather(dim=-1, index=labels)\n # works for fp16 input tensor too, by internally upcasting it to fp32\n smoothed_loss = log_probs.sum(dim=-1, keepdim=True, dtype=torch.float32)\n\n nll_loss.masked_fill_(padding_mask, 0.0)\n smoothed_loss.masked_fill_(padding_mask, 0.0)\n\n # Take the mean over the label dimensions, then divide by the number of active elements (i.e. 
not-padded):\n num_active_elements = padding_mask.numel() - padding_mask.long().sum()\n nll_loss = nll_loss.sum() / num_active_elements\n smoothed_loss = smoothed_loss.sum() / (num_active_elements * log_probs.shape[-1])\n\n return (1 - epsilon) * nll_loss + epsilon * smoothed_loss\n\n\n def training_step(self, model: torch.nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:\n\n model.train()\n inputs = self._prepare_inputs(inputs)\n\n #if is_sagemaker_mp_enabled():\n # loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)\n # return loss_mb.reduce_mean().detach().to(self.args.device)\n\n with self.compute_loss_context_manager():\n loss = self.compute_loss(model, inputs)\n\n if self.args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:\n # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`\n loss = loss / self.args.gradient_accumulation_steps\n\n if self.do_grad_scaling:\n self.scaler.scale(loss).backward()\n elif self.use_apex:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n elif self.deepspeed:\n # loss gets scaled under gradient_accumulation_steps in deepspeed\n loss = self.deepspeed.backward(loss)\n else:\n loss.backward()\n\n return loss.detach()"
},
{
"identifier": "get_teacher_model",
"path": "distillation_utils.py",
"snippet": "def get_teacher_model(model_args, training_args):\n \n # Model\n base_config = BartConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.teacher_model_name_or_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n\n # for the evaluation using checkpoint model -> attention temperatured pseudo label\n base_config.temperature_scaling = training_args.temperature_scaling\n base_config.dynamic_temperature = training_args.dynamic_temperature\n\n model = BartForConditionalGeneration.from_pretrained(\n model_args.teacher_model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.teacher_model_name_or_path),\n config=base_config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n model.config.task_specific_params['summarization']['max_length'] = training_args.generation_max_length\n model.config.task_specific_params['summarization']['min_length'] = training_args.generation_min_length\n model.config.task_specific_params['summarization']['num_beams'] = training_args.generation_num_beams\n model.config.task_specific_params['summarization']['length_penalty'] = training_args.generation_length_penalty\n model.config.task_specific_params['summarization']['no_repeat_ngram_size'] = training_args.generation_no_repeat_ngram_size\n model.config.max_length = training_args.generation_max_length\n model.config.min_length = training_args.generation_min_length\n model.config.num_beams = training_args.generation_num_beams\n model.config.length_penalty = training_args.generation_length_penalty\n model.config.no_repeat_ngram_size = training_args.generation_no_repeat_ngram_size\n\n return model"
},
{
"identifier": "get_student_model",
"path": "distillation_utils.py",
"snippet": "def get_student_model(model_args, training_args):\n \n # Model\n base_config = BartConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n\n # for the evaluation using checkpoint model -> attention temperatured pseudo label\n base_config.dynamic_temperature = False\n base_config.temperature_scaling = 1.0\n\n if training_args.shrink_type is None: # Need to erase shrink tpye to load dual head models.\n\n model = BartForConditionalGeneration.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=base_config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n model.config.task_specific_params['summarization']['max_length'] = training_args.generation_max_length\n model.config.task_specific_params['summarization']['min_length'] = training_args.generation_min_length\n model.config.task_specific_params['summarization']['num_beams'] = training_args.generation_num_beams\n model.config.task_specific_params['summarization']['length_penalty'] = training_args.generation_length_penalty\n model.config.task_specific_params['summarization']['no_repeat_ngram_size'] = training_args.generation_no_repeat_ngram_size\n model.config.max_length = training_args.generation_max_length\n model.config.min_length = training_args.generation_min_length\n model.config.num_beams = training_args.generation_num_beams\n model.config.length_penalty = training_args.generation_length_penalty\n model.config.no_repeat_ngram_size = training_args.generation_no_repeat_ngram_size\n\n return model\n \n else:\n \n # single head teacher, and copy to dual head for initialization\n model = BartForConditionalGeneration.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=base_config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n\n base_config = base_config.to_dict()\n\n if '12-12' in training_args.shrink_type:\n enc_step, dec_step = 1, 1\n elif '12-6' in training_args.shrink_type:\n enc_step, dec_step = 1, 2\n elif '12-3' in training_args.shrink_type:\n enc_step, dec_step = 1, 4\n elif '12-1' in training_args.shrink_type:\n enc_step, dec_step = 1, 12\n elif '6-6' in training_args.shrink_type:\n enc_step, dec_step = 2, 2\n elif '6-3' in training_args.shrink_type:\n enc_step, dec_step = 2, 4\n elif '6-1' in training_args.shrink_type:\n enc_step, dec_step = 2, 12\n\n base_config['encoder_layers'] //= enc_step\n base_config['decoder_layers'] //= dec_step\n base_config['max_length'] = training_args.generation_max_length\n base_config['min_length'] = training_args.generation_min_length\n base_config['num_beams'] = training_args.generation_num_beams\n base_config['length_penalty'] = training_args.generation_length_penalty\n base_config['no_repeat_ngram_size'] = training_args.generation_no_repeat_ngram_size\n base_config['task_specific_params']['summarization']['max_length'] = training_args.generation_max_length\n base_config['task_specific_params']['summarization']['min_length'] = training_args.generation_min_length\n base_config['task_specific_params']['summarization']['num_beams'] = training_args.generation_num_beams\n 
base_config['task_specific_params']['summarization']['length_penalty'] = training_args.generation_length_penalty\n base_config['task_specific_params']['summarization']['no_repeat_ngram_size'] = training_args.generation_no_repeat_ngram_size\n\n target_config = BartConfig.from_dict(base_config)\n shrink_model = BartForConditionalGeneration(target_config)\n\n def _copy_weight_shared_layer(base, target):\n target.load_state_dict(base.state_dict())\n\n def _copy_weight_encoding_layer(base, target):\n target.embed_tokens.load_state_dict(base.embed_tokens.state_dict())\n target.embed_positions.load_state_dict(base.embed_positions.state_dict())\n \n base_layers = [layer for layer in base.layers]\n target_layers = [layer for layer in target.layers]\n for i in range(len(target_layers)):\n target_layers[i].load_state_dict(base_layers[enc_step * i].state_dict())\n\n def _copy_weight_decoding_layer(base, target):\n target.embed_tokens.load_state_dict(base.embed_tokens.state_dict())\n target.embed_positions.load_state_dict(base.embed_positions.state_dict())\n \n base_layers = [layer for layer in base.layers]\n target_layers = [layer for layer in target.layers]\n for i in range(len(target_layers)):\n target_layers[i].load_state_dict(base_layers[dec_step * i].state_dict())\n\n def _copy_weight_head_layer(base, target):\n target.load_state_dict(base.state_dict())\n\n _copy_weight_shared_layer(model.model.shared, shrink_model.model.shared)\n _copy_weight_encoding_layer(model.model.encoder, shrink_model.model.encoder)\n _copy_weight_decoding_layer(model.model.decoder, shrink_model.model.decoder)\n _copy_weight_head_layer(model.lm_head, shrink_model.lm_head)\n\n return shrink_model"
}
] | import logging
import os
import sys
import datasets
import nltk # Here to have a nice missing dependency error message early on
import torch
import transformers
import numpy as np
from dataclasses import dataclass, field
from typing import Optional
from datasets import load_dataset, load_metric
from filelock import FileLock
from transformers import (
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed
)
from transformers.file_utils import is_offline_mode
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from calibration_utils import CustomSeq2SeqTrainer
from distillation_utils import get_teacher_model, get_student_model | 9,677 | return model_inputs
column_names = raw_datasets["train"].column_names
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
#with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
column_names = raw_datasets["validation"].column_names
if training_args.do_eval:
max_target_length = data_args.val_max_target_length
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
#with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
column_names = raw_datasets["test"].column_names
if training_args.do_predict:
max_target_length = data_args.val_max_target_length
if "test" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset")
predict_dataset = raw_datasets["test"]
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
#with training_args.main_process_first(desc="prediction dataset map pre-processing"):
predict_dataset = predict_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
# Metric
metric = load_metric("rouge")
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [label.strip() for label in labels]
# rougeLSum expects newline after each sentence
preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds]
labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels]
return preds, labels
def compute_metrics(eval_preds):
preds, labels = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
if data_args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
# Extract a few results from ROUGE
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 4) for k, v in result.items()}
return result
# Configuration for calibration
calibration_params = {}
calibration_params['num_beams'] = training_args.generation_num_beams
calibration_params['num_candidate_beams'] = training_args.num_candidate_beams
calibration_params['calibration'] = training_args.calibration
calibration_params['min_length'] = training_args.generation_min_length
calibration_params['max_length'] = training_args.generation_max_length
calibration_params['length_penalty'] = training_args.generation_length_penalty
calibration_params['mle_weight'] = training_args.mle_weight
calibration_params['calibration_weight'] = training_args.calibration_weight
calibration_params['disversity_penalty'] = training_args.disversity_penalty
calibration_params['abstract_weight'] = training_args.abstract_weight
calibration_params['normalize'] = True
calibration_params['score_mode'] = 'log'
training_args.load_best_model_at_end = False
# Need this line for keeping all the columns
training_args.remove_unused_columns = False
# Initialize our Trainer
| """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Fine-tuning the library models for sequence to sequence.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.9.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
logger = logging.getLogger(__name__)
try:
nltk.data.find("tokenizers/punkt")
except (LookupError, OSError):
if is_offline_mode():
raise LookupError(
"Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
)
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models (student)"}
)
teacher_model_name_or_path: Optional[str] = field(
default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models (teacher)"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
text_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
summary_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the metrics (rouge) on "
"(a jsonlines or csv file)."
},
)
test_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input test data file to evaluate the metrics (rouge) on " "(a jsonlines or csv file)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
source_prefix: Optional[str] = field(
default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
)
eval_type: Optional[str] = field(
default='plain',
metadata={
"help": "spans_combined: splitting prediction into ISSUE, OUTCOME, NEXTSTEP spans for evaluation; "
"plain: filtering out ISSUE, OUTCOME, NEXTSTEP tokens, evaluating the entire summary text"
},
)
early_stopping_patience: Optional[int] = field(
default=5,
metadata={
"help": "the training will be stopped after this many checkpoints with no improvement"
},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
@dataclass
class BARTrainingArguments(Seq2SeqTrainingArguments):
"""
sortish_sampler (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use a `sortish sampler` or not. Only possible if the underlying datasets are `Seq2SeqDataset` for
now but will become generally available in the near future.
It sorts the inputs according to lengths in order to minimize the padding size, with a bit of randomness for
the training set.
predict_with_generate (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use generate to calculate generative metrics (ROUGE, BLEU).
generation_max_length (:obj:`int`, `optional`):
The :obj:`max_length` to use on each evaluation loop when :obj:`predict_with_generate=True`. Will default to
the :obj:`max_length` value of the model configuration.
generation_num_beams (:obj:`int`, `optional`):
The :obj:`num_beams` to use on each evaluation loop when :obj:`predict_with_generate=True`. Will default to the
:obj:`num_beams` value of the model configuration.
"""
predict_with_generate: bool = field(
default=True, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
)
generation_min_length: Optional[int] = field(
default=10,
metadata={
"help": "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
},
)
generation_max_length: Optional[int] = field(
default=360,
metadata={
"help": "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
},
)
generation_num_beams: Optional[int] = field(
default=6,
metadata={
"help": "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
},
)
generation_length_penalty: Optional[float] = field(
default=0.0,
metadata={
"help": "Value > 0.0 promotes longer sequences in beam decoding, value < 0.0 promotes shorter ones"
},
)
generation_no_repeat_ngram_size: Optional[int] = field(
default=3,
metadata={
"help": "Is > 0, no ngram of this size will be repeated in the output sequences"
},
)
metric_for_best_model: Optional[str] = field(
default="loss",
metadata={
"help": "can be loss or Rouge score for some/all of the spans"
},
)
#### new arguments for calibration process
calibration: bool = field(
default=False,
metadata={"help": "Whether to use the calibration loss."},
)
num_candidate_beams: int = field(
default=6,
metadata={"help": "The number of candidate summaries used for probability calibration."},
)
disversity_penalty: float = field(
default=0.1,
metadata={"help": "penalty for diverse beamserch in pseudo summary generation."},
)
mle_weight: float = field(
default=0.01,
metadata={"help": "Weight of MLE Loss."},
)
calibration_weight: float = field(
default=1.0,
metadata={"help": "Weight of calibration loss."},
)
abstract_weight: float = field(
default=0.0,
metadata={"help": "Weight of abstractiveness."},
)
#### new arguments for distillation
shrink_type: Optional[str] = field(
default=None,
metadata={"help": "Types of distillation."},
)
dynamic_temperature: bool = field(
default=False,
metadata={"help": "Whether to use dynamic temperature scaling for the teacher model."},
)
temperature_scaling: float = field(
default=1.0,
metadata={"help": "Temperature scaling value for the teacher model."},
)
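# Maps well-known summarization datasets to their (text column, summary column) names,
# used when --text_column/--summary_column are not provided.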
summarization_name_mapping = {
"amazon_reviews_multi": ("review_body", "review_title"),
"big_patent": ("description", "abstract"),
"cnn_dailymail": ("article", "highlights"),
"orange_sum": ("text", "summary"),
"pn_summary": ("article", "summary"),
"psc": ("extract_text", "summary_text"),
"samsum": ("dialogue", "summary"),
"thaisum": ("body", "summary"),
"xglue": ("news_body", "news_title"),
"xsum": ("document", "summary"),
"wiki_summary": ("article", "highlights"),
}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, BARTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
if data_args.source_prefix is None and model_args.model_name_or_path in [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
]:
logger.warning(
"You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with "
"`--source_prefix 'summarize: ' `"
)
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files this script will use the first column for the full texts and the second column for the
# summaries (unless you specify column names for this with the `text_column` and `summary_column` arguments).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# teacher model
teacher_model = None
if model_args.teacher_model_name_or_path is not None:
teacher_model = get_teacher_model(model_args, training_args)
teacher_model.resize_token_embeddings(len(tokenizer))
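# Prepare the teacher for inference only: optionally cast to fp16, move to GPU, and switch to eval mode.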
if training_args.fp16:
teacher_model.half()
if torch.cuda.is_available():
teacher_model.to('cuda')
teacher_model.eval()
trainable_n_parameters = sum(p.numel() for p in teacher_model.parameters() if p.requires_grad)
total_n_parameters = sum(p.numel() for p in teacher_model.parameters())
print(teacher_model.config)
print(f'Model # total params: {total_n_parameters}')
print(f'Model # trainable params: {trainable_n_parameters}')
# student model
model = get_student_model(model_args, training_args)
trainable_n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
total_n_parameters = sum(p.numel() for p in model.parameters())
print(model.config)
print(f'Model # total params: {total_n_parameters}')
print(f'Model # trainable params: {trainable_n_parameters}')
model.resize_token_embeddings(len(tokenizer))
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
if training_args.do_train:
column_names = raw_datasets["train"].column_names
elif training_args.do_eval:
column_names = raw_datasets["validation"].column_names
elif training_args.do_predict:
column_names = raw_datasets["test"].column_names
else:
logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
return
# Get the column names for input/target.
dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None)
if data_args.text_column is None:
text_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:
text_column = data_args.text_column
if text_column not in column_names:
raise ValueError(
f"--text_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}"
)
if data_args.summary_column is None:
summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
else:
summary_column = data_args.summary_column
if summary_column not in column_names:
raise ValueError(
f"--summary_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}"
)
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
logger.warning(
"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)
def preprocess_function(examples):
inputs = examples[text_column]
targets = examples[summary_column]
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
column_names = raw_datasets["train"].column_names
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
#with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
column_names = raw_datasets["validation"].column_names
if training_args.do_eval:
max_target_length = data_args.val_max_target_length
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
#with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
column_names = raw_datasets["test"].column_names
if training_args.do_predict:
max_target_length = data_args.val_max_target_length
if "test" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset")
predict_dataset = raw_datasets["test"]
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
#with training_args.main_process_first(desc="prediction dataset map pre-processing"):
predict_dataset = predict_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
# Data collator
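# -100 is the default ignore_index of PyTorch's cross-entropy loss, so padded label positions are excluded from the loss.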
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
# Metric
metric = load_metric("rouge")
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [label.strip() for label in labels]
# rougeLSum expects newline after each sentence
preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds]
labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels]
return preds, labels
def compute_metrics(eval_preds):
preds, labels = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
if data_args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
# Extract a few results from ROUGE
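# Each ROUGE entry is an AggregateScore from bootstrap resampling; keep the mid F-measure, scaled to a percentage.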
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 4) for k, v in result.items()}
return result
# Configuration for calibration
calibration_params = {}
calibration_params['num_beams'] = training_args.generation_num_beams
calibration_params['num_candidate_beams'] = training_args.num_candidate_beams
calibration_params['calibration'] = training_args.calibration
calibration_params['min_length'] = training_args.generation_min_length
calibration_params['max_length'] = training_args.generation_max_length
calibration_params['length_penalty'] = training_args.generation_length_penalty
calibration_params['mle_weight'] = training_args.mle_weight
calibration_params['calibration_weight'] = training_args.calibration_weight
calibration_params['disversity_penalty'] = training_args.disversity_penalty
calibration_params['abstract_weight'] = training_args.abstract_weight
calibration_params['normalize'] = True
calibration_params['score_mode'] = 'log'
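# These settings are presumably consumed by CustomSeq2SeqTrainer (calibration_utils) for candidate generation and calibration loss weighting.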
training_args.load_best_model_at_end = False
# Need this line for keeping all the columns
training_args.remove_unused_columns = False
# Initialize our Trainer | trainer = CustomSeq2SeqTrainer( | 0 | 2023-11-30 19:49:45+00:00 | 12k |
JunMa11/UHNSeg-Quiz | nnunetv2/evaluation/find_best_configuration.py | [
{
"identifier": "default_num_processes",
"path": "nnunetv2/configuration.py",
"snippet": "ANISO_THRESHOLD = 3 # determines when a sample is considered anisotropic (3 means that the spacing in the low"
},
{
"identifier": "ensemble_crossvalidations",
"path": "nnunetv2/ensembling/ensemble.py",
"snippet": "def ensemble_crossvalidations(list_of_trained_model_folders: List[str],\n output_folder: str,\n folds: Union[Tuple[int, ...], List[int]] = (0, 1, 2, 3, 4),\n num_processes: int = default_num_processes,\n overwrite: bool = True) -> None:\n \"\"\"\n Feature: different configurations can now have different splits\n \"\"\"\n dataset_json = load_json(join(list_of_trained_model_folders[0], 'dataset.json'))\n plans_manager = PlansManager(join(list_of_trained_model_folders[0], 'plans.json'))\n\n # first collect all unique filenames\n files_per_folder = {}\n unique_filenames = set()\n for tr in list_of_trained_model_folders:\n files_per_folder[tr] = {}\n for f in folds:\n if not isdir(join(tr, f'fold_{f}', 'validation')):\n raise RuntimeError(f'Expected model output directory does not exist. You must train all requested '\n f'folds of the specified model.\\nModel: {tr}\\nFold: {f}')\n files_here = subfiles(join(tr, f'fold_{f}', 'validation'), suffix='.npz', join=False)\n if len(files_here) == 0:\n raise RuntimeError(f\"No .npz files found in folder {join(tr, f'fold_{f}', 'validation')}. Rerun your \"\n f\"validation with the --npz flag. Use nnUNetv2_train [...] --val --npz.\")\n files_per_folder[tr][f] = subfiles(join(tr, f'fold_{f}', 'validation'), suffix='.npz', join=False)\n unique_filenames.update(files_per_folder[tr][f])\n\n # verify that all trained_model_folders have all predictions\n ok = True\n for tr, fi in files_per_folder.items():\n all_files_here = set()\n for f in folds:\n all_files_here.update(fi[f])\n diff = unique_filenames.difference(all_files_here)\n if len(diff) > 0:\n ok = False\n print(f'model {tr} does not seem to contain all predictions. Missing: {diff}')\n if not ok:\n raise RuntimeError('There were missing files, see print statements above this one')\n\n # now we need to collect where these files are\n file_mapping = []\n for tr in list_of_trained_model_folders:\n file_mapping.append({})\n for f in folds:\n for fi in files_per_folder[tr][f]:\n # check for duplicates\n assert fi not in file_mapping[-1].keys(), f\"Duplicate detected. Case {fi} is present in more than \" \\\n f\"one fold of model {tr}.\"\n file_mapping[-1][fi] = join(tr, f'fold_{f}', 'validation', fi)\n\n lists_of_lists_of_files = [[fm[i] for fm in file_mapping] for i in unique_filenames]\n output_files_truncated = [join(output_folder, fi[:-4]) for fi in unique_filenames]\n\n image_reader_writer = plans_manager.image_reader_writer_class()\n maybe_mkdir_p(output_folder)\n label_manager = plans_manager.get_label_manager(dataset_json)\n\n if not overwrite:\n tmp = [isfile(i + dataset_json['file_ending']) for i in output_files_truncated]\n lists_of_lists_of_files = [lists_of_lists_of_files[i] for i in range(len(tmp)) if not tmp[i]]\n output_files_truncated = [output_files_truncated[i] for i in range(len(tmp)) if not tmp[i]]\n\n with multiprocessing.get_context(\"spawn\").Pool(num_processes) as pool:\n num_preds = len(lists_of_lists_of_files)\n _ = pool.starmap(\n merge_files,\n zip(\n lists_of_lists_of_files,\n output_files_truncated,\n [dataset_json['file_ending']] * num_preds,\n [image_reader_writer] * num_preds,\n [label_manager] * num_preds,\n [False] * num_preds\n )\n )\n\n shutil.copy(join(list_of_trained_model_folders[0], 'plans.json'), join(output_folder, 'plans.json'))\n shutil.copy(join(list_of_trained_model_folders[0], 'dataset.json'), join(output_folder, 'dataset.json'))"
},
{
"identifier": "accumulate_cv_results",
"path": "nnunetv2/evaluation/accumulate_cv_results.py",
"snippet": "def accumulate_cv_results(trained_model_folder,\n merged_output_folder: str,\n folds: Union[List[int], Tuple[int, ...]],\n num_processes: int = default_num_processes,\n overwrite: bool = True):\n \"\"\"\n There are a lot of things that can get fucked up, so the simplest way to deal with potential problems is to\n collect the cv results into a separate folder and then evaluate them again. No messing with summary_json files!\n \"\"\"\n\n if overwrite and isdir(merged_output_folder):\n shutil.rmtree(merged_output_folder)\n maybe_mkdir_p(merged_output_folder)\n\n dataset_json = load_json(join(trained_model_folder, 'dataset.json'))\n plans_manager = PlansManager(join(trained_model_folder, 'plans.json'))\n rw = plans_manager.image_reader_writer_class()\n shutil.copy(join(trained_model_folder, 'dataset.json'), join(merged_output_folder, 'dataset.json'))\n shutil.copy(join(trained_model_folder, 'plans.json'), join(merged_output_folder, 'plans.json'))\n\n did_we_copy_something = False\n for f in folds:\n expected_validation_folder = join(trained_model_folder, f'fold_{f}', 'validation')\n if not isdir(expected_validation_folder):\n raise RuntimeError(f\"fold {f} of model {trained_model_folder} is missing. Please train it!\")\n predicted_files = subfiles(expected_validation_folder, suffix=dataset_json['file_ending'], join=False)\n for pf in predicted_files:\n if overwrite and isfile(join(merged_output_folder, pf)):\n raise RuntimeError(f'More than one of your folds has a prediction for case {pf}')\n if overwrite or not isfile(join(merged_output_folder, pf)):\n shutil.copy(join(expected_validation_folder, pf), join(merged_output_folder, pf))\n did_we_copy_something = True\n\n if did_we_copy_something or not isfile(join(merged_output_folder, 'summary.json')):\n label_manager = plans_manager.get_label_manager(dataset_json)\n gt_folder = join(nnUNet_raw, plans_manager.dataset_name, 'labelsTr')\n if not isdir(gt_folder):\n gt_folder = join(nnUNet_preprocessed, plans_manager.dataset_name, 'gt_segmentations')\n compute_metrics_on_folder(gt_folder,\n merged_output_folder,\n join(merged_output_folder, 'summary.json'),\n rw,\n dataset_json['file_ending'],\n label_manager.foreground_regions if label_manager.has_regions else\n label_manager.foreground_labels,\n label_manager.ignore_label,\n num_processes)"
},
{
"identifier": "compute_metrics_on_folder",
"path": "nnunetv2/evaluation/evaluate_predictions.py",
"snippet": "def compute_metrics_on_folder(folder_ref: str, folder_pred: str, output_file: str,\n image_reader_writer: BaseReaderWriter,\n file_ending: str,\n regions_or_labels: Union[List[int], List[Union[int, Tuple[int, ...]]]],\n ignore_label: int = None,\n num_processes: int = default_num_processes,\n chill: bool = True) -> dict:\n \"\"\"\n output_file must end with .json; can be None\n \"\"\"\n if output_file is not None:\n assert output_file.endswith('.json'), 'output_file should end with .json'\n files_pred = subfiles(folder_pred, suffix=file_ending, join=False)\n files_ref = subfiles(folder_ref, suffix=file_ending, join=False)\n if not chill:\n present = [isfile(join(folder_pred, i)) for i in files_ref]\n assert all(present), \"Not all files in folder_pred exist in folder_ref\"\n files_ref = [join(folder_ref, i) for i in files_pred]\n files_pred = [join(folder_pred, i) for i in files_pred]\n with multiprocessing.get_context(\"spawn\").Pool(num_processes) as pool:\n # for i in list(zip(files_ref, files_pred, [image_reader_writer] * len(files_pred), [regions_or_labels] * len(files_pred), [ignore_label] * len(files_pred))):\n # compute_metrics(*i)\n results = pool.starmap(\n compute_metrics,\n list(zip(files_ref, files_pred, [image_reader_writer] * len(files_pred), [regions_or_labels] * len(files_pred),\n [ignore_label] * len(files_pred)))\n )\n\n # mean metric per class\n metric_list = list(results[0]['metrics'][regions_or_labels[0]].keys())\n means = {}\n for r in regions_or_labels:\n means[r] = {}\n for m in metric_list:\n means[r][m] = np.nanmean([i['metrics'][r][m] for i in results])\n\n # foreground mean\n foreground_mean = {}\n for m in metric_list:\n values = []\n for k in means.keys():\n if k == 0 or k == '0':\n continue\n values.append(means[k][m])\n foreground_mean[m] = np.mean(values)\n\n [recursive_fix_for_json_export(i) for i in results]\n recursive_fix_for_json_export(means)\n recursive_fix_for_json_export(foreground_mean)\n result = {'metric_per_case': results, 'mean': means, 'foreground_mean': foreground_mean}\n if output_file is not None:\n save_summary_json(result, output_file)\n return result\n # print('DONE')"
},
{
"identifier": "load_summary_json",
"path": "nnunetv2/evaluation/evaluate_predictions.py",
"snippet": "def load_summary_json(filename: str):\n results = load_json(filename)\n # convert keys in mean metrics\n results['mean'] = {key_to_label_or_region(k): results['mean'][k] for k in results['mean'].keys()}\n # convert metric_per_case\n for i in range(len(results[\"metric_per_case\"])):\n results[\"metric_per_case\"][i]['metrics'] = \\\n {key_to_label_or_region(k): results[\"metric_per_case\"][i]['metrics'][k]\n for k in results[\"metric_per_case\"][i]['metrics'].keys()}\n return results"
},
{
"identifier": "nnUNet_preprocessed",
"path": "nnunetv2/paths.py",
"snippet": ""
},
{
"identifier": "determine_postprocessing",
"path": "nnunetv2/postprocessing/remove_connected_components.py",
"snippet": "def determine_postprocessing(folder_predictions: str,\n folder_ref: str,\n plans_file_or_dict: Union[str, dict],\n dataset_json_file_or_dict: Union[str, dict],\n num_processes: int = default_num_processes,\n keep_postprocessed_files: bool = True):\n \"\"\"\n Determines nnUNet postprocessing. Its output is a postprocessing.pkl file in folder_predictions which can be\n used with apply_postprocessing_to_folder.\n\n Postprocessed files are saved in folder_predictions/postprocessed. Set\n keep_postprocessed_files=False to delete these files after this function is done (temp files will eb created\n and deleted regardless).\n\n If plans_file_or_dict or dataset_json_file_or_dict are None, we will look for them in input_folder\n \"\"\"\n output_folder = join(folder_predictions, 'postprocessed')\n\n if plans_file_or_dict is None:\n expected_plans_file = join(folder_predictions, 'plans.json')\n if not isfile(expected_plans_file):\n raise RuntimeError(f\"Expected plans file missing: {expected_plans_file}. The plans files should have been \"\n f\"created while running nnUNetv2_predict. Sadge.\")\n plans_file_or_dict = load_json(expected_plans_file)\n plans_manager = PlansManager(plans_file_or_dict)\n\n if dataset_json_file_or_dict is None:\n expected_dataset_json_file = join(folder_predictions, 'dataset.json')\n if not isfile(expected_dataset_json_file):\n raise RuntimeError(\n f\"Expected plans file missing: {expected_dataset_json_file}. The plans files should have been \"\n f\"created while running nnUNetv2_predict. Sadge.\")\n dataset_json_file_or_dict = load_json(expected_dataset_json_file)\n\n if not isinstance(dataset_json_file_or_dict, dict):\n dataset_json = load_json(dataset_json_file_or_dict)\n else:\n dataset_json = dataset_json_file_or_dict\n\n rw = plans_manager.image_reader_writer_class()\n label_manager = plans_manager.get_label_manager(dataset_json)\n labels_or_regions = label_manager.foreground_regions if label_manager.has_regions else label_manager.foreground_labels\n\n predicted_files = subfiles(folder_predictions, suffix=dataset_json['file_ending'], join=False)\n ref_files = subfiles(folder_ref, suffix=dataset_json['file_ending'], join=False)\n # we should print a warning if not all files from folder_ref are present in folder_predictions\n if not all([i in predicted_files for i in ref_files]):\n print(f'WARNING: Not all files in folder_ref were found in folder_predictions. 
Determining postprocessing '\n f'should always be done on the entire dataset!')\n\n # before we start we should evaluate the imaegs in the source folder\n if not isfile(join(folder_predictions, 'summary.json')):\n compute_metrics_on_folder(folder_ref,\n folder_predictions,\n join(folder_predictions, 'summary.json'),\n rw,\n dataset_json['file_ending'],\n labels_or_regions,\n label_manager.ignore_label,\n num_processes)\n\n # we save the postprocessing functions in here\n pp_fns = []\n pp_fn_kwargs = []\n\n # pool party!\n with multiprocessing.get_context(\"spawn\").Pool(num_processes) as pool:\n # now let's see whether removing all but the largest foreground region improves the scores\n output_here = join(output_folder, 'temp', 'keep_largest_fg')\n maybe_mkdir_p(output_here)\n pp_fn = remove_all_but_largest_component_from_segmentation\n kwargs = {\n 'labels_or_regions': label_manager.foreground_labels,\n }\n\n pool.starmap(\n load_postprocess_save,\n zip(\n [join(folder_predictions, i) for i in predicted_files],\n [join(output_here, i) for i in predicted_files],\n [rw] * len(predicted_files),\n [[pp_fn]] * len(predicted_files),\n [[kwargs]] * len(predicted_files)\n )\n )\n compute_metrics_on_folder(folder_ref,\n output_here,\n join(output_here, 'summary.json'),\n rw,\n dataset_json['file_ending'],\n labels_or_regions,\n label_manager.ignore_label,\n num_processes)\n # now we need to figure out if doing this improved the dice scores. We will implement that defensively in so far\n # that if a single class got worse as a result we won't do this. We can change this in the future but right now I\n # prefer to do it this way\n baseline_results = load_summary_json(join(folder_predictions, 'summary.json'))\n pp_results = load_summary_json(join(output_here, 'summary.json'))\n do_this = pp_results['foreground_mean']['Dice'] > baseline_results['foreground_mean']['Dice']\n if do_this:\n for class_id in pp_results['mean'].keys():\n if pp_results['mean'][class_id]['Dice'] < baseline_results['mean'][class_id]['Dice']:\n do_this = False\n break\n if do_this:\n print(f'Results were improved by removing all but the largest foreground region. '\n f'Mean dice before: {round(baseline_results[\"foreground_mean\"][\"Dice\"], 5)} '\n f'after: {round(pp_results[\"foreground_mean\"][\"Dice\"], 5)}')\n source = output_here\n pp_fns.append(pp_fn)\n pp_fn_kwargs.append(kwargs)\n else:\n print(f'Removing all but the largest foreground region did not improve results!')\n source = folder_predictions\n\n # in the old nnU-Net we could just apply all-but-largest component removal to all classes at the same time and\n # then evaluate for each class whether this improved results. This is no longer possible because we now support\n # region-based predictions and regions can overlap, causing interactions\n # in principle the order with which the postprocessing is applied to the regions matter as well and should be\n # investigated, but due to some things that I am too lazy to explain right now it's going to be alright (I think)\n # to stick to the order in which they are declared in dataset.json (if you want to think about it then think about\n # region_class_order)\n # 2023_02_06: I hate myself for the comment above. 
Thanks past me\n if len(labels_or_regions) > 1:\n for label_or_region in labels_or_regions:\n pp_fn = remove_all_but_largest_component_from_segmentation\n kwargs = {\n 'labels_or_regions': label_or_region,\n }\n\n output_here = join(output_folder, 'temp', 'keep_largest_perClassOrRegion')\n maybe_mkdir_p(output_here)\n\n pool.starmap(\n load_postprocess_save,\n zip(\n [join(source, i) for i in predicted_files],\n [join(output_here, i) for i in predicted_files],\n [rw] * len(predicted_files),\n [[pp_fn]] * len(predicted_files),\n [[kwargs]] * len(predicted_files)\n )\n )\n compute_metrics_on_folder(folder_ref,\n output_here,\n join(output_here, 'summary.json'),\n rw,\n dataset_json['file_ending'],\n labels_or_regions,\n label_manager.ignore_label,\n num_processes)\n baseline_results = load_summary_json(join(source, 'summary.json'))\n pp_results = load_summary_json(join(output_here, 'summary.json'))\n do_this = pp_results['mean'][label_or_region]['Dice'] > baseline_results['mean'][label_or_region]['Dice']\n if do_this:\n print(f'Results were improved by removing all but the largest component for {label_or_region}. '\n f'Dice before: {round(baseline_results[\"mean\"][label_or_region][\"Dice\"], 5)} '\n f'after: {round(pp_results[\"mean\"][label_or_region][\"Dice\"], 5)}')\n if isdir(join(output_folder, 'temp', 'keep_largest_perClassOrRegion_currentBest')):\n shutil.rmtree(join(output_folder, 'temp', 'keep_largest_perClassOrRegion_currentBest'))\n shutil.move(output_here, join(output_folder, 'temp', 'keep_largest_perClassOrRegion_currentBest'), )\n source = join(output_folder, 'temp', 'keep_largest_perClassOrRegion_currentBest')\n pp_fns.append(pp_fn)\n pp_fn_kwargs.append(kwargs)\n else:\n print(f'Removing all but the largest component for {label_or_region} did not improve results! '\n f'Dice before: {round(baseline_results[\"mean\"][label_or_region][\"Dice\"], 5)} '\n f'after: {round(pp_results[\"mean\"][label_or_region][\"Dice\"], 5)}')\n [shutil.copy(join(source, i), join(output_folder, i)) for i in subfiles(source, join=False)]\n save_pickle((pp_fns, pp_fn_kwargs), join(folder_predictions, 'postprocessing.pkl'))\n\n baseline_results = load_summary_json(join(folder_predictions, 'summary.json'))\n final_results = load_summary_json(join(output_folder, 'summary.json'))\n tmp = {\n 'input_folder': {i: baseline_results[i] for i in ['foreground_mean', 'mean']},\n 'postprocessed': {i: final_results[i] for i in ['foreground_mean', 'mean']},\n 'postprocessing_fns': [i.__name__ for i in pp_fns],\n 'postprocessing_kwargs': pp_fn_kwargs,\n }\n # json is a very annoying little bi###. Can't handle tuples as dict keys.\n tmp['input_folder']['mean'] = {label_or_region_to_key(k): tmp['input_folder']['mean'][k] for k in\n tmp['input_folder']['mean'].keys()}\n tmp['postprocessed']['mean'] = {label_or_region_to_key(k): tmp['postprocessed']['mean'][k] for k in\n tmp['postprocessed']['mean'].keys()}\n # did I already say that I hate json? \"TypeError: Object of type int64 is not JSON serializable\" You retarded bro?\n recursive_fix_for_json_export(tmp)\n save_json(tmp, join(folder_predictions, 'postprocessing.json'))\n\n shutil.rmtree(join(output_folder, 'temp'))\n\n if not keep_postprocessed_files:\n shutil.rmtree(output_folder)\n return pp_fns, pp_fn_kwargs"
},
{
"identifier": "maybe_convert_to_dataset_name",
"path": "nnunetv2/utilities/file_path_utilities.py",
"snippet": "def convert_trainer_plans_config_to_identifier(trainer_name, plans_identifier, configuration):\ndef convert_identifier_to_trainer_plans_config(identifier: str):\ndef get_output_folder(dataset_name_or_id: Union[str, int], trainer_name: str = 'nnUNetTrainer',\n plans_identifier: str = 'nnUNetPlans', configuration: str = '3d_fullres',\n fold: Union[str, int] = None) -> str:\ndef parse_dataset_trainer_plans_configuration_from_path(path: str):\ndef get_ensemble_name(model1_folder, model2_folder, folds: Tuple[int, ...]):\ndef get_ensemble_name_from_d_tr_c(dataset, tr1, p1, c1, tr2, p2, c2, folds: Tuple[int, ...]):\ndef convert_ensemble_folder_to_model_identifiers_and_folds(ensemble_folder: str):\ndef folds_tuple_to_string(folds: Union[List[int], Tuple[int, ...]]):\ndef folds_string_to_tuple(folds_string: str):\ndef check_workers_alive_and_busy(export_pool: Pool, worker_list: List, results_list: List, allowed_num_queued: int = 0):"
},
{
"identifier": "PlansManager",
"path": "nnunetv2/utilities/plans_handling/plans_handler.py",
"snippet": "class PlansManager(object):\n def __init__(self, plans_file_or_dict: Union[str, dict]):\n \"\"\"\n Why do we need this?\n 1) resolve inheritance in configurations\n 2) expose otherwise annoying stuff like getting the label manager or IO class from a string\n 3) clearly expose the things that are in the plans instead of hiding them in a dict\n 4) cache shit\n\n This class does not prevent you from going wild. You can still use the plans directly if you prefer\n (PlansHandler.plans['key'])\n \"\"\"\n self.plans = plans_file_or_dict if isinstance(plans_file_or_dict, dict) else load_json(plans_file_or_dict)\n\n def __repr__(self):\n return self.plans.__repr__()\n\n def _internal_resolve_configuration_inheritance(self, configuration_name: str,\n visited: Tuple[str, ...] = None) -> dict:\n if configuration_name not in self.plans['configurations'].keys():\n raise ValueError(f'The configuration {configuration_name} does not exist in the plans I have. Valid '\n f'configuration names are {list(self.plans[\"configurations\"].keys())}.')\n configuration = deepcopy(self.plans['configurations'][configuration_name])\n if 'inherits_from' in configuration:\n parent_config_name = configuration['inherits_from']\n\n if visited is None:\n visited = (configuration_name,)\n else:\n if parent_config_name in visited:\n raise RuntimeError(f\"Circular dependency detected. The following configurations were visited \"\n f\"while solving inheritance (in that order!): {visited}. \"\n f\"Current configuration: {configuration_name}. Its parent configuration \"\n f\"is {parent_config_name}.\")\n visited = (*visited, configuration_name)\n\n base_config = self._internal_resolve_configuration_inheritance(parent_config_name, visited)\n base_config.update(configuration)\n configuration = base_config\n return configuration\n\n @lru_cache(maxsize=10)\n def get_configuration(self, configuration_name: str):\n if configuration_name not in self.plans['configurations'].keys():\n raise RuntimeError(f\"Requested configuration {configuration_name} not found in plans. 
\"\n f\"Available configurations: {list(self.plans['configurations'].keys())}\")\n\n configuration_dict = self._internal_resolve_configuration_inheritance(configuration_name)\n return ConfigurationManager(configuration_dict)\n\n @property\n def dataset_name(self) -> str:\n return self.plans['dataset_name']\n\n @property\n def plans_name(self) -> str:\n return self.plans['plans_name']\n\n @property\n def original_median_spacing_after_transp(self) -> List[float]:\n return self.plans['original_median_spacing_after_transp']\n\n @property\n def original_median_shape_after_transp(self) -> List[float]:\n return self.plans['original_median_shape_after_transp']\n\n @property\n @lru_cache(maxsize=1)\n def image_reader_writer_class(self) -> Type[BaseReaderWriter]:\n return recursive_find_reader_writer_by_name(self.plans['image_reader_writer'])\n\n @property\n def transpose_forward(self) -> List[int]:\n return self.plans['transpose_forward']\n\n @property\n def transpose_backward(self) -> List[int]:\n return self.plans['transpose_backward']\n\n @property\n def available_configurations(self) -> List[str]:\n return list(self.plans['configurations'].keys())\n\n @property\n @lru_cache(maxsize=1)\n def experiment_planner_class(self) -> Type[ExperimentPlanner]:\n planner_name = self.experiment_planner_name\n experiment_planner = recursive_find_python_class(join(nnunetv2.__path__[0], \"experiment_planning\"),\n planner_name,\n current_module=\"nnunetv2.experiment_planning\")\n return experiment_planner\n\n @property\n def experiment_planner_name(self) -> str:\n return self.plans['experiment_planner_used']\n\n @property\n @lru_cache(maxsize=1)\n def label_manager_class(self) -> Type[LabelManager]:\n return get_labelmanager_class_from_plans(self.plans)\n\n def get_label_manager(self, dataset_json: dict, **kwargs) -> LabelManager:\n return self.label_manager_class(label_dict=dataset_json['labels'],\n regions_class_order=dataset_json.get('regions_class_order'),\n **kwargs)\n\n @property\n def foreground_intensity_properties_per_channel(self) -> dict:\n if 'foreground_intensity_properties_per_channel' not in self.plans.keys():\n if 'foreground_intensity_properties_by_modality' in self.plans.keys():\n return self.plans['foreground_intensity_properties_by_modality']\n return self.plans['foreground_intensity_properties_per_channel']"
}
] | import argparse
import os.path
from copy import deepcopy
from typing import Union, List, Tuple
from batchgenerators.utilities.file_and_folder_operations import load_json, join, isdir, save_json
from nnunetv2.configuration import default_num_processes
from nnunetv2.ensembling.ensemble import ensemble_crossvalidations
from nnunetv2.evaluation.accumulate_cv_results import accumulate_cv_results
from nnunetv2.evaluation.evaluate_predictions import compute_metrics_on_folder, load_summary_json
from nnunetv2.paths import nnUNet_preprocessed, nnUNet_raw, nnUNet_results
from nnunetv2.postprocessing.remove_connected_components import determine_postprocessing
from nnunetv2.utilities.file_path_utilities import maybe_convert_to_dataset_name, get_output_folder, \
convert_identifier_to_trainer_plans_config, get_ensemble_name, folds_tuple_to_string
from nnunetv2.utilities.plans_handling.plans_handler import PlansManager | 8,184 | f"Inferred plans file: {join(nnUNet_preprocessed, maybe_convert_to_dataset_name(dataset_name_or_id), trained_model['plans'] + '.json')}.")
continue
# check if trained model output folder exists. This is a requirement. No mercy here.
expected_output_folder = get_output_folder(dataset_name_or_id, trained_model['trainer'], trained_model['plans'],
trained_model['configuration'], fold=None)
if not isdir(expected_output_folder):
raise RuntimeError(f"Trained model {trained_model} does not have an output folder. "
f"Expected: {expected_output_folder}. Please run the training for this model! (don't forget "
f"the --npz flag if you want to ensemble multiple configurations)")
valid.append(trained_model)
return valid
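# Sketch of the dict shape each entry of model_dict is expected to have (hypothetical
# example; the actual defaults are the default_trained_models tuple in this module):
# {'plans': 'nnUNetPlans', 'configuration': '3d_fullres', 'trainer': 'nnUNetTrainer'}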
def generate_inference_command(dataset_name_or_id: Union[int, str], configuration_name: str,
plans_identifier: str = 'nnUNetPlans', trainer_name: str = 'nnUNetTrainer',
folds: Union[List[int], Tuple[int, ...]] = (0, 1, 2, 3, 4),
folder_with_segs_from_prev_stage: str = None,
input_folder: str = 'INPUT_FOLDER',
output_folder: str = 'OUTPUT_FOLDER',
save_npz: bool = False):
fold_str = ''
for f in folds:
fold_str += f' {f}'
predict_command = ''
trained_model_folder = get_output_folder(dataset_name_or_id, trainer_name, plans_identifier, configuration_name, fold=None)
plans_manager = PlansManager(join(trained_model_folder, 'plans.json'))
configuration_manager = plans_manager.get_configuration(configuration_name)
if 'previous_stage' in plans_manager.available_configurations:
prev_stage = configuration_manager.previous_stage_name
predict_command += generate_inference_command(dataset_name_or_id, prev_stage, plans_identifier, trainer_name,
folds, None, output_folder='OUTPUT_FOLDER_PREV_STAGE') + '\n'
folder_with_segs_from_prev_stage = 'OUTPUT_FOLDER_PREV_STAGE'
predict_command = f'nnUNetv2_predict -d {dataset_name_or_id} -i {input_folder} -o {output_folder} -f {fold_str} ' \
f'-tr {trainer_name} -c {configuration_name} -p {plans_identifier}'
if folder_with_segs_from_prev_stage is not None:
predict_command += f' -prev_stage_predictions {folder_with_segs_from_prev_stage}'
if save_npz:
predict_command += ' --save_probabilities'
return predict_command
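# Hypothetical usage sketch (dataset id, configuration and flag below are placeholders,
# not taken from the original file); the call returns the nnUNetv2_predict command string:
# example_command = generate_inference_command(4, '3d_fullres', save_npz=True)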
def find_best_configuration(dataset_name_or_id,
allowed_trained_models: Union[List[dict], Tuple[dict, ...]] = default_trained_models,
allow_ensembling: bool = True,
num_processes: int = default_num_processes,
overwrite: bool = True,
folds: Union[List[int], Tuple[int, ...]] = (0, 1, 2, 3, 4),
strict: bool = False):
dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id)
all_results = {}
allowed_trained_models = filter_available_models(deepcopy(allowed_trained_models), dataset_name_or_id)
for m in allowed_trained_models:
output_folder = get_output_folder(dataset_name_or_id, m['trainer'], m['plans'], m['configuration'], fold=None)
if not isdir(output_folder) and strict:
raise RuntimeError(f'{dataset_name}: The output folder of plans {m["plans"]} configuration '
f'{m["configuration"]} is missing. Please train the model (all requested folds!) first!')
identifier = os.path.basename(output_folder)
merged_output_folder = join(output_folder, f'crossval_results_folds_{folds_tuple_to_string(folds)}')
accumulate_cv_results(output_folder, merged_output_folder, folds, num_processes, overwrite)
all_results[identifier] = {
'source': merged_output_folder,
'result': load_summary_json(join(merged_output_folder, 'summary.json'))['foreground_mean']['Dice']
}
if allow_ensembling:
for i in range(len(allowed_trained_models)):
for j in range(i + 1, len(allowed_trained_models)):
m1, m2 = allowed_trained_models[i], allowed_trained_models[j]
output_folder_1 = get_output_folder(dataset_name_or_id, m1['trainer'], m1['plans'], m1['configuration'], fold=None)
output_folder_2 = get_output_folder(dataset_name_or_id, m2['trainer'], m2['plans'], m2['configuration'], fold=None)
identifier = get_ensemble_name(output_folder_1, output_folder_2, folds)
output_folder_ensemble = join(nnUNet_results, dataset_name, 'ensembles', identifier)
ensemble_crossvalidations([output_folder_1, output_folder_2], output_folder_ensemble, folds,
num_processes, overwrite=overwrite)
# evaluate ensembled predictions
plans_manager = PlansManager(join(output_folder_1, 'plans.json'))
dataset_json = load_json(join(output_folder_1, 'dataset.json'))
label_manager = plans_manager.get_label_manager(dataset_json)
rw = plans_manager.image_reader_writer_class()
compute_metrics_on_folder(join(nnUNet_preprocessed, dataset_name, 'gt_segmentations'),
output_folder_ensemble,
join(output_folder_ensemble, 'summary.json'),
rw,
dataset_json['file_ending'],
label_manager.foreground_regions if label_manager.has_regions else
label_manager.foreground_labels,
label_manager.ignore_label,
num_processes)
all_results[identifier] = \
{
'source': output_folder_ensemble,
'result': load_summary_json(join(output_folder_ensemble, 'summary.json'))['foreground_mean']['Dice']
}
# pick best and report inference command
best_score = max([i['result'] for i in all_results.values()])
best_keys = [k for k in all_results.keys() if all_results[k]['result'] == best_score] # may never happen but theoretically
# there can be a tie. Let's pick the first model in this case because it's going to be the simpler one (ensembles
# come after single configs)
best_key = best_keys[0]
print()
print('***All results:***')
for k, v in all_results.items():
print(f'{k}: {v["result"]}')
print(f'\n*Best*: {best_key}: {all_results[best_key]["result"]}')
print()
print('***Determining postprocessing for best model/ensemble***')
|
default_trained_models = tuple([
{'plans': 'nnUNetPlans', 'configuration': '2d', 'trainer': 'nnUNetTrainer'},
{'plans': 'nnUNetPlans', 'configuration': '3d_fullres', 'trainer': 'nnUNetTrainer'},
{'plans': 'nnUNetPlans', 'configuration': '3d_lowres', 'trainer': 'nnUNetTrainer'},
{'plans': 'nnUNetPlans', 'configuration': '3d_cascade_fullres', 'trainer': 'nnUNetTrainer'},
])
def filter_available_models(model_dict: Union[List[dict], Tuple[dict, ...]], dataset_name_or_id: Union[str, int]):
valid = []
for trained_model in model_dict:
plans_manager = PlansManager(join(nnUNet_preprocessed, maybe_convert_to_dataset_name(dataset_name_or_id),
trained_model['plans'] + '.json'))
# check if configuration exists
# 3d_cascade_fullres and 3d_lowres do not exist for each dataset so we allow them to be absent IF they are not
# specified in the plans file
if trained_model['configuration'] not in plans_manager.available_configurations:
print(f"Configuration {trained_model['configuration']} not found in plans {trained_model['plans']}.\n"
f"Inferred plans file: {join(nnUNet_preprocessed, maybe_convert_to_dataset_name(dataset_name_or_id), trained_model['plans'] + '.json')}.")
continue
# check if trained model output folder exists. This is a requirement. No mercy here.
expected_output_folder = get_output_folder(dataset_name_or_id, trained_model['trainer'], trained_model['plans'],
trained_model['configuration'], fold=None)
if not isdir(expected_output_folder):
raise RuntimeError(f"Trained model {trained_model} does not have an output folder. "
f"Expected: {expected_output_folder}. Please run the training for this model! (don't forget "
f"the --npz flag if you want to ensemble multiple configurations)")
valid.append(trained_model)
return valid
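# Note: a configuration missing from the plans is skipped with a printed message above,
# whereas a missing trained-model output folder is treated as a hard error (RuntimeError).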
def generate_inference_command(dataset_name_or_id: Union[int, str], configuration_name: str,
plans_identifier: str = 'nnUNetPlans', trainer_name: str = 'nnUNetTrainer',
folds: Union[List[int], Tuple[int, ...]] = (0, 1, 2, 3, 4),
folder_with_segs_from_prev_stage: str = None,
input_folder: str = 'INPUT_FOLDER',
output_folder: str = 'OUTPUT_FOLDER',
save_npz: bool = False):
fold_str = ''
for f in folds:
fold_str += f' {f}'
predict_command = ''
trained_model_folder = get_output_folder(dataset_name_or_id, trainer_name, plans_identifier, configuration_name, fold=None)
plans_manager = PlansManager(join(trained_model_folder, 'plans.json'))
configuration_manager = plans_manager.get_configuration(configuration_name)
if 'previous_stage' in plans_manager.available_configurations:
prev_stage = configuration_manager.previous_stage_name
predict_command += generate_inference_command(dataset_name_or_id, prev_stage, plans_identifier, trainer_name,
folds, None, output_folder='OUTPUT_FOLDER_PREV_STAGE') + '\n'
folder_with_segs_from_prev_stage = 'OUTPUT_FOLDER_PREV_STAGE'
predict_command = f'nnUNetv2_predict -d {dataset_name_or_id} -i {input_folder} -o {output_folder} -f {fold_str} ' \
f'-tr {trainer_name} -c {configuration_name} -p {plans_identifier}'
if folder_with_segs_from_prev_stage is not None:
predict_command += f' -prev_stage_predictions {folder_with_segs_from_prev_stage}'
if save_npz:
predict_command += ' --save_probabilities'
return predict_command
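# If the plans expose a previous stage, the branch above first emits the predict command
# for that stage into OUTPUT_FOLDER_PREV_STAGE and then chains it into this command via
# -prev_stage_predictions.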
def find_best_configuration(dataset_name_or_id,
allowed_trained_models: Union[List[dict], Tuple[dict, ...]] = default_trained_models,
allow_ensembling: bool = True,
num_processes: int = default_num_processes,
overwrite: bool = True,
folds: Union[List[int], Tuple[int, ...]] = (0, 1, 2, 3, 4),
strict: bool = False):
dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id)
all_results = {}
allowed_trained_models = filter_available_models(deepcopy(allowed_trained_models), dataset_name_or_id)
for m in allowed_trained_models:
output_folder = get_output_folder(dataset_name_or_id, m['trainer'], m['plans'], m['configuration'], fold=None)
if not isdir(output_folder) and strict:
raise RuntimeError(f'{dataset_name}: The output folder of plans {m["plans"]} configuration '
f'{m["configuration"]} is missing. Please train the model (all requested folds!) first!')
identifier = os.path.basename(output_folder)
merged_output_folder = join(output_folder, f'crossval_results_folds_{folds_tuple_to_string(folds)}')
accumulate_cv_results(output_folder, merged_output_folder, folds, num_processes, overwrite)
all_results[identifier] = {
'source': merged_output_folder,
'result': load_summary_json(join(merged_output_folder, 'summary.json'))['foreground_mean']['Dice']
}
if allow_ensembling:
for i in range(len(allowed_trained_models)):
for j in range(i + 1, len(allowed_trained_models)):
m1, m2 = allowed_trained_models[i], allowed_trained_models[j]
output_folder_1 = get_output_folder(dataset_name_or_id, m1['trainer'], m1['plans'], m1['configuration'], fold=None)
output_folder_2 = get_output_folder(dataset_name_or_id, m2['trainer'], m2['plans'], m2['configuration'], fold=None)
identifier = get_ensemble_name(output_folder_1, output_folder_2, folds)
output_folder_ensemble = join(nnUNet_results, dataset_name, 'ensembles', identifier)
ensemble_crossvalidations([output_folder_1, output_folder_2], output_folder_ensemble, folds,
num_processes, overwrite=overwrite)
# evaluate ensembled predictions
plans_manager = PlansManager(join(output_folder_1, 'plans.json'))
dataset_json = load_json(join(output_folder_1, 'dataset.json'))
label_manager = plans_manager.get_label_manager(dataset_json)
rw = plans_manager.image_reader_writer_class()
compute_metrics_on_folder(join(nnUNet_preprocessed, dataset_name, 'gt_segmentations'),
output_folder_ensemble,
join(output_folder_ensemble, 'summary.json'),
rw,
dataset_json['file_ending'],
label_manager.foreground_regions if label_manager.has_regions else
label_manager.foreground_labels,
label_manager.ignore_label,
num_processes)
all_results[identifier] = \
{
'source': output_folder_ensemble,
'result': load_summary_json(join(output_folder_ensemble, 'summary.json'))['foreground_mean']['Dice']
}
# pick best and report inference command
best_score = max([i['result'] for i in all_results.values()])
best_keys = [k for k in all_results.keys() if all_results[k]['result'] == best_score] # may never happen but theoretically
# there can be a tie. Let's pick the first model in this case because it's going to be the simpler one (ensembles
# come after single configs)
best_key = best_keys[0]
print()
print('***All results:***')
for k, v in all_results.items():
print(f'{k}: {v["result"]}')
print(f'\n*Best*: {best_key}: {all_results[best_key]["result"]}')
print()
print('***Determining postprocessing for best model/ensemble***') | determine_postprocessing(all_results[best_key]['source'], join(nnUNet_preprocessed, dataset_name, 'gt_segmentations'), | 6 | 2023-12-04 19:43:14+00:00 | 12k |
Inflectra/spira-jira-migration-advanced | convert_jira_to_spira_issue_elements.py | [
{
"identifier": "Spira",
"path": "spira.py",
"snippet": "class Spira:\n def __init__(self, base_url, basic_auth, verify=True):\n if base_url[-1] == \"/\":\n self.base_url = base_url\n else:\n self.base_url = base_url + \"/\"\n\n self.host = urlparse(base_url).netloc\n\n self.verify = verify\n\n self.construct_base_header(basic_auth)\n\n def construct_base_header(self, basic_auth):\n self.headers = {\n \"Host\": self.host,\n \"username\": basic_auth[0],\n \"api-key\": basic_auth[1],\n \"accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n }\n\n # Get all tasks the current user owns\n def get_tasks(self) -> Dict:\n get_tasks_url = self.base_url + \"tasks\"\n\n response = requests.request(\n \"GET\", get_tasks_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n # Get all tasks created from a certain date\n def get_all_tasks(\n self,\n project_id,\n start_row=1,\n number_of_rows=100000,\n creation_date=\"2020-01-01T00:00:00.000\",\n ):\n params = {\n \"start_row\": start_row,\n \"number_of_rows\": number_of_rows,\n \"creation_date\": creation_date,\n }\n\n get_all_tasks_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/tasks/new?\"\n + urllib.parse.urlencode(params)\n )\n\n response = requests.request(\n \"GET\", get_all_tasks_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n # Get all task types\n def get_task_types(self, project_template_id) -> Dict:\n get_task_types_url = (\n self.base_url\n + \"project-templates/\"\n + str(project_template_id)\n + \"/tasks/types\"\n )\n\n response = requests.request(\n \"GET\", get_task_types_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n # Create a new task on the supplied project with the supplied task body\n def create_task(self, project_id, body) -> Dict:\n new_task_url = self.base_url + \"projects/\" + str(project_id) + \"/tasks\"\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\", new_task_url, headers=self.headers, data=payload, verify=self.verify\n )\n\n return response.json()\n\n def get_requirement_types(self, project_template_id) -> Dict:\n get_requirement_types_url = (\n self.base_url\n + \"project-templates/\"\n + str(project_template_id)\n + \"/requirements/types\"\n )\n\n response = requests.request(\n \"GET\", get_requirement_types_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n # Create a new requirement\n def create_requirement(self, project_id, body) -> Dict:\n new_requirement_url = (\n self.base_url + \"projects/\" + str(project_id) + \"/requirements\"\n )\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n new_requirement_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n return response.json()\n\n # Create a new requirement\n def create_child_requirement(self, project_id, parentid, body) -> Dict:\n new_requirement_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/requirements/parent/\"\n + str(parentid)\n )\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n new_requirement_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n return response.json()\n\n # Get all requirements\n def get_all_requirements(self, project_id, starting_row=1, number_of_rows=100000):\n params = {\"starting_row\": starting_row, \"number_of_rows\": number_of_rows}\n\n get_all_requirements_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/requirements?\"\n + 
urllib.parse.urlencode(params)\n )\n\n response = requests.request(\n \"GET\", get_all_requirements_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n # Get all incident types\n def get_incident_types(self, project_template_id) -> Dict:\n get_incident_types_url = (\n self.base_url\n + \"project-templates/\"\n + str(project_template_id)\n + \"/incidents/types\"\n )\n\n response = requests.request(\n \"GET\", get_incident_types_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n # Create a new incident\n def create_incident(self, project_id, body) -> Dict:\n new_incident_url = self.base_url + \"projects/\" + str(project_id) + \"/incidents\"\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n new_incident_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n return response.json()\n\n # Create a new release\n def create_release(self, project_id, body) -> Dict:\n new_releases_url = self.base_url + \"projects/\" + str(project_id) + \"/releases\"\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n new_releases_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n return response.json()\n\n # Create a new child release\n def create_child_release(self, project_id, parent_id, body) -> Dict:\n new_parent_releases_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/releases/\"\n + str(parent_id)\n )\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n new_parent_releases_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n return response.json()\n\n # Create a new component\n def create_component(self, project_id, body) -> Dict:\n new_component_url = (\n self.base_url + \"projects/\" + str(project_id) + \"/components\"\n )\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n new_component_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n return response.json()\n\n # Create a new customlist at project template level\n def create_project_template_customlist(self, project_template_id, body) -> Dict:\n new_customlist_url = (\n self.base_url\n + \"project-templates/\"\n + str(project_template_id)\n + \"/custom-lists\"\n )\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n new_customlist_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n return response.json()\n\n # Create a new customlist at system level\n def create_system_customlist(self, body) -> Dict:\n new_system_customlist_url = self.base_url + \"/system/custom-lists\"\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n new_system_customlist_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n return response.json()\n\n # Create a incidents\n def get_all_incidents(\n self,\n project_id,\n start_row=1,\n number_rows=100000,\n creation_date=\"2020-01-01T00:00:00.000\",\n ):\n params = {\n \"start_row\": start_row,\n \"number_rows\": number_rows,\n \"creation_date\": creation_date,\n }\n\n get_all_incidents_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/incidents/recent?\"\n + urllib.parse.urlencode(params)\n )\n\n response = requests.request(\n \"GET\", get_all_incidents_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n # Create a new task comment\n def create_task_comment(self, project_id, task_id, body) -> Dict:\n new_task_comment_url = (\n 
self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/tasks/\"\n + str(task_id)\n + \"/comments\"\n )\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n new_task_comment_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n\n return response.json()\n\n # Create a new incident comment\n def create_incident_comment(self, project_id, incident_id, body) -> Dict:\n new_incident_comment_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/incidents/\"\n + str(incident_id)\n + \"/comments\"\n )\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n new_incident_comment_url,\n headers=self.headers,\n data=\"[\" + payload + \"]\",\n verify=self.verify,\n )\n\n return response.json()\n\n # Create a new requirement comment\n def create_requirement_comment(self, project_id, requirement_id, body) -> Dict:\n new_requirement_comment_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/requirements/\"\n + str(requirement_id)\n + \"/comments\"\n )\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n new_requirement_comment_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_all_document_folders(self, project_id) -> Dict:\n get_all_document_folders = (\n self.base_url + \"projects/\" + str(project_id) + \"/document-folders\"\n )\n\n response = requests.request(\n \"GET\",\n get_all_document_folders,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def add_document_folder(self, project_id, body) -> Dict:\n add_document_folder_url = (\n self.base_url + \"projects/\" + str(project_id) + \"/document-folders\"\n )\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n add_document_folder_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n return response.json()\n\n def delete_document_folder(self, project_id, folder_id) -> Dict:\n delete_document_folder_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/document-folders/\"\n + str(folder_id)\n )\n\n response = requests.request(\n \"DELETE\",\n delete_document_folder_url,\n headers=self.headers,\n verify=self.verify,\n )\n return response.json()\n\n def get_all_documents(self, project_id) -> Dict:\n get_all_documents_url = (\n self.base_url + \"projects/\" + str(project_id) + \"/documents\"\n )\n\n response = requests.request(\n \"GET\",\n get_all_documents_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_document(self, project_id, document_id) -> Dict:\n get_all_documents_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/documents/\"\n + str(document_id)\n )\n\n response = requests.request(\n \"GET\",\n get_all_documents_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def add_document(self, project_id, body) -> Dict:\n add_document_url = (\n self.base_url + \"projects/\" + str(project_id) + \"/documents/file\"\n )\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n add_document_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n return response.json()\n\n def add_artifact_document_association(\n self, project_id, artifact_type_id, artifact_id, document_id\n ) -> Dict:\n attach_document_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/artifact-types/\"\n + str(artifact_type_id)\n + \"/artifacts/\"\n + str(artifact_id)\n 
+ \"/documents/\"\n + str(document_id)\n )\n\n response = requests.request(\n \"POST\",\n attach_document_url,\n headers=self.headers,\n verify=self.verify,\n )\n return response.json()\n\n def remove_artifact_document_association(\n self, project_id, artifact_type_id, artifact_id, document_id\n ) -> Dict:\n detach_document_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/artifact-types/\"\n + str(artifact_type_id)\n + \"/artifacts/\"\n + str(artifact_id)\n + \"/documents/\"\n + str(document_id)\n )\n\n response = requests.request(\n \"DELETE\",\n detach_document_url,\n headers=self.headers,\n verify=self.verify,\n )\n return response.json()\n\n def delete_document(self, project_id, document_id) -> Dict:\n delete_document_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/documents/\"\n + str(document_id)\n )\n\n response = requests.request(\n \"DELETE\",\n delete_document_url,\n headers=self.headers,\n verify=self.verify,\n )\n return response.json()\n\n def get_projects(self) -> Dict:\n get_projects_url = self.base_url + \"projects\"\n\n response = requests.request(\n \"GET\", get_projects_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n def get_all_project_templates(self):\n get_all_project_templates_url = self.base_url + \"project-templates\"\n\n response = requests.request(\n \"GET\",\n get_all_project_templates_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_project_template(self, project_template_id):\n get_project_template_url = (\n self.base_url + \"project-templates/\" + str(project_template_id)\n )\n\n response = requests.request(\n \"GET\", get_project_template_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n def get_all_users(self, include_inactive=True, start_row=1, number_rows=5000):\n params = {\n \"include_inactive\": include_inactive,\n \"start_row\": start_row,\n \"number_rows\": number_rows,\n }\n get_all_users_url = (\n self.base_url + \"users/all?\" + urllib.parse.urlencode(params)\n )\n\n response = requests.request(\n \"GET\", get_all_users_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n def get_project_template_custom_properties(\n self, project_template_id, artifact_type_name\n ):\n get_project_template_custom_properties_url = (\n self.base_url\n + \"project-templates/\"\n + str(project_template_id)\n + \"/custom-properties/\"\n + artifact_type_name\n )\n\n response = requests.request(\n \"GET\",\n get_project_template_custom_properties_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_project_template_custom_lists(self, project_template_id):\n get_project_template_custom_list_url = (\n self.base_url\n + \"project-templates/\"\n + str(project_template_id)\n + \"/custom-lists\"\n )\n\n response = requests.request(\n \"GET\",\n get_project_template_custom_list_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_project_template_custom_list_values(self, project_template_id, list_id):\n get_project_template_custom_list_values_url = (\n self.base_url\n + \"project-templates/\"\n + str(project_template_id)\n + \"/custom-lists/\"\n + str(list_id)\n )\n\n response = requests.request(\n \"GET\",\n get_project_template_custom_list_values_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_system_level_custom_lists(self):\n get_system_level_custom_lists_url = 
self.base_url + \"/system/custom-lists\"\n\n response = requests.request(\n \"GET\",\n get_system_level_custom_lists_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_system_level_custom_list_values(self, list_id):\n get_system_level_custom_list_values_url = (\n self.base_url + \"/system/custom-lists/\" + str(list_id)\n )\n\n response = requests.request(\n \"GET\",\n get_system_level_custom_list_values_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_all_releases(self, project_id, active_only=False):\n params = {\"active_only\": active_only}\n\n get_all_releases_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/releases?\"\n + urllib.parse.urlencode(params)\n )\n\n response = requests.request(\n \"GET\", get_all_releases_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n def get_all_components(self, project_id, active_only=False, include_deleted=False):\n params = {\n \"active_only\": active_only,\n \"include_deleted\": include_deleted,\n }\n\n get_all_components_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/components?\"\n + urllib.parse.urlencode(params)\n )\n\n response = requests.request(\n \"GET\", get_all_components_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n def get_requirement_importances(self, project_template_id):\n get_requirement_importances_url = (\n self.base_url\n + \"project-templates/\"\n + str(project_template_id)\n + \"/requirements/importances\"\n )\n\n response = requests.request(\n \"GET\",\n get_requirement_importances_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_incident_priorities(self, project_template_id):\n get_incident_priorities_url = (\n self.base_url\n + \"project-templates/\"\n + str(project_template_id)\n + \"/incidents/priorities\"\n )\n\n response = requests.request(\n \"GET\", get_incident_priorities_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n def get_task_priorities(self, project_template_id):\n get_task_priorities_url = (\n self.base_url\n + \"project-templates/\"\n + str(project_template_id)\n + \"/tasks/priorities\"\n )\n\n response = requests.request(\n \"GET\", get_task_priorities_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n def add_association(self, project_id, body) -> Dict:\n create_association_url = (\n self.base_url + \"projects/\" + str(project_id) + \"/associations\"\n )\n payload = json.dumps(body)\n response = requests.request(\n \"POST\",\n create_association_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n if response.ok:\n return response.json()\n else:\n return response # type: ignore\n\n def get_requirement_statuses(self, project_template_id):\n get_requirement_statuses_url = (\n self.base_url\n + \"project-templates/\"\n + str(project_template_id)\n + \"/requirements/statuses\"\n )\n\n response = requests.request(\n \"GET\",\n get_requirement_statuses_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_incident_statuses(self, project_template_id):\n get_incident_statuses_url = (\n self.base_url\n + \"project-templates/\"\n + str(project_template_id)\n + \"/incidents/statuses\"\n )\n\n response = requests.request(\n \"GET\", get_incident_statuses_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n def get_task_statuses(self, 
project_template_id):\n get_task_statuses_url = (\n self.base_url\n + \"project-templates/\"\n + str(project_template_id)\n + \"/tasks/statuses\"\n )\n\n response = requests.request(\n \"GET\", get_task_statuses_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n def delete_requirement(self, project_id, requirement_id):\n delete_requirement_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/requirements/\"\n + str(requirement_id)\n )\n\n response = requests.request(\n \"DELETE\", delete_requirement_url, headers=self.headers, verify=self.verify\n )\n\n return response.status_code\n\n def delete_incident(self, project_id, incident_id):\n delete_incident_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/incidents/\"\n + str(incident_id)\n )\n\n response = requests.request(\n \"DELETE\", delete_incident_url, headers=self.headers, verify=self.verify\n )\n\n return response.status_code\n\n def delete_task(self, project_id, task_id):\n delete_task_url = (\n self.base_url + \"projects/\" + str(project_id) + \"/tasks/\" + str(task_id)\n )\n\n response = requests.request(\n \"DELETE\", delete_task_url, headers=self.headers, verify=self.verify\n )\n\n return response.status_code\n\n def delete_component(self, project_id, component_id):\n delete_component_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/components/\"\n + str(component_id)\n )\n\n response = requests.request(\n \"DELETE\", delete_component_url, headers=self.headers, verify=self.verify\n )\n\n return response.status_code\n\n def delete_release(self, project_id, release_id):\n delete_release_url = (\n self.base_url\n + \"projects/\"\n + str(project_id)\n + \"/releases/\"\n + str(release_id)\n )\n\n response = requests.request(\n \"DELETE\", delete_release_url, headers=self.headers, verify=self.verify\n )\n\n return response.status_code\n\n def get_all_programs(self) -> Dict:\n get_all_programs_url = self.base_url + \"programs\"\n\n response = requests.request(\n \"GET\", get_all_programs_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n def get_program(self, program_id) -> Dict:\n get_program_url = self.base_url + \"programs/\" + program_id\n\n response = requests.request(\n \"GET\", get_program_url, headers=self.headers, verify=self.verify\n )\n\n return response.json()\n\n def get_system_custom_properties(self, artifact) -> Dict:\n get_system_custom_property_url = (\n self.base_url + \"system/custom-properties/\" + artifact\n )\n\n response = requests.request(\n \"GET\",\n get_system_custom_property_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def create_program_milestone(self, program_id, body):\n create_program_milestone_url = (\n self.base_url + \"programs/\" + str(program_id) + \"/milestones\"\n )\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n create_program_milestone_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_all_program_milestones(self, program_id):\n get_all_program_milestones_url = (\n self.base_url + \"programs/\" + str(program_id) + \"/milestones\"\n )\n\n response = requests.request(\n \"GET\",\n get_all_program_milestones_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def create_capability(self, program_id, body):\n create_capability_url = (\n self.base_url + \"programs/\" + str(program_id) + \"/capabilities\"\n )\n\n payload = 
json.dumps(body)\n\n response = requests.request(\n \"POST\",\n create_capability_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n\n return response.json()\n\n def create_child_capability(self, program_id, parentid, body):\n create_child_capability_url = (\n self.base_url\n + \"programs/\"\n + str(program_id)\n + \"/capabilities/\"\n + str(parentid)\n )\n\n payload = json.dumps(body)\n\n response = requests.request(\n \"POST\",\n create_child_capability_url,\n headers=self.headers,\n data=payload,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_all_program_capabilities(self, program_id):\n params = {\"current_page\": 1, \"page_size\": 10000}\n\n get_all_program_capabilities_url = (\n self.base_url\n + \"programs/\"\n + str(program_id)\n + \"/capabilities/search?\"\n + urllib.parse.urlencode(params)\n )\n\n response = requests.request(\n \"GET\",\n get_all_program_capabilities_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def add_capability_requirement_association(\n self, program_id, capability_id, requirement_id\n ):\n capability_requirement_association_url = (\n self.base_url\n + \"programs/\"\n + str(program_id)\n + \"/capabilities/\"\n + str(capability_id)\n + \"/requirements/\"\n + str(requirement_id)\n )\n\n response = requests.request(\n \"POST\",\n capability_requirement_association_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.status_code\n\n def delete_program_capability(self, program_id, capability_id):\n delete_program_capability_url = (\n self.base_url\n + \"/programs/\"\n + str(program_id)\n + \"/capabilities/\"\n + str(capability_id)\n )\n\n response = requests.request(\n \"DELETE\",\n delete_program_capability_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.status_code\n\n def delete_program_milestone(self, program_id, milestone_id):\n delete_program_milestone_url = (\n self.base_url\n + \"/programs/\"\n + str(program_id)\n + \"/milestones/\"\n + str(milestone_id)\n )\n\n response = requests.request(\n \"DELETE\",\n delete_program_milestone_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.status_code\n\n def get_program_capability_types(self):\n get_program_capability_types_url = self.base_url + \"capabilities/types\"\n\n response = requests.request(\n \"GET\",\n get_program_capability_types_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_program_capability_statuses(self):\n get_program_capability_statuses_url = self.base_url + \"capabilities/statuses\"\n\n response = requests.request(\n \"GET\",\n get_program_capability_statuses_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_program_capability_priorities(self):\n get_program_capability_priorities_url = (\n self.base_url + \"capabilities/priorities\"\n )\n\n response = requests.request(\n \"GET\",\n get_program_capability_priorities_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_program_milestone_types(self):\n get_program_milestone_types_url = self.base_url + \"program-milestones/types\"\n\n response = requests.request(\n \"GET\",\n get_program_milestone_types_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()\n\n def get_program_milestone_statuses(self):\n get_program_milestone_statuses_url = (\n self.base_url + \"program-milestones/statuses\"\n )\n\n response = requests.request(\n \"GET\",\n 
get_program_milestone_statuses_url,\n headers=self.headers,\n verify=self.verify,\n )\n\n return response.json()"
},
{
"identifier": "convert_jira_markup_to_html",
"path": "utility.py",
"snippet": "def convert_jira_markup_to_html(jira_connection_dict, skip_ssl, jira_markup: str):\n render_markup_url = jira_connection_dict[\"jira_base_url\"] + \"/rest/api/1.0/render\"\n\n if jira_markup is None or jira_markup == \"\":\n return \"--EMPTY--\"\n\n # Strip all the \\x unicode chars.\n jira_markup = re.sub(r\"\\\\x([0-9a-fA-F]{2})\", \"\", jira_markup)\n\n # Try to dump a string to json. If it fails return a standard string and warning messages.\n if not try_json_dump_string(jira_markup):\n return \"--MIGRATION OF TEXT FAILED because of error during JSON validation--\"\n\n headers = {\n \"Content-Type\": \"application/json\",\n }\n\n body = {\n \"rendererType\": \"atlassian-wiki-renderer\",\n \"unrenderedMarkup\": jira_markup,\n }\n\n response = requests.request(\n \"POST\",\n render_markup_url,\n headers=headers,\n verify=(not skip_ssl),\n data=json.dumps(body),\n )\n\n if response.status_code != 200:\n print(response.text)\n print(\"Conversion of text from jira markup to html failed for text:\")\n print(jira_markup)\n print(repr(jira_markup))\n return \"--MIGRATION OF TEXT FAILED because of jira renderer error--\"\n else:\n return response.text"
},
{
"identifier": "jira_string_field_to_spira_custom_prop",
"path": "convert_jira_to_spira_issues.py",
"snippet": "def jira_string_field_to_spira_custom_prop(\n spira_metadata, artifact_type, spira_custom_prop_name, issue_field_value\n) -> dict | None:\n spira_custom_props = spira_metadata[\"custom_properties\"][artifact_type]\n\n custom_prop_data = next(\n filter((lambda x: x[\"Name\"] == spira_custom_prop_name), spira_custom_props),\n None,\n )\n\n if custom_prop_data:\n custom_prop = {\n \"PropertyNumber\": custom_prop_data[\"PropertyNumber\"],\n \"StringValue\": issue_field_value,\n \"IntegerValue\": None,\n \"BooleanValue\": None,\n \"DateTimeValue\": None,\n \"DecimalValue\": None,\n \"IntegerListValue\": None,\n \"Definition\": {\n \"CustomPropertyId\": custom_prop_data[\"CustomPropertyId\"],\n \"ProjectTemplateId\": None\n if artifact_type == \"capability\"\n else spira_metadata[\"project\"][\"ProjectTemplateId\"],\n \"ArtifactTypeId\": custom_prop_data[\"ArtifactTypeId\"],\n \"Name\": custom_prop_data[\"CustomPropertyFieldName\"],\n \"CustomList\": None,\n \"CustomPropertyFieldName\": custom_prop_data[\"CustomPropertyFieldName\"],\n \"CustomPropertyTypeId\": custom_prop_data[\"CustomPropertyTypeId\"],\n \"CustomPropertyTypeName\": \"Text\",\n \"IsDeleted\": False,\n \"PropertyNumber\": custom_prop_data[\"PropertyNumber\"],\n \"SystemDataType\": \"System.String\",\n \"Options\": None,\n \"Position\": None,\n \"Description\": None,\n \"Guid\": None,\n \"ConcurrencyGuid\": None,\n \"LastUpdateDate\": None,\n },\n }\n return custom_prop\n else:\n return None"
},
{
"identifier": "jira_datetime_field_to_spira_custom_prop",
"path": "convert_jira_to_spira_issues.py",
"snippet": "def jira_datetime_field_to_spira_custom_prop(\n spira_metadata, artifact_type, spira_custom_prop_name, issue_field_value\n) -> dict | None:\n spira_custom_props = spira_metadata[\"custom_properties\"][artifact_type]\n\n custom_prop_data = next(\n filter((lambda x: x[\"Name\"] == spira_custom_prop_name), spira_custom_props),\n None,\n )\n\n if issue_field_value:\n issue_field_value = convert_datetime(issue_field_value)\n\n if custom_prop_data:\n custom_prop = {\n \"PropertyNumber\": custom_prop_data[\"PropertyNumber\"],\n \"StringValue\": None,\n \"IntegerValue\": None,\n \"BooleanValue\": None,\n \"DateTimeValue\": issue_field_value,\n \"DecimalValue\": None,\n \"IntegerListValue\": None,\n \"Definition\": {\n \"CustomPropertyId\": custom_prop_data[\"CustomPropertyId\"],\n \"ProjectTemplateId\": None\n if artifact_type == \"capability\"\n else spira_metadata[\"project\"][\"ProjectTemplateId\"],\n \"ArtifactTypeId\": custom_prop_data[\"ArtifactTypeId\"],\n \"Name\": custom_prop_data[\"CustomPropertyFieldName\"],\n \"CustomList\": None,\n \"CustomPropertyFieldName\": custom_prop_data[\"CustomPropertyFieldName\"],\n \"CustomPropertyTypeId\": custom_prop_data[\"CustomPropertyTypeId\"],\n \"CustomPropertyTypeName\": \"Date & Time\",\n \"IsDeleted\": False,\n \"PropertyNumber\": custom_prop_data[\"PropertyNumber\"],\n \"SystemDataType\": \"System.DateTime\",\n \"Options\": None,\n \"Position\": None,\n \"Description\": None,\n \"Guid\": None,\n \"ConcurrencyGuid\": None,\n \"LastUpdateDate\": None,\n },\n }\n return custom_prop\n else:\n return None"
}
] | from spira import Spira
from utility import convert_jira_markup_to_html
from convert_jira_to_spira_issues import (
jira_string_field_to_spira_custom_prop,
jira_datetime_field_to_spira_custom_prop,
)
import json | 9,399 |
def convert_jira_to_spira_issue_elements(
jira_connection_dict,
skip_ssl,
jira_output_dict,
mapping_dict,
all_artifacts_in_spira,
action,
spira: Spira,
spira_metadata={},
jira_metadata={},
):
print("Starting conversion")
to_validate = open("temp/to_spira.json", "w")
validation_dict = {"update_action": "", "artifacts": []}
issues = jira_output_dict["issues"]
issues_with_outward_links = []
all_outward_links = []
if action == "associations":
# For all issues found
for issue in issues:
# Check if issue has any links
if issue["fields"]["issuelinks"]:
# If there are links check which of those are outward links
for link in issue["fields"]["issuelinks"]:
artifact = {"project_id": mapping_dict["spira_product_id"]}
if "outwardIssue" in link.keys():
source_id_data = get_artifact_id_data_from_jira_id(
issue["key"], all_artifacts_in_spira
)
dest_id_data = get_artifact_id_data_from_jira_id(
link["outwardIssue"]["key"], all_artifacts_in_spira
)
issues_with_outward_links.append(issue["key"])
all_outward_links.append(link["outwardIssue"]["key"])
if source_id_data and dest_id_data:
payload = {
# "ArtifactLinkId":None,
"SourceArtifactId": source_id_data["artifact_id"],
"SourceArtifactTypeId": source_id_data[
"artifact_type_id"
],
"DestArtifactId": dest_id_data["artifact_id"],
"DestArtifactTypeId": dest_id_data["artifact_type_id"],
"ArtifactLinkTypeId": 1, # At the moment they will all be set to "relates to"
# "CreatorId":None,
"Comment": link["type"]["outward"],
# "CreationDate":None,
# "DestArtifactName":None,
# "DestArtifactTypeName":None,
# "CreatorName":None,
# "ArtifactLinkTypeName":None,
# "Guid":None,
# "ConcurrencyGuid":None,
# "LastUpdateDate":None
}
validation_dict["update_action"] = "association"
artifact["payload"] = payload
validation_dict["artifacts"].append(artifact)
print(
"Found "
+ str(len(list(set(issues_with_outward_links))))
+ " issues with at total of "
+ str(len(all_outward_links))
+ "links"
)
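        # Hypothetical downstream step (sketch only): each collected entry is shaped for
        # Spira.add_association from spira.py, e.g.
        #   for artifact in validation_dict["artifacts"]:
        #       spira.add_association(artifact["project_id"], artifact["payload"])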
elif action == "comments":
# For all issues found
for issue in issues:
            # Check if issue has any comments
if issue["fields"]["comment"]["comments"]:
                # If there are comments, build a Spira comment payload for each of them
for comment in issue["fields"]["comment"]["comments"]:
artifact = {"project_id": mapping_dict["spira_product_id"]}
source_id_data = get_artifact_id_data_from_jira_id(
issue["key"], all_artifacts_in_spira
)
userinfo = get_user_info_from_email(
comment["author"]["emailAddress"], spira_metadata["users"]
)
if source_id_data:
payload = {
# "CommentId":None, ReadOnly
"ArtifactId": source_id_data["artifact_id"],
# "Guid":None,
"UserId": userinfo["spira_id"],
# "UserGuid":None,
"UserName": userinfo["name"],
|
def convert_jira_to_spira_issue_elements(
jira_connection_dict,
skip_ssl,
jira_output_dict,
mapping_dict,
all_artifacts_in_spira,
action,
spira: Spira,
spira_metadata={},
jira_metadata={},
):
print("Starting conversion")
to_validate = open("temp/to_spira.json", "w")
validation_dict = {"update_action": "", "artifacts": []}
issues = jira_output_dict["issues"]
issues_with_outward_links = []
all_outward_links = []
if action == "associations":
# For all issues found
for issue in issues:
# Check if issue has any links
if issue["fields"]["issuelinks"]:
# If there are links check which of those are outward links
for link in issue["fields"]["issuelinks"]:
artifact = {"project_id": mapping_dict["spira_product_id"]}
if "outwardIssue" in link.keys():
source_id_data = get_artifact_id_data_from_jira_id(
issue["key"], all_artifacts_in_spira
)
dest_id_data = get_artifact_id_data_from_jira_id(
link["outwardIssue"]["key"], all_artifacts_in_spira
)
issues_with_outward_links.append(issue["key"])
all_outward_links.append(link["outwardIssue"]["key"])
if source_id_data and dest_id_data:
payload = {
# "ArtifactLinkId":None,
"SourceArtifactId": source_id_data["artifact_id"],
"SourceArtifactTypeId": source_id_data[
"artifact_type_id"
],
"DestArtifactId": dest_id_data["artifact_id"],
"DestArtifactTypeId": dest_id_data["artifact_type_id"],
"ArtifactLinkTypeId": 1, # At the moment they will all be set to "relates to"
# "CreatorId":None,
"Comment": link["type"]["outward"],
# "CreationDate":None,
# "DestArtifactName":None,
# "DestArtifactTypeName":None,
# "CreatorName":None,
# "ArtifactLinkTypeName":None,
# "Guid":None,
# "ConcurrencyGuid":None,
# "LastUpdateDate":None
}
validation_dict["update_action"] = "association"
artifact["payload"] = payload
validation_dict["artifacts"].append(artifact)
print(
"Found "
+ str(len(list(set(issues_with_outward_links))))
+ " issues with at total of "
+ str(len(all_outward_links))
+ "links"
)
elif action == "comments":
# For all issues found
for issue in issues:
            # Check if issue has any comments
if issue["fields"]["comment"]["comments"]:
                # If there are comments, build a Spira comment payload for each of them
for comment in issue["fields"]["comment"]["comments"]:
artifact = {"project_id": mapping_dict["spira_product_id"]}
source_id_data = get_artifact_id_data_from_jira_id(
issue["key"], all_artifacts_in_spira
)
userinfo = get_user_info_from_email(
comment["author"]["emailAddress"], spira_metadata["users"]
)
if source_id_data:
payload = {
# "CommentId":None, ReadOnly
"ArtifactId": source_id_data["artifact_id"],
# "Guid":None,
"UserId": userinfo["spira_id"],
# "UserGuid":None,
"UserName": userinfo["name"], | "Text": convert_jira_markup_to_html( | 1 | 2023-11-28 20:31:27+00:00 | 12k |
Zuricho/chroma_pipeline | chroma/models/graph_classifier.py | [
{
"identifier": "validate_XC",
"path": "chroma/data/xcs.py",
"snippet": "def validate_XCS(all_atom=None, sequence=True):\n def decorator(func):\n def new_func(*args, **kwargs):"
},
{
"identifier": "basic",
"path": "chroma/layers/basic.py",
"snippet": "class NoOp(nn.Module):\nclass Transpose(nn.Module):\nclass Unsqueeze(nn.Module):\nclass OneHot(nn.Module):\nclass MeanEmbedding(nn.Module):\nclass PeriodicPositionalEncoding(nn.Module):\nclass PositionWiseFeedForward(nn.Module):\nclass DropNormLin(nn.Module):\nclass ResidualLinearLayer(nn.Module):\nclass TriangleMultiplication(nn.Module):\nclass NodeProduct(nn.Module):\nclass FourierFeaturization(nn.Module):\nclass PositionalEncoding(nn.Module):\nclass MaybeOnehotEmbedding(nn.Embedding):\n def __init__(self):\n def forward(self, x, **kwargs):\n def __init__(self, d1=1, d2=2):\n def forward(self, x):\n def __init__(self, dim=1):\n def forward(self, x):\n def __init__(self, n_tokens):\n def forward(self, x):\n def __init__(self, embedding, use_softmax=True):\n def forward(self, x):\n def __init__(self, d_model, max_seq_len=4000, dropout=0.0):\n def forward(self, x):\n def __init__(self, d_model, d_hidden, dropout=0.1):\n def reset_parameters(self):\n def forward(self, x):\n def __init__(\n self, in_features, out_features, norm_type=\"ln\", dropout=0.0, actn=nn.ReLU()\n ):\n def forward(self, x, input_mask=None):\n def __init__(self, d_model, use_norm=True):\n def forward(self, x):\n def __init__(self, d_model=512, mode=\"outgoing\"):\n def forward(self, X, mask=None):\n def __init__(self, d_in, d_out):\n def forward(self, node_features, node_mask=None, edge_mask=None):\n def __init__(self, d_input, d_model, trainable=False, scale=1.0):\n def forward(self, inputs):\n def __init__(self, d_model, d_input=1, period_range=(1.0, 1000.0)):\n def forward(self, inputs):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n A = self.left_edge_mlp(h)\n B = self.right_edge_mlp(h)\n G = self.skip(h)\n A = A.masked_fill(~mask, 0.0)\n B = B.masked_fill(~mask, 0.0)\n B = 2 * math.pi * scale * torch.randn(d_input, d_model // 2)"
},
{
"identifier": "AttentionChainPool",
"path": "chroma/layers/attention.py",
"snippet": "class AttentionChainPool(nn.Module):\n \"\"\"Pools residue-based representations to chain-based representations using a chain mask and attention.\n Args:\n n_head (int): number of attention heads\n d_model (int): dimension of embeddings to be pooled\n\n Inputs:\n h (torch.tensor): of size (batch_size, sequence_length, d_model)\n C (torch.tensor): of size (batch_size, sequence_length)\n\n Outputs:\n output (torch.tensor): of size (batch_size, n_chains, d_model)\n chain_mask (torch.tensor): of size (batch_size, n_chains)\n \"\"\"\n\n def __init__(self, n_head, d_model):\n super().__init__()\n self.attention = MultiHeadAttention(\n n_head, d_model, d_model, d_model, dropout=0.0\n )\n\n def get_query(self, x):\n return torch.ones(x.size(0), 1, x.size(2)).type(x.dtype).to(x.device)\n\n def forward(self, h, C):\n bs, num_res = C.size()\n chains = C.abs().unique()\n chains = (\n chains[chains > 0].unsqueeze(-1).repeat(1, bs).reshape(-1).unsqueeze(-1)\n )\n num_chains = len(chains.unique())\n\n h_repeat = h.repeat(num_chains, 1, 1)\n C_repeat = C.repeat(num_chains, 1)\n mask = (C_repeat == chains).unsqueeze(-2)\n\n output, _ = self.attention(\n self.get_query(h_repeat), h_repeat, h_repeat, mask=mask\n )\n output = torch.cat(output.split(bs), 1)\n chain_mask = torch.stack(mask.squeeze(1).any(dim=-1).split(bs), -1)\n return output, chain_mask"
},
{
"identifier": "NodeProduct",
"path": "chroma/layers/basic.py",
"snippet": "class NodeProduct(nn.Module):\n \"\"\"Like Alg. 10 in Jumper et al. (2021) but instead of computing a mean over MSA dimension,\n process for single-sequence inputs.\n Args:\n d_in (int): dimension of node embeddings (inputs)\n d_out (int): dimension of edge embeddings (outputs)\n\n Inputs:\n node_features (torch.tensor): of size (batch_size, nres, d_model)\n node_mask (torch.tensor): of size (batch_size, nres)\n edge_mask (torch.tensor): of size (batch_size, nres, nres)\n\n Outputs:\n edge_features (torch.tensor): of size (batch_size, nres, nres, d_model)\n \"\"\"\n\n def __init__(self, d_in, d_out):\n super().__init__()\n self.layer_norm = nn.LayerNorm(d_in)\n self.left_lin = nn.Linear(d_in, d_in)\n self.right_lin = nn.Linear(d_in, d_in)\n self.edge_lin = nn.Linear(2 * d_in, d_out)\n\n def forward(self, node_features, node_mask=None, edge_mask=None):\n _, nres, _ = node_features.size()\n\n node_features = self.layer_norm(node_features)\n left_embs = self.left_lin(node_features)\n right_embs = self.right_lin(node_features)\n\n if node_mask is not None:\n mask = node_mask[:, :, None]\n left_embs = left_embs.masked_fill(~mask, 0.0)\n right_embs = right_embs.masked_fill(~mask, 0.0)\n\n left_embs = left_embs[:, None, :, :].repeat(1, nres, 1, 1)\n right_embs = right_embs[:, :, None, :].repeat(1, 1, nres, 1)\n edge_features = torch.cat([left_embs, right_embs], dim=-1)\n edge_features = self.edge_lin(edge_features)\n\n if edge_mask is not None:\n mask = edge_mask[:, :, :, None]\n edge_features = edge_features.masked_fill(~mask, 0.0)\n\n return edge_features"
},
{
"identifier": "NoOp",
"path": "chroma/layers/basic.py",
"snippet": "class NoOp(nn.Module):\n \"\"\"A dummy nn.Module wrapping an identity operation.\n\n Inputs:\n x (any)\n\n Outputs:\n x (any)\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def forward(self, x, **kwargs):\n return x"
},
{
"identifier": "MLP",
"path": "chroma/layers/graph.py",
"snippet": "class MLP(nn.Module):\n \"\"\"Multilayer perceptron with variable input, hidden, and output dims.\n\n Args:\n dim_in (int): Feature dimension of input tensor.\n dim_hidden (int or None): Feature dimension of intermediate layers.\n Defaults to matching output dimension.\n dim_out (int or None): Feature dimension of output tensor.\n Defaults to matching input dimension.\n num_layers_hidden (int): Number of hidden MLP layers.\n activation (str): MLP nonlinearity.\n `'relu'`: Rectified linear unit.\n `'softplus'`: Softplus.\n dropout (float): Dropout rate. Default is 0.\n\n Inputs:\n h (torch.Tensor): Input tensor with shape `(..., dim_in)`\n\n Outputs:\n h (torch.Tensor): Input tensor with shape `(..., dim_in)`\n \"\"\"\n\n def __init__(\n self,\n dim_in: int,\n dim_hidden: Optional[int] = None,\n dim_out: Optional[int] = None,\n num_layers_hidden: int = 1,\n activation: str = \"relu\",\n dropout: float = 0.0,\n ):\n super(MLP, self).__init__()\n\n # Default is dimension preserving\n dim_out = dim_out if dim_out is not None else dim_in\n dim_hidden = dim_hidden if dim_hidden is not None else dim_out\n\n nonlinearites = {\"relu\": nn.ReLU, \"softplus\": nn.Softplus}\n activation_func = nonlinearites[activation]\n\n if num_layers_hidden == 0:\n layers = [nn.Linear(dim_in, dim_out)]\n else:\n layers = []\n for i in range(num_layers_hidden):\n d_1 = dim_in if i == 0 else dim_hidden\n layers = layers + [\n nn.Linear(d_1, dim_hidden),\n activation_func(),\n nn.Dropout(dropout),\n ]\n layers = layers + [nn.Linear(dim_hidden, dim_out)]\n self.layers = nn.Sequential(*layers)\n\n def forward(self, h: torch.Tensor) -> torch.Tensor:\n return self.layers(h)"
},
{
"identifier": "MaskedNorm",
"path": "chroma/layers/graph.py",
"snippet": "class MaskedNorm(nn.Module):\n \"\"\"Masked normalization layer.\n\n Args:\n dim (int): Dimensionality of the normalization. Can be 1 for 1D\n normalization along dimension 1 or 2 for 2D normalization along\n dimensions 1 and 2.\n num_features (int): Channel dimension; only needed if `affine` is True.\n affine (bool): If True, inclde a learnable affine transformation\n post-normalization. Default is False.\n norm (str): Type of normalization, can be `instance`, `layer`, or\n `transformer`.\n eps (float): Small number for numerical stability.\n\n Inputs:\n data (torch.Tensor): Input tensor with shape\n `(num_batch, num_nodes, num_channels)` (1D) or\n `(num_batch, num_nodes, num_nodes, num_channels)` (2D).\n mask (torch.Tensor): Mask tensor with shape\n `(num_batch, num_nodes)` (1D) or\n `(num_batch, num_nodes, num_nodes)` (2D).\n\n Outputs:\n norm_data (torch.Tensor): Mask-normalized tensor with shape\n `(num_batch, num_nodes, num_channels)` (1D) or\n `(num_batch, num_nodes, num_nodes, num_channels)` (2D).\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n num_features: int = -1,\n affine: bool = False,\n norm: str = \"instance\",\n eps: float = 1e-5,\n ):\n super(MaskedNorm, self).__init__()\n\n self.norm_type = norm\n self.dim = dim\n self.norm = norm + str(dim)\n self.affine = affine\n self.eps = eps\n\n # Dimension to sum\n if self.norm == \"instance1\":\n self.sum_dims = [1]\n elif self.norm == \"layer1\":\n self.sum_dims = [1, 2]\n elif self.norm == \"transformer1\":\n self.sum_dims = [-1]\n elif self.norm == \"instance2\":\n self.sum_dims = [1, 2]\n elif self.norm == \"layer2\":\n self.sum_dims = [1, 2, 3]\n elif self.norm == \"transformer2\":\n self.sum_dims = [-1]\n else:\n raise NotImplementedError\n\n # Number of features, only required if affine\n self.num_features = num_features\n\n # Affine transformation is a linear layer on the C channel\n if self.affine:\n self.weights = nn.Parameter(torch.rand(self.num_features))\n self.bias = nn.Parameter(torch.zeros(self.num_features))\n\n def forward(\n self, data: torch.Tensor, mask: Optional[torch.Tensor] = None\n ) -> torch.Tensor:\n # Add optional trailing singleton dimension and expand if necessary\n if mask is not None:\n if len(mask.shape) == len(data.shape) - 1:\n mask = mask.unsqueeze(-1)\n if data.shape != mask.shape:\n mask = mask.expand(data.shape)\n\n # Input shape is Batch, Channel, Dim1, (dim2 if 2d)\n dims = self.sum_dims\n if (mask is None) or (self.norm_type == \"transformer\"):\n mask_mean = data.mean(dim=dims, keepdim=True)\n mask_std = torch.sqrt(\n (((data - mask_mean)).pow(2)).mean(dim=dims, keepdim=True) + self.eps\n )\n\n # Norm\n norm_data = (data - mask_mean) / mask_std\n\n else:\n # Zeroes vector to sum all mask data\n norm_data = torch.zeros_like(data).to(data.device).type(data.dtype)\n for mask_id in mask.unique():\n # Skip zero, since real mask\n if mask_id == 0:\n continue\n\n # Transform mask to temp mask that match mask id\n tmask = (mask == mask_id).type(torch.float32)\n\n # Sum mask for mean\n mask_sum = tmask.sum(dim=dims, keepdim=True)\n\n # Data is tmask, so that mean is only for unmasked pos\n mask_mean = (data * tmask).sum(dim=dims, keepdim=True) / mask_sum\n mask_std = torch.sqrt(\n (((data - mask_mean) * tmask).pow(2)).sum(dim=dims, keepdim=True)\n / mask_sum\n + self.eps\n )\n\n # Calculate temp norm, apply mask\n tnorm = ((data - mask_mean) / mask_std) * tmask\n # Sometime mask is empty, so generate nan that are conversted to 0\n tnorm[tnorm != tnorm] = 0\n\n # Add to init zero norm 
data\n norm_data += tnorm\n\n # Apply affine\n if self.affine:\n norm_data = norm_data * self.weights + self.bias\n\n # If mask, apply mask\n if mask is not None:\n norm_data = norm_data * (mask != 0).type(data.dtype)\n return norm_data"
},
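
A minimal usage sketch for the MaskedNorm layer documented above (this is not code from the repository; it assumes the chroma package is installed and uses illustrative tensor sizes):

import torch
from chroma.layers.graph import MaskedNorm

# Two graphs, 10 nodes each, 128 channels per node: (num_batch, num_nodes, num_channels).
node_h = torch.randn(2, 10, 128)
# Mask of shape (num_batch, num_nodes); zeros mark padded nodes.
mask = torch.ones(2, 10)
mask[1, 7:] = 0  # pretend the second graph only has 7 real nodes

# 1D layer norm with a learnable affine transform, mirroring the configuration
# used in GraphClassifier._init_heads further down in this row.
norm = MaskedNorm(dim=1, num_features=128, affine=True, norm="layer")
norm_h = norm(node_h, mask)  # padded positions come back as zeros
print(norm_h.shape)  # torch.Size([2, 10, 128])
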
{
"identifier": "diffusion",
"path": "chroma/layers/structure/diffusion.py",
"snippet": "class GaussianNoiseSchedule:\nclass NoiseTimeEmbedding(nn.Module):\nclass DiffusionChainCov(nn.Module):\nclass ReconstructionLosses(nn.Module):\n def __init__(\n self, log_snr_range: Tuple[float, float] = (-7.0, 13.5), kind: str = \"log_snr\",\n ) -> None:\n def t_map(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def derivative(self, t: torch.Tensor, func: Callable) -> torch.Tensor:\n def tensor_check(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def alpha_func(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def sigma_func(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def alpha(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def sigma(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def alpha_deriv(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def sigma_deriv(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def beta(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def g(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def SNR(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def log_SNR(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def compute_t_range(self, log_snr: Union[float, torch.Tensor]) -> torch.Tensor:\n def SNR_derivative(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def SSNR(self, t: Union[float, torch.Tensor]) -> torch.Tensor:\n def SSNR_inv(self, ssnr: torch.Tensor) -> torch.Tensor:\n def SSNR_inv_deriv(self, ssnr: Union[float, torch.Tensor]) -> torch.Tensor:\n def prob_SSNR(self, ssnr: Union[float, torch.Tensor]) -> torch.Tensor:\n def linear_logsnr_grid(self, N: int, tspan: Tuple[float, float]) -> torch.Tensor:\n def __init__(\n self,\n dim_embedding: int,\n noise_schedule: GaussianNoiseSchedule,\n rff_scale: float = 0.8,\n feature_type: str = \"log_snr\",\n ) -> None:\n def forward(\n self, t: torch.Tensor, log_alpha: Optional[torch.Tensor] = None\n ) -> torch.Tensor:\n def __init__(\n self,\n log_snr_range: Tuple[float, float] = (-7.0, 13.5),\n noise_schedule: str = \"log_snr\",\n sigma_translation: float = 1.0,\n covariance_model: str = \"brownian\",\n complex_scaling: bool = False,\n **kwargs,\n ) -> None:\n def sample_t(\n self,\n C: torch.LongTensor,\n t: Optional[torch.Tensor] = None,\n inverse_CDF: Optional[Callable] = None,\n ) -> torch.Tensor:\n def sde_forward(self, X, C, t, Z=None):\n def _schedule_coefficients(\n self,\n t: torch.Tensor,\n inverse_temperature: float = 1.0,\n langevin_isothermal: bool = True,\n ) -> Tuple[\n def langevin(\n self,\n X: torch.Tensor,\n X0_func: Callable,\n C: torch.LongTensor,\n t: Union[torch.Tensor, float],\n conditioner: Callable = None,\n Z: Union[torch.Tensor, None] = None,\n inverse_temperature: float = 1.0,\n langevin_factor: float = 0.0,\n langevin_isothermal: bool = True,\n align_X0: bool = True,\n ):\n def reverse_sde(\n self,\n X: torch.Tensor,\n X0_func: Callable,\n C: torch.LongTensor,\n t: Union[torch.Tensor, float],\n conditioner: Callable = None,\n Z: Union[torch.Tensor, None] = None,\n inverse_temperature: float = 1.0,\n langevin_factor: float = 0.0,\n langevin_isothermal: bool = True,\n align_X0: bool = True,\n ):\n def ode(\n self,\n X: torch.Tensor,\n X0_func: Callable,\n C: torch.LongTensor,\n t: Union[torch.Tensor, float],\n conditioner: Callable = None,\n Z: Union[torch.Tensor, None] = None,\n inverse_temperature: float = 1.0,\n langevin_factor: float = 0.0,\n langevin_isothermal: bool = True,\n align_X0: bool = True,\n detach_X0: bool = True,\n ):\n def energy(\n self,\n X: torch.Tensor,\n 
X0_func: Callable,\n C: torch.Tensor,\n t: torch.Tensor,\n detach_X0: bool = True,\n align_X0: bool = True,\n ) -> torch.Tensor:\n def score(\n self,\n X: torch.Tensor,\n X0_func: Callable,\n C: torch.Tensor,\n t: Union[torch.Tensor, float],\n conditioner: Callable = None,\n detach_X0: bool = True,\n align_X0: bool = True,\n U_traj: List = [],\n ) -> torch.Tensor:\n def elbo(self, X0_pred, X0, C, t):\n def pseudoelbo(self, loss_per_residue, C, t):\n def _baoab_sample_step(\n self,\n _x,\n p,\n C,\n t,\n dt,\n score_func,\n gamma=2.0,\n kT=1.0,\n n_equil=1,\n ode_boost=True,\n langevin_isothermal=False,\n ):\n def baoab_step(_x, p, t):\n def ode_step(t, _x):\n def sample_sde(\n self,\n X0_func: Callable,\n C: torch.LongTensor,\n X_init: Optional[torch.Tensor] = None,\n conditioner: Optional[Callable] = None,\n N: int = 100,\n tspan: Tuple[float, float] = (1.0, 0.001),\n inverse_temperature: float = 1.0,\n langevin_factor: float = 0.0,\n langevin_isothermal: bool = True,\n sde_func: str = \"reverse_sde\",\n integrate_func: str = \"euler_maruyama\",\n initialize_noise: bool = True,\n remap_time: bool = False,\n remove_drift_translate: bool = False,\n remove_noise_translate: bool = False,\n align_X0: bool = True,\n ) -> Dict[str, torch.Tensor]:\n def _X0_func(_X, _C, t):\n def sdefun(_t, _X):\n def estimate_pseudoelbo_X(\n self,\n X0_func,\n X,\n C,\n num_samples=50,\n deterministic_seed=0,\n return_elbo_t=False,\n noise=True,\n ):\n def _score_direct(\n self, Xt, X0_func, C, t, align_X0=True,\n ):\n def estimate_logp(\n self,\n X0_func: Callable,\n X_sample: torch.Tensor,\n C: torch.LongTensor,\n N: int,\n return_trace_t: bool = False,\n ):\n def divergence(fn, x, t):\n def flow_gradient(\n X, X0_func, C, t,\n ):\n def odefun(_t, _X):\n def estimate_elbo(\n self,\n X0_func: Callable,\n X: torch.Tensor,\n C: torch.LongTensor,\n num_samples: int = 50,\n deterministic_seed: int = 0,\n return_elbo_t: bool = False,\n grad_logprob_Y_func: Optional[Callable] = None,\n ) -> torch.Tensor:\n def conditional_X0(\n self, X0: torch.Tensor, score: torch.Tensor, C: torch.tensor, t: torch.Tensor\n ) -> torch.Tensor:\n def _mean(self, X, C, alpha):\n def _X_to_Z(self, X_sample, X, C, alpha, sigma):\n def _Z_to_X(self, Z, X, C, alpha, sigma):\n def sample_conditional(\n self, X: torch.Tensor, C: torch.LongTensor, t: torch.Tensor, s: torch.Tensor\n ) -> torch.Tensor:\n def forward(\n self, X: torch.Tensor, C: torch.LongTensor, t: Optional[torch.Tensor] = None\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n def __init__(\n self,\n diffusion: DiffusionChainCov,\n loss_scale: float = 10.0,\n rmsd_method: str = \"symeig\",\n ):\n def _batch_average(self, loss, C):\n def _loss_elbo(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_rmsd(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_pseudoelbo(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_fragment(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_pair(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_neighborhood(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_distance(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def _loss_hbonds(self, losses, X0_pred, X, C, t, w=None, X_t_2=None):\n def estimate_metrics(\n self,\n X0_func: Callable,\n X: torch.Tensor,\n C: torch.LongTensor,\n num_samples: int = 50,\n deterministic_seed: int = 0,\n use_noise: bool = True,\n return_samples: bool = False,\n tspan: Tuple[float] = (1e-4, 1.0),\n ):\n def forward(\n self,\n X0_pred: 
torch.Tensor,\n X: torch.Tensor,\n C: torch.LongTensor,\n t: torch.Tensor,\n ):\ndef _debug_viz_gradients(\n pml_file, X_list, dX_list, C, S, arrow_length=2.0, name=\"gradient\", color=\"red\"\n):\ndef _debug_viz_XZC(X, Z, C, rgb=True):\n SNR = self.log_SNR(t).exp()\n SNR = self.alpha(t).pow(2) / (self.sigma(t).pow(2))\n Z = torch.randn_like(X)\n Z = Z.reshape(X.shape[0], -1, 3)\n R_Z = self.base_gaussian._multiply_R(Z, C).reshape(X.shape)\n X = backbone.center_X(X, C)\n Z = torch.randn_like(X) if Z is None else Z\n Z = torch.randn_like(X) if Z is None else Z\n X = backbone.center_X(X, C)\n X = backbone.impute_masked_X(X, C)\n X0 = X0_func(X, C, t=t)\n X0 = X0_func(X, C, t=t)\n X0, _ = self.loss_rmsd.align(X0, X, C, align_unmasked=True)\n X0 = X0.detach()\n Z = self._X_to_Z(X, X0, C, alpha, sigma)\n X = backbone.impute_masked_X(X, C)\n X = X.detach().clone()\n X0 = backbone.impute_masked_X(X0, C)\n Z = torch.randn_like(_x)\n _X0 = X0_func(_X, _C, t)\n T = np.linspace(1e-4, 1.0, num_samples)\n X0 = X0_func(Xt, C, t)\n X0, _ = self.loss_rmsd.align(X0, Xt, C, align_unmasked=True)\n C = C.abs()\n X = backbone.impute_masked_X(X, C)\n T = np.linspace(1e-4, 1.0, num_samples)\n X = backbone.impute_masked_X(X, C)\n Z = self.base_gaussian._multiply_R_inverse(X_noise, C)\n X = backbone.impute_masked_X(X, C)\n X = backbone.center_X(X, C)\n X = backbone.impute_masked_X(X, C)\n T = np.linspace(1e-4, 1.0, num_samples)\n X = X.reshape(X.shape[0], -1, 3)\n Z = Z.reshape(Z.shape[0], -1, 3)\n C = C_expand.reshape(C.shape[0], -1)\n N = X.shape[1]"
},
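
The diffusion snippet above is mostly an API listing; the one relation visible in it is that the signal-to-noise ratio follows from alpha and sigma. A small sketch (assuming chroma is installed) that exercises GaussianNoiseSchedule with its default log-SNR range:

import torch
from chroma.layers.structure.diffusion import GaussianNoiseSchedule

schedule = GaussianNoiseSchedule(log_snr_range=(-7.0, 13.5), kind="log_snr")
t = torch.tensor([0.1, 0.5, 0.9])

# As the tail of the snippet shows, SNR(t) = alpha(t)^2 / sigma(t)^2 = exp(log_SNR(t)).
print(schedule.SNR(t))
print(schedule.alpha(t).pow(2) / schedule.sigma(t).pow(2))  # should match the line above
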
{
"identifier": "BackboneEncoderGNN",
"path": "chroma/models/graph_design.py",
"snippet": "class BackboneEncoderGNN(nn.Module):\n \"\"\"Graph Neural Network for processing protein structure into graph embeddings.\n\n Args:\n See documention of `structure.protein_graph.ProteinFeatureGraph`,\n and `graph.GraphNN` for more details.\n\n dim_nodes (int): Hidden dimension of node tensors.\n dim_edges (int): Hidden dimension of edge tensors.\n num_neighbors (int): Number of neighbors per nodes.\n node_features (tuple): List of node feature specifications. Features\n can be given as strings or as dictionaries.\n edge_features (tuple): List of edge feature specifications. Features\n can be given as strings or as dictionaries.\n num_layers (int): Number of layers.\n node_mlp_layers (int): Number of hidden layers for node update\n function.\n node_mlp_dim (int, optional): Dimension of hidden layers for node update\n function, defaults to match output dimension.\n edge_update (bool): Whether to include an edge update step.\n edge_mlp_layers (int): Number of hidden layers for edge update\n function.\n edge_mlp_dim (int, optional): Dimension of hidden layers for edge update\n function, defaults to match output dimension.\n skip_connect_input (bool): Whether to include skip connections between\n layers.\n mlp_activation (str): MLP nonlinearity function, `relu` or `softplus`\n accepted.\n dropout (float): Dropout fraction.\n graph_distance_atom_type (int): Atom type for computing residue-residue\n distances for graph construction. Negative values will specify\n centroid across atom types. Default is `-1` (centroid).\n graph_cutoff (float, optional): Cutoff distance for graph construction:\n mask any edges further than this cutoff. Default is `None`.\n graph_mask_interfaces (bool): Restrict connections only to within\n chains, excluding-between chain interactions. 
Default is `False`.\n graph_criterion (str): Method used for building graph from distances.\n Currently supported methods are `{knn, random_log, random_linear}`.\n Default is `knn`.\n graph_random_min_local (int): Minimum number of neighbors in GNN that\n come from local neighborhood, before random neighbors are chosen.\n checkpoint_gradients (bool): Switch to implement gradient checkpointing\n during training.\n\n Inputs:\n X (torch.Tensor): Backbone coordinates with shape\n `(num_batch, num_residues, num_atoms, 3)`.\n C (torch.LongTensor): Chain map with shape `(num_batch, num_residues)`.\n node_h_aux (torch.LongTensor, optional): Auxiliary node features with\n shape `(num_batch, num_residues, dim_nodes)`.\n edge_h_aux (torch.LongTensor, optional): Auxiliary edge features with\n shape `(num_batch, num_residues, num_neighbors, dim_edges)`.\n edge_idx (torch.LongTensor, optional): Input edge indices for neighbors\n with shape `(num_batch, num_residues, num_neighbors)`.\n mask_ij (torch.Tensor, optional): Input edge mask with shape\n `(num_batch, num_nodes, num_neighbors)`.\n\n Outputs:\n node_h (torch.Tensor): Node features with shape\n `(num_batch, num_residues, dim_nodes)`.\n edge_h (torch.Tensor): Edge features with shape\n `(num_batch, num_residues, num_neighbors, dim_edges)`.\n edge_idx (torch.LongTensor): Edge indices for neighbors with shape\n `(num_batch, num_residues, num_neighbors)`.\n mask_i (torch.Tensor): Node mask with shape `(num_batch, num_residues)`.\n mask_ij (torch.Tensor): Edge mask with shape\n `(num_batch, num_nodes, num_neighbors)`.\n \"\"\"\n\n def __init__(\n self,\n dim_nodes: int = 128,\n dim_edges: int = 128,\n num_neighbors: int = 30,\n node_features: tuple = ((\"internal_coords\", {\"log_lengths\": True}),),\n edge_features: tuple = (\n \"distances_2mer\",\n \"orientations_2mer\",\n \"distances_chain\",\n ),\n num_layers: int = 3,\n node_mlp_layers: int = 1,\n node_mlp_dim: Optional[int] = None,\n edge_update: bool = True,\n edge_mlp_layers: int = 1,\n edge_mlp_dim: Optional[int] = None,\n skip_connect_input: bool = False,\n mlp_activation: str = \"softplus\",\n dropout: float = 0.1,\n graph_distance_atom_type: int = -1,\n graph_cutoff: Optional[float] = None,\n graph_mask_interfaces: bool = False,\n graph_criterion: str = \"knn\",\n graph_random_min_local: int = 20,\n checkpoint_gradients: bool = False,\n **kwargs\n ) -> None:\n \"\"\"Initialize BackboneEncoderGNN.\"\"\"\n super(BackboneEncoderGNN, self).__init__()\n\n # Save configuration in kwargs\n self.kwargs = locals()\n self.kwargs.pop(\"self\")\n for key in list(self.kwargs.keys()):\n if key.startswith(\"__\") and key.endswith(\"__\"):\n self.kwargs.pop(key)\n args = SimpleNamespace(**self.kwargs)\n\n # Important global options\n self.dim_nodes = dim_nodes\n self.dim_edges = dim_edges\n self.checkpoint_gradients = checkpoint_gradients\n\n graph_kwargs = {\n \"distance_atom_type\": args.graph_distance_atom_type,\n \"cutoff\": args.graph_cutoff,\n \"mask_interfaces\": args.graph_mask_interfaces,\n \"criterion\": args.graph_criterion,\n \"random_min_local\": args.graph_random_min_local,\n }\n\n self.feature_graph = protein_graph.ProteinFeatureGraph(\n dim_nodes=args.dim_nodes,\n dim_edges=args.dim_edges,\n num_neighbors=args.num_neighbors,\n graph_kwargs=graph_kwargs,\n node_features=args.node_features,\n edge_features=args.edge_features,\n )\n\n self.gnn = graph.GraphNN(\n dim_nodes=args.dim_nodes,\n dim_edges=args.dim_edges,\n num_layers=args.num_layers,\n node_mlp_layers=args.node_mlp_layers,\n 
node_mlp_dim=args.node_mlp_dim,\n edge_update=args.edge_update,\n edge_mlp_layers=args.edge_mlp_layers,\n edge_mlp_dim=args.edge_mlp_dim,\n mlp_activation=args.mlp_activation,\n dropout=args.dropout,\n norm=\"transformer\",\n scale=args.num_neighbors,\n skip_connect_input=args.skip_connect_input,\n checkpoint_gradients=checkpoint_gradients,\n )\n\n @validate_XC(all_atom=False)\n def forward(\n self,\n X: torch.Tensor,\n C: torch.LongTensor,\n node_h_aux: Optional[torch.Tensor] = None,\n edge_h_aux: Optional[torch.Tensor] = None,\n edge_idx: Optional[torch.Tensor] = None,\n mask_ij: Optional[torch.Tensor] = None,\n ) -> Tuple[\n torch.Tensor, torch.Tensor, torch.LongTensor, torch.Tensor, torch.Tensor\n ]:\n \"\"\"Encode XC backbone structure into node and edge features.\"\"\"\n num_batch, num_residues = C.shape\n\n # Hack to enable checkpointing\n if self.checkpoint_gradients and (not X.requires_grad):\n X.requires_grad = True\n\n node_h, edge_h, edge_idx, mask_i, mask_ij = self._checkpoint(\n self.feature_graph, X, C, edge_idx, mask_ij\n )\n\n if node_h_aux is not None:\n node_h = node_h + mask_i.unsqueeze(-1) * node_h_aux\n if edge_h_aux is not None:\n edge_h = edge_h + mask_ij.unsqueeze(-1) * edge_h_aux\n\n node_h, edge_h = self.gnn(node_h, edge_h, edge_idx, mask_i, mask_ij)\n return node_h, edge_h, edge_idx, mask_i, mask_ij\n\n def _checkpoint(self, module: nn.Module, *args) -> nn.Module:\n if self.checkpoint_gradients:\n return checkpoint(module, *args)\n else:\n return module(*args)"
},
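
The BackboneEncoderGNN docstring above fully specifies the tensor interface; a usage sketch with random backbone coordinates (purely illustrative, assuming chroma is installed) looks like this:

import torch
from chroma.models.graph_design import BackboneEncoderGNN

encoder = BackboneEncoderGNN(dim_nodes=128, dim_edges=128, num_neighbors=30)

# One protein of 50 residues with 4 backbone atoms each:
# X has shape (num_batch, num_residues, num_atoms, 3), C is the per-residue chain map.
X = torch.randn(1, 50, 4, 3)
C = torch.ones(1, 50, dtype=torch.long)

node_h, edge_h, edge_idx, mask_i, mask_ij = encoder(X, C)
print(node_h.shape)  # (1, 50, 128)
print(edge_h.shape)  # (1, 50, 30, 128)
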
{
"identifier": "load_model",
"path": "chroma/utility/model.py",
"snippet": "def load_model(\n weights,\n model_class,\n device=\"cpu\",\n strict=False,\n strict_unexpected=True,\n verbose=True,\n):\n \"\"\"Load model saved with save_model.\n\n Args:\n weights (str): The destination path of the model weights to load.\n Compatible with files saved by `save_model`.\n model_class: Name of model class.\n device (str, optional): Pytorch device specification, e.g. `'cuda'` for\n GPU. Default is `'cpu'`.\n strict (bool): Whether to require that the keys match between the\n input file weights and the model created from the parameters stored\n in the model kwargs.\n strict_unexpected (bool): Whether to require that there are no\n unexpected keys when loading model weights, as distinct from the\n strict option which doesn't allow for missing keys either. By\n default, we use this option rather than strict for ease of\n development when adding model features.\n verbose (bool, optional): Show outputs from download and loading. Default True.\n\n Returns:\n model (nn.Module): Torch model with loaded weights.\n \"\"\"\n\n # Process weights path\n if str(weights).startswith(\"named:\"):\n weights = weights.split(\"named:\")[1]\n if weights not in NAMED_MODELS[model_class.__name__]:\n raise Exception(f\"Unknown {model_class.__name__} model name: {weights},\")\n weights = NAMED_MODELS[model_class.__name__][weights][\"s3_uri\"]\n\n # resolve s3 paths\n if str(weights).startswith(\"s3:\"):\n raise NotImplementedError(\"Loading Models from an S3 link not supported.\")\n\n # download public models from generate\n if str(weights).startswith(\"https:\"):\n # Decompose into arguments\n parsed_url = urlparse(weights)\n base_url = f\"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}\"\n model_name = parse_qs(parsed_url.query).get(\"weights\", [None])[0]\n weights = api.download_from_generate(\n base_url, model_name, force=False, exist_ok=True\n )\n\n # load model weights\n params = torch.load(weights, map_location=\"cpu\")\n model = model_class(**params[\"init_kwargs\"]).to(device)\n missing_keys, unexpected_keys = model.load_state_dict(\n params[\"model_state_dict\"], strict=strict\n )\n if strict_unexpected and len(unexpected_keys) > 0:\n raise Exception(\n f\"Error loading model from checkpoint file: {weights} contains {len(unexpected_keys)} unexpected keys: {unexpected_keys}\"\n )\n return model"
}
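
load_model above pairs with a save_model utility; a hedged usage sketch follows (the checkpoint filename and the GraphClassifier module path are assumptions for illustration, not files or paths confirmed by this row):

from chroma.utility.model import load_model
from chroma.models.graph_classifier import GraphClassifier  # assumed module path

# "my_classifier.pt" is a placeholder for a checkpoint produced by save_model;
# it stores both init_kwargs and model_state_dict, so the class is rebuilt
# with the saved kwargs before the weights are loaded.
model = load_model("my_classifier.pt", GraphClassifier, device="cpu", verbose=False)
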
] | from types import SimpleNamespace
from chroma.data.xcs import validate_XC
from chroma.layers import basic
from chroma.layers.attention import AttentionChainPool
from chroma.layers.basic import NodeProduct, NoOp
from chroma.layers.graph import MLP, MaskedNorm
from chroma.layers.structure import diffusion
from chroma.models.graph_design import BackboneEncoderGNN
from chroma.utility.model import load_model as utility_load_model
import torch
import torch.nn as nn | 10,581 | edge_update=True,
edge_mlp_layers=1,
edge_mlp_dim=None,
skip_connect_input=False,
mlp_activation="softplus",
graph_criterion="knn",
graph_random_min_local=20,
use_time_features=True,
noise_schedule="log_snr",
noise_beta_min=0.2,
noise_beta_max=70.0,
checkpoint_gradients=False,
class_config={},
out_mlp_layers=2,
noise_covariance_model="globular",
noise_log_snr_range=(-7.0, 13.5),
time_feature_type="t",
time_log_feature_scaling=0.05,
fourier_scale=16.0,
zero_grad_fix=False,
**kwargs,
):
"""Initialize GraphBackbone network."""
super().__init__()
# Save configuration in kwargs
self.kwargs = locals()
self.kwargs.pop("self")
for key in list(self.kwargs.keys()):
if key.startswith("__") and key.endswith("__"):
self.kwargs.pop(key)
args = SimpleNamespace(**self.kwargs)
self.class_config = class_config
# Important global options
self.dim_nodes = args.dim_nodes
self.dim_edges = args.dim_edges
self.mlp_activation = args.mlp_activation
self.zero_grad_fix = zero_grad_fix
if "random_fourier_2mer" in args.edge_features:
index = args.edge_features.index("random_fourier_2mer")
args.edge_features.pop(index)
args.edge_features.append(
(
"random_fourier_2mer",
{
"dim_embedding": args.dim_edges,
"trainable": False,
"scale": args.fourier_scale,
},
)
)
        # Encoder GNN to process the backbone
self.encoder = BackboneEncoderGNN(
dim_nodes=args.dim_nodes,
dim_edges=args.dim_edges,
num_neighbors=args.num_neighbors,
node_features=args.node_features,
edge_features=args.edge_features,
num_layers=args.num_layers,
node_mlp_layers=args.node_mlp_layers,
node_mlp_dim=args.node_mlp_dim,
edge_update=args.edge_update,
edge_mlp_layers=args.edge_mlp_layers,
edge_mlp_dim=args.edge_mlp_dim,
mlp_activation=args.mlp_activation,
dropout=args.dropout,
skip_connect_input=args.skip_connect_input,
graph_criterion=args.graph_criterion,
graph_random_min_local=args.graph_random_min_local,
checkpoint_gradients=checkpoint_gradients,
)
self.time_feature_type = args.time_feature_type
self.time_log_feature_scaling = time_log_feature_scaling
self.use_time_features = use_time_features
if self.use_time_features:
self.time_features = basic.FourierFeaturization(
d_input=1, d_model=dim_nodes, trainable=False, scale=16.0
)
self.sequence_embedding = nn.Embedding(20, dim_nodes)
self.noise_perturb = diffusion.DiffusionChainCov(
noise_schedule=args.noise_schedule,
beta_min=args.noise_beta_min,
beta_max=args.noise_beta_max,
log_snr_range=args.noise_log_snr_range,
covariance_model=args.noise_covariance_model,
)
self._init_heads(class_config, dim_nodes, out_mlp_layers, dropout)
self.condition_sequence_frequency = 0.3
def _init_heads(self, class_config, dim_nodes, out_mlp_layers, dropout):
self.heads = {"chain": {}, "first_order": {}, "second_order": {}, "complex": {}}
for label, config in class_config.items():
group = config["level"]
if label == "is_interface" or label == "contact":
dim_out = 1
else:
dim_out = len(config["tokens"])
if group == "chain":
pool = AttentionChainPool(8, dim_nodes)
elif group == "complex":
raise NotImplementedError
elif group == "second_order":
pool = NoOp()
else:
pool = NoOp()
if group != "second_order":
if self.zero_grad_fix:
node_norm_layer = MaskedNorm(
dim=1, num_features=dim_nodes, affine=True, norm="layer"
)
| # Copyright Generate Biomedicines, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for generating protein sequence and side chain conformations
given backbones. These can be used for sequence design and packing.
"""
class GraphClassifier(nn.Module):
"""Graph-based protein classification
Args:
        See documentation of `structure.protein_graph.ProteinFeatureGraph`,
and `graph.GraphNN` for more details.
Inputs:
X (Tensor): Backbone coordinates with shape
`(num_batch, num_residues, num_atoms, 3)`.
C (LongTensor): Chain map with shape `(num_batch, num_residues)`.
O (Tensor) (optional): One-hot sequence tensor of shape `(num_batch, num_residues)`
Outputs:
node_h (Tensor): residue-based representations that can be used to project various classification predictions
"""
def __init__(
self,
dim_nodes=128,
dim_edges=128,
num_neighbors=30,
node_features=(("internal_coords", {"log_lengths": True}),),
edge_features=["random_fourier_2mer", "orientations_2mer", "distances_chain"],
num_layers=3,
dropout=0.1,
node_mlp_layers=1,
node_mlp_dim=None,
edge_update=True,
edge_mlp_layers=1,
edge_mlp_dim=None,
skip_connect_input=False,
mlp_activation="softplus",
graph_criterion="knn",
graph_random_min_local=20,
use_time_features=True,
noise_schedule="log_snr",
noise_beta_min=0.2,
noise_beta_max=70.0,
checkpoint_gradients=False,
class_config={},
out_mlp_layers=2,
noise_covariance_model="globular",
noise_log_snr_range=(-7.0, 13.5),
time_feature_type="t",
time_log_feature_scaling=0.05,
fourier_scale=16.0,
zero_grad_fix=False,
**kwargs,
):
"""Initialize GraphBackbone network."""
super().__init__()
# Save configuration in kwargs
self.kwargs = locals()
self.kwargs.pop("self")
for key in list(self.kwargs.keys()):
if key.startswith("__") and key.endswith("__"):
self.kwargs.pop(key)
args = SimpleNamespace(**self.kwargs)
self.class_config = class_config
# Important global options
self.dim_nodes = args.dim_nodes
self.dim_edges = args.dim_edges
self.mlp_activation = args.mlp_activation
self.zero_grad_fix = zero_grad_fix
if "random_fourier_2mer" in args.edge_features:
index = args.edge_features.index("random_fourier_2mer")
args.edge_features.pop(index)
args.edge_features.append(
(
"random_fourier_2mer",
{
"dim_embedding": args.dim_edges,
"trainable": False,
"scale": args.fourier_scale,
},
)
)
        # Encoder GNN to process the backbone
self.encoder = BackboneEncoderGNN(
dim_nodes=args.dim_nodes,
dim_edges=args.dim_edges,
num_neighbors=args.num_neighbors,
node_features=args.node_features,
edge_features=args.edge_features,
num_layers=args.num_layers,
node_mlp_layers=args.node_mlp_layers,
node_mlp_dim=args.node_mlp_dim,
edge_update=args.edge_update,
edge_mlp_layers=args.edge_mlp_layers,
edge_mlp_dim=args.edge_mlp_dim,
mlp_activation=args.mlp_activation,
dropout=args.dropout,
skip_connect_input=args.skip_connect_input,
graph_criterion=args.graph_criterion,
graph_random_min_local=args.graph_random_min_local,
checkpoint_gradients=checkpoint_gradients,
)
self.time_feature_type = args.time_feature_type
self.time_log_feature_scaling = time_log_feature_scaling
self.use_time_features = use_time_features
if self.use_time_features:
self.time_features = basic.FourierFeaturization(
d_input=1, d_model=dim_nodes, trainable=False, scale=16.0
)
self.sequence_embedding = nn.Embedding(20, dim_nodes)
self.noise_perturb = diffusion.DiffusionChainCov(
noise_schedule=args.noise_schedule,
beta_min=args.noise_beta_min,
beta_max=args.noise_beta_max,
log_snr_range=args.noise_log_snr_range,
covariance_model=args.noise_covariance_model,
)
self._init_heads(class_config, dim_nodes, out_mlp_layers, dropout)
self.condition_sequence_frequency = 0.3
def _init_heads(self, class_config, dim_nodes, out_mlp_layers, dropout):
self.heads = {"chain": {}, "first_order": {}, "second_order": {}, "complex": {}}
for label, config in class_config.items():
group = config["level"]
if label == "is_interface" or label == "contact":
dim_out = 1
else:
dim_out = len(config["tokens"])
if group == "chain":
pool = AttentionChainPool(8, dim_nodes)
elif group == "complex":
raise NotImplementedError
elif group == "second_order":
pool = NoOp()
else:
pool = NoOp()
if group != "second_order":
if self.zero_grad_fix:
node_norm_layer = MaskedNorm(
dim=1, num_features=dim_nodes, affine=True, norm="layer"
) | mlp = MLP( | 5 | 2023-11-28 00:09:40+00:00 | 12k |
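
For reference, the GraphClassifier defined in this row can be instantiated directly once chroma is importable. The sketch below is an illustration, not repository code, and the module path is an assumption; a real configuration would additionally pass a class_config dict whose entries carry at least a "level" ("chain", "first_order" or "second_order") and a "tokens" vocabulary, which is what _init_heads reads above.

from chroma.models.graph_classifier import GraphClassifier  # assumed module path

model = GraphClassifier(dim_nodes=128, dim_edges=128, num_layers=3)  # default class_config={} builds no heads
print(sum(p.numel() for p in model.parameters()))  # parameter count of the encoder, embeddings and noise schedule
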
RWTH-EBC/vclibpy | vclibpy/flowsheets/vapor_injection.py | [
{
"identifier": "BaseCycle",
"path": "vclibpy/flowsheets/base.py",
"snippet": "class BaseCycle:\n \"\"\"\n Base class for a heat pump. More complex systems may inherit from this class\n All HP have a compressor, two HE and a source and sink.\n Therefore, the parameters defined here are general parameters.\n\n Args:\n fluid (str): Name of the fluid\n evaporator (HeatExchanger): Instance of a heat exchanger used for the evaporator\n condenser (HeatExchanger): Instance of a heat exchanger used for the condenser\n \"\"\"\n\n flowsheet_name: str = \"BaseCLass of all HP classes - not to use for map generation\"\n\n def __init__(\n self,\n fluid: str,\n evaporator: HeatExchanger,\n condenser: HeatExchanger\n ):\n self.fluid: str = fluid\n self.evaporator = evaporator\n self.condenser = condenser\n # Instantiate dummy values\n self.med_prop = None\n self._p_min = 10000 # So that p>0 at all times\n self._p_max = None # Is set by med-prop\n\n def __str__(self):\n return self.flowsheet_name\n\n def setup_new_fluid(self, fluid):\n # Only do so if new fluid is given\n if self.med_prop is not None:\n if self.med_prop.fluid_name == fluid:\n return\n self.med_prop.terminate()\n\n # Else create new instance of MedProp\n med_prop_class, med_prop_kwargs = media.get_global_med_prop_and_kwargs()\n self.med_prop = med_prop_class(fluid_name=fluid, **med_prop_kwargs)\n\n # Write the instance to the components\n for component in self.get_all_components():\n component.med_prop = self.med_prop\n component.start_secondary_med_prop()\n\n # Get max and min pressure\n _, self._p_max, _ = self.med_prop.get_critical_point()\n self.fluid = fluid\n\n def terminate(self):\n self.med_prop.terminate()\n for component in self.get_all_components():\n component.terminate_secondary_med_prop()\n\n def get_all_components(self) -> List[BaseComponent]:\n return [self.condenser, self.evaporator]\n\n def calc_steady_state(self, inputs: Inputs, fluid: str = None, **kwargs):\n \"\"\"\n Calculate the steady-state performance of a vapor compression cycle\n based on given inputs and assumptions.\n\n This function ensures consistent assumptions across different cycles.\n It calculates the performance of the heat pump under\n specific conditions while adhering to several general assumptions.\n\n General Assumptions:\n ---------------------\n - Isenthalpic expansion valves:\n The enthalpy at the inlet equals the enthalpy at the outlet.\n - No heat losses in any component:\n The heat input to the condenser equals the heat\n output of the evaporator plus the power input.\n - Input to the evaporator is always in the two-phase region.\n - Output of the evaporator and output of the condenser maintain\n a constant overheating or subcooling (can be set in Inputs).\n\n Args:\n inputs (Inputs):\n An instance of the Inputs class containing the\n necessary parameters to calculate the flowsheet state.\n fluid (str):\n The fluid to be used in the calculations.\n Required only if 'fluid' is not specified during the object's initialization.\n\n Keyword Arguments:\n min_iteration_step (int):\n The minimum step size for iterations (default: 1).\n save_path_plots (str or None):\n The path to save plots (default: None).\n If None, no plots are created.\n show_iteration (bool):\n Whether to display iteration progress (default: False).\n T_max (float):\n Maximum temperature allowed (default: 273.15 + 150).\n use_quick_solver (bool):\n Whether to use a quick solver (default: True).\n max_err_ntu (float):\n Maximum allowable error for the heat exchanger in percent (default: 0.5).\n max_err_dT_min (float):\n Maximum allowable error 
for minimum temperature difference in K (default: 0.1).\n max_num_iterations (int or None):\n Maximum number of iterations allowed (default: None).\n\n Returns:\n fs_state (FlowsheetState):\n An instance of the FlowsheetState class representing\n the calculated state of the vapor compression cycle.\n \"\"\"\n # Settings\n min_iteration_step = kwargs.pop(\"min_iteration_step\", 1)\n save_path_plots = kwargs.get(\"save_path_plots\", None)\n input_name = \";\".join([k + \"=\" + str(np.round(v.value, 3)).replace(\".\", \"_\")\n for k, v in inputs.get_variables().items()])\n show_iteration = kwargs.get(\"show_iteration\", False)\n use_quick_solver = kwargs.pop(\"use_quick_solver\", True)\n err_ntu = kwargs.pop(\"max_err_ntu\", 0.5)\n err_dT_min = kwargs.pop(\"max_err_dT_min\", 0.1)\n max_num_iterations = kwargs.pop(\"max_num_iterations\", 1e5)\n p_1_history = []\n p_2_history = []\n\n if use_quick_solver:\n step_p1 = kwargs.get(\"step_max\", 10000)\n step_p2 = kwargs.get(\"step_max\", 10000)\n else:\n step_p1 = min_iteration_step\n step_p2 = min_iteration_step\n\n # Setup fluid:\n if fluid is None:\n fluid = self.fluid\n self.setup_new_fluid(fluid)\n\n # First: Iterate with given conditions to get the 4 states and the mass flow rate:\n T_1_start = inputs.T_eva_in - inputs.dT_eva_superheating\n T_3_start = inputs.T_con_in + inputs.dT_con_subcooling\n p_1_start = self.med_prop.calc_state(\"TQ\", T_1_start, 1).p\n p_2_start = self.med_prop.calc_state(\"TQ\", T_3_start, 0).p\n p_1_next = p_1_start\n p_2_next = p_2_start\n\n fs_state = FlowsheetState() # Always log what is happening in the whole flowsheet\n fs_state.set(name=\"Q_con\", value=1, unit=\"W\", description=\"Condenser heat flow rate\")\n fs_state.set(name=\"COP\", value=0, unit=\"-\", description=\"Coefficient of performance\")\n\n if show_iteration:\n fig_iterations, ax_iterations = plt.subplots(2)\n\n num_iterations = 0\n\n while True:\n if isinstance(max_num_iterations, (int, float)):\n if num_iterations > max_num_iterations:\n logger.warning(\"Maximum number of iterations %s exceeded. Stopping.\",\n max_num_iterations)\n return\n\n if (num_iterations + 1) % (0.1 * max_num_iterations) == 0:\n logger.info(\"Info: %s percent of max_num_iterations %s used\",\n 100 * (num_iterations + 1) / max_num_iterations, max_num_iterations)\n\n p_1 = p_1_next\n p_2 = p_2_next\n p_1_history.append(p_1)\n p_2_history.append(p_2)\n if show_iteration:\n ax_iterations[0].cla()\n ax_iterations[1].cla()\n ax_iterations[0].scatter(list(range(len(p_1_history))), p_1_history)\n ax_iterations[1].scatter(list(range(len(p_2_history))), p_2_history)\n plt.draw()\n plt.pause(1e-5)\n\n # Increase counter\n num_iterations += 1\n # Check critical pressures:\n if p_2 >= self._p_max:\n if step_p2 == min_iteration_step:\n logger.error(\"Pressure too high. Configuration is infeasible.\")\n return\n p_2_next = p_2 - step_p2\n step_p2 /= 10\n continue\n if p_1 <= self._p_min:\n if p_1_next == min_iteration_step:\n logger.error(\"Pressure too low. Configuration is infeasible.\")\n return\n p_1_next = p_1 + step_p1\n step_p1 /= 10\n continue\n\n # Calculate the states based on the given flowsheet\n try:\n self.calc_states(p_1, p_2, inputs=inputs, fs_state=fs_state)\n except ValueError as err:\n logger.error(\"An error occurred while calculating states. 
\"\n \"Can't guess next pressures, thus, exiting: %s\", err)\n return\n if save_path_plots is not None and num_iterations == 1 and show_iteration:\n self.plot_cycle(save_path=save_path_plots.joinpath(f\"{input_name}_initialization.png\"), inputs=inputs)\n\n # Check heat exchangers:\n error_eva, dT_min_eva = self.evaporator.calc(inputs=inputs, fs_state=fs_state)\n if not isinstance(error_eva, float):\n print(error_eva)\n if error_eva < 0:\n p_1_next = p_1 - step_p1\n continue\n else:\n if step_p1 > min_iteration_step:\n p_1_next = p_1 + step_p1\n step_p1 /= 10\n continue\n elif error_eva > err_ntu and dT_min_eva > err_dT_min:\n step_p1 = 1000\n p_1_next = p_1 + step_p1\n continue\n\n error_con, dT_min_con = self.condenser.calc(inputs=inputs, fs_state=fs_state)\n if error_con < 0:\n p_2_next = p_2 + step_p2\n continue\n else:\n if step_p2 > min_iteration_step:\n p_2_next = p_2 - step_p2\n step_p2 /= 10\n continue\n elif error_con > err_ntu and dT_min_con > err_dT_min:\n p_2_next = p_2 - step_p2\n step_p2 = 1000\n continue\n\n # If still here, and the values are equal, we may break.\n if p_1 == p_1_next and p_2 == p_2_next:\n # Check if solution was too far away. If so, jump back\n # And decrease the iteration step by factor 10.\n if step_p2 > min_iteration_step:\n p_2_next = p_2 - step_p2\n step_p2 /= 10\n continue\n if step_p1 > min_iteration_step:\n p_1_next = p_1 + step_p1\n step_p1 /= 10\n continue\n logger.info(\"Breaking: Converged\")\n break\n\n # Check if values are not converging at all:\n p_1_unique = set(p_1_history[-10:])\n p_2_unique = set(p_2_history[-10:])\n if len(p_1_unique) == 2 and len(p_2_unique) == 2 \\\n and step_p1 == min_iteration_step and step_p2 == min_iteration_step:\n logger.critical(\"Breaking: not converging at all\")\n break\n\n if show_iteration:\n plt.close(fig_iterations)\n\n # Calculate the heat flow rates for the selected states.\n Q_con = self.condenser.calc_Q_flow()\n Q_con_outer = self.condenser.calc_secondary_Q_flow(Q_con)\n Q_eva = self.evaporator.calc_Q_flow()\n Q_eva_outer = self.evaporator.calc_secondary_Q_flow(Q_eva)\n self.evaporator.calc(inputs=inputs, fs_state=fs_state)\n self.condenser.calc(inputs=inputs, fs_state=fs_state)\n P_el = self.calc_electrical_power(fs_state=fs_state, inputs=inputs)\n T_con_out = inputs.T_con_in + Q_con_outer / self.condenser.m_flow_secondary_cp\n\n # COP based on P_el and Q_con:\n COP_inner = Q_con / P_el\n COP_outer = Q_con_outer / P_el\n # Calculate carnot quality as a measure of reliability of model:\n COP_carnot = (T_con_out / (T_con_out - inputs.T_eva_in))\n carnot_quality = COP_inner / COP_carnot\n # Calc return temperature:\n fs_state.set(\n name=\"P_el\", value=P_el, unit=\"W\",\n description=\"Power consumption\"\n )\n fs_state.set(\n name=\"carnot_quality\", value=carnot_quality,\n unit=\"-\", description=\"Carnot Quality\"\n )\n fs_state.set(\n name=\"Q_con\", value=Q_con, unit=\"W\",\n description=\"Condenser refrigerant heat flow rate\"\n )\n # COP based on P_el and Q_con:\n fs_state.set(\n name=\"Q_con_outer\", value=Q_con_outer, unit=\"W\",\n description=\"Secondary medium condenser heat flow rate\"\n )\n fs_state.set(\n name=\"Q_eva_outer\", value=Q_eva_outer, unit=\"W\",\n description=\"Secondary medium evaporator heat flow rate\"\n )\n fs_state.set(\n name=\"COP\", value=COP_inner,\n unit=\"-\", description=\"Coefficient of Performance\"\n )\n fs_state.set(\n name=\"COP_outer\", value=COP_outer,\n unit=\"-\", description=\"Outer COP, including heat losses\"\n )\n\n if save_path_plots is not None:\n 
self.plot_cycle(save_path=save_path_plots.joinpath(f\"{input_name}_final_result.png\"), inputs=inputs)\n\n return fs_state\n\n @abstractmethod\n def get_states_in_order_for_plotting(self):\n \"\"\"\n Function to return all thermodynamic states of cycle\n in the correct order for plotting.\n Include phase change states to see if your simulation\n runs plausible cycles.\n\n Returns:\n - List with tuples, first entry being the state and second the mass flow rate\n \"\"\"\n return []\n\n def set_evaporator_outlet_based_on_superheating(self, p_eva: float, inputs: Inputs):\n \"\"\"\n Calculate the outlet state of the evaporator based on\n the required degree of superheating.\n\n Args:\n p_eva (float): Evaporation pressure\n inputs (Inputs): Inputs with superheating level\n \"\"\"\n T_1 = self.med_prop.calc_state(\"PQ\", p_eva, 1).T + inputs.dT_eva_superheating\n if inputs.dT_eva_superheating > 0:\n self.evaporator.state_outlet = self.med_prop.calc_state(\"PT\", p_eva, T_1)\n else:\n self.evaporator.state_outlet = self.med_prop.calc_state(\"PQ\", p_eva, 1)\n\n def set_condenser_outlet_based_on_subcooling(self, p_con: float, inputs: Inputs):\n \"\"\"\n Calculate the outlet state of the evaporator based on\n the required degree of superheating.\n\n Args:\n p_con (float): Condensing pressure\n inputs (Inputs): Inputs with superheating level\n \"\"\"\n T_3 = self.med_prop.calc_state(\"PQ\", p_con, 0).T - inputs.dT_con_subcooling\n if inputs.dT_con_subcooling > 0:\n self.condenser.state_outlet = self.med_prop.calc_state(\"PT\", p_con, T_3)\n else:\n self.condenser.state_outlet = self.med_prop.calc_state(\"PQ\", p_con, 0)\n\n def plot_cycle(self, save_path: bool, inputs: Inputs, states: list = None):\n \"\"\"Function to plot the resulting flowsheet of the steady state config.\"\"\"\n if states is None:\n states = self.get_states_in_order_for_plotting()\n states.append(states[0]) # Plot full cycle\n # Unpack state var:\n h_T = np.array([state.h for state in states]) / 1000\n T = [state.T - 273.15 for state in states]\n p = np.array([state.p for state in states])\n h_p = h_T\n\n fig, ax = plt.subplots(2, 1, sharex=True)\n ax[0].set_ylabel(\"$T$ in °C\")\n ax[1].set_xlabel(\"$h$ in kJ/kgK\")\n # Two phase limits\n ax[0].plot(\n self.med_prop.get_two_phase_limits(\"h\") / 1000,\n self.med_prop.get_two_phase_limits(\"T\") - 273.15, color=\"black\"\n )\n\n ax[0].plot(h_T, T, color=\"r\", marker=\"s\")\n self._plot_secondary_heat_flow_rates(ax=ax[0], inputs=inputs)\n ax[1].plot(h_p, np.log(p), marker=\"s\", color=\"r\")\n # Two phase limits\n ax[1].plot(\n self.med_prop.get_two_phase_limits(\"h\") / 1000,\n np.log(self.med_prop.get_two_phase_limits(\"p\")),\n color=\"black\"\n )\n plt.plot()\n ax[1].set_ylabel(\"$log(p)$\")\n ax[1].set_ylim([np.min(np.log(p)) * 0.9, np.max(np.log(p)) * 1.1])\n ax[0].set_ylim([np.min(T) - 5, np.max(T) + 5])\n ax[1].set_xlim([np.min(h_T) * 0.9, np.max(h_T) * 1.1])\n ax[0].set_xlim([np.min(h_T) * 0.9, np.max(h_T) * 1.1])\n fig.tight_layout()\n fig.savefig(save_path)\n plt.close(fig)\n\n def _plot_secondary_heat_flow_rates(self, ax, inputs):\n Q_con = self.condenser.calc_Q_flow()\n Q_eva = self.evaporator.calc_Q_flow()\n\n delta_H_con = np.array([\n self.condenser.state_outlet.h * self.condenser.m_flow,\n self.condenser.state_outlet.h * self.condenser.m_flow + Q_con\n ]) / self.condenser.m_flow\n delta_H_eva = np.array([\n self.evaporator.state_outlet.h * self.evaporator.m_flow,\n self.evaporator.state_outlet.h * self.evaporator.m_flow - Q_eva\n ]) / self.evaporator.m_flow\n 
self.condenser.m_flow_secondary = inputs.m_flow_con\n self.condenser.calc_secondary_cp(T=inputs.T_con_in)\n self.evaporator.m_flow_secondary = inputs.m_flow_eva\n self.evaporator.calc_secondary_cp(T=inputs.T_eva_in)\n ax.plot(delta_H_con / 1000, [\n inputs.T_con_in - 273.15,\n inputs.T_con_in + Q_con / self.condenser.m_flow_secondary_cp - 273.15\n ], color=\"b\")\n ax.plot(delta_H_eva / 1000, [\n inputs.T_eva_in - 273.15,\n inputs.T_eva_in - Q_eva / self.evaporator.m_flow_secondary_cp - 273.15\n ], color=\"b\")\n\n @abstractmethod\n def calc_electrical_power(self, inputs: Inputs, fs_state: FlowsheetState):\n \"\"\"Function to calc the electrical power consumption based on the flowsheet used\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def calc_states(self, p_1, p_2, inputs: Inputs, fs_state: FlowsheetState):\n \"\"\"\n Function to calculate the states and mass flow rates of the flowsheet\n and set these into each component based on the given pressure levels p_1 and p_2.\n\n Args:\n p_1 (float):\n Lower pressure level. If no pressure losses are assumed,\n this equals the evaporation pressure and the compressor inlet pressure.\n p_2 (float):\n Higher pressure level. If no pressure losses are assumed,\n this equals the condensing pressure and the compressor outlet pressure.\n inputs (Inputs): Inputs of calculation.\n fs_state (FlowsheetState): Flowsheet state to save important variables.\n \"\"\"\n raise NotImplementedError"
},
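
The calc_steady_state docstring above pins down the energy-balance assumptions; a tiny worked example with made-up numbers shows how the reported quantities relate to each other:

# Made-up operating point, only to illustrate the bookkeeping in calc_steady_state.
Q_eva = 8_000.0        # W, evaporator heat flow rate
P_el = 2_000.0         # W, electrical power consumption
Q_con = Q_eva + P_el   # no heat losses: condenser heat flow rate = 10 kW

COP = Q_con / P_el                               # 5.0
T_eva_in, T_con_out = 273.15 + 2.0, 273.15 + 40.0
COP_carnot = T_con_out / (T_con_out - T_eva_in)  # about 8.24
carnot_quality = COP / COP_carnot                # about 0.61
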
{
"identifier": "Inputs",
"path": "vclibpy/datamodels.py",
"snippet": "class Inputs(VariableContainer):\n \"\"\"\n Class defining inputs to calculate the FlowsheetState.\n\n While the inputs are pre-defined, you may add further ones\n using the `set` method.\n\n Args:\n n (float): Relative compressor speed between 0 and 1.\n T_eva_in (float): Secondary side evaporator inlet temperature.\n T_con_in (float): Secondary side condenser inlet temperature.\n m_flow_eva (float): Secondary side evaporator mass flow rate.\n m_flow_con (float): Secondary side condenser mass flow rate.\n dT_eva_superheating (float): Super-heating after evaporator.\n dT_con_subcooling (float): Subcooling after condenser.\n T_ambient (float): Ambient temperature of the machine.\n \"\"\"\n\n def __init__(\n self,\n n: float = None,\n T_eva_in: float = None,\n T_con_in: float = None,\n m_flow_eva: float = None,\n m_flow_con: float = None,\n dT_eva_superheating: float = None,\n dT_con_subcooling: float = None,\n T_ambient: float = None\n ):\n \"\"\"\n Initializes an Inputs object with parameters representing external conditions\n for the vapor compression cycle.\n\n Args:\n n (float): Relative compressor speed between 0 and 1 (unit: -).\n T_eva_in (float): Secondary side evaporator inlet temperature (unit: K).\n T_con_in (float): Secondary side condenser inlet temperature (unit: K).\n m_flow_eva (float): Secondary side evaporator mass flow rate (unit: kg/s).\n m_flow_con (float): Secondary side condenser mass flow rate (unit: kg/s).\n dT_eva_superheating (float): Super-heating after evaporator (unit: K).\n dT_con_subcooling (float): Subcooling after condenser (unit: K).\n T_ambient (float): Ambient temperature of the machine (unit: K).\n \"\"\"\n super().__init__()\n self.set(\n name=\"n\",\n value=n,\n unit=\"-\",\n description=\"Relative compressor speed\"\n )\n self.set(\n name=\"T_eva_in\",\n value=T_eva_in,\n unit=\"K\",\n description=\"Secondary side evaporator inlet temperature\"\n )\n self.set(\n name=\"T_con_in\",\n value=T_con_in,\n unit=\"K\",\n description=\"Secondary side condenser inlet temperature\"\n )\n self.set(\n name=\"m_flow_con\",\n value=m_flow_con,\n unit=\"kg/s\",\n description=\"Secondary side condenser mass flow rate\"\n )\n self.set(\n name=\"m_flow_eva\",\n value=m_flow_eva,\n unit=\"kg/s\",\n description=\"Secondary side evaporator mass flow rate\"\n )\n self.set(\n name=\"dT_eva_superheating\",\n value=dT_eva_superheating,\n unit=\"K\",\n description=\"Super-heating after evaporator\"\n )\n self.set(\n name=\"dT_con_subcooling\",\n value=dT_con_subcooling,\n unit=\"K\",\n description=\"Subcooling after condenser\"\n )\n if T_ambient is None:\n T_ambient = T_eva_in\n self.set(\n name=\"T_ambient\",\n value=T_ambient,\n unit=\"K\",\n description=\"Ambient temperature of machine\"\n )"
},
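
A construction example for Inputs (the numbers are an illustrative operating point, not library defaults):

from vclibpy.datamodels import Inputs

inputs = Inputs(
    n=0.5,                  # relative compressor speed
    T_eva_in=273.15 + 2,    # K, secondary side evaporator inlet temperature
    T_con_in=273.15 + 30,   # K, secondary side condenser inlet temperature
    m_flow_eva=0.9,         # kg/s
    m_flow_con=0.25,        # kg/s
    dT_eva_superheating=5,  # K
    dT_con_subcooling=0,    # K
)
print(inputs.T_ambient)  # defaults to T_eva_in when not given
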
{
"identifier": "FlowsheetState",
"path": "vclibpy/datamodels.py",
"snippet": "class FlowsheetState(VariableContainer):\n \"\"\"\n This class is used to define the unique states of the flowsheet\n in the heat pump.\n\n The class is dynamic in the sense that attributes may be\n added during calculation of new flowsheet. This enables\n the easy adding of custom values to analyze the whole flowsheet\n and not restrict to a certain naming convention.\n \"\"\""
},
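
FlowsheetState shares the set/get_variables interface used throughout BaseCycle above; a short sketch of how custom values are logged (values are made up):

from vclibpy.datamodels import FlowsheetState

fs_state = FlowsheetState()
fs_state.set(name="COP", value=4.2, unit="-", description="Coefficient of performance")
fs_state.set(name="Q_con", value=10_000.0, unit="W", description="Condenser heat flow rate")
print(fs_state.get_variables()["COP"].value)  # 4.2
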
{
"identifier": "Compressor",
"path": "vclibpy/components/compressors/compressor.py",
"snippet": "class Compressor(BaseComponent):\n \"\"\"\n Base compressor class to be extended for specific compressor models.\n\n Args:\n N_max (float): Maximal rotations per second of the compressor.\n V_h (float): Volume of the compressor in m^3.\n\n Methods:\n get_lambda_h(inputs: Inputs) -> float:\n Get the volumetric efficiency.\n\n get_eta_isentropic(p_outlet: float, inputs: Inputs) -> float:\n Get the isentropic efficiency.\n\n get_eta_mech(inputs: Inputs) -> float:\n Get the mechanical efficiency.\n\n get_p_outlet() -> float:\n Get the outlet pressure.\n\n get_n_absolute(n: float) -> float:\n Return the absolute compressor frequency based on the relative speed.\n\n calc_state_outlet(p_outlet: float, inputs: Inputs, fs_state: FlowsheetState):\n Calculate the outlet state based on the high pressure level and provided inputs.\n\n calc_m_flow(inputs: Inputs, fs_state: FlowsheetState) -> float:\n Calculate the refrigerant mass flow rate.\n\n calc_electrical_power(inputs: Inputs, fs_state: FlowsheetState) -> float:\n Calculate the electrical power consumed by the compressor based on an adiabatic energy balance.\n \"\"\"\n\n def __init__(self, N_max: float, V_h: float):\n \"\"\"\n Initialize the compressor.\n\n Args:\n N_max (float): Maximal rotations per second of the compressor.\n V_h (float): Volume of the compressor in m^3.\n \"\"\"\n super().__init__()\n self.N_max = N_max\n self.V_h = V_h\n\n def get_lambda_h(self, inputs: Inputs) -> float:\n \"\"\"\n Get the volumetric efficiency.\n\n Args:\n inputs (Inputs): Inputs for the calculation.\n\n Returns:\n float: Volumetric efficiency.\n \"\"\"\n raise NotImplementedError(\"Re-implement this function to use it\")\n\n def get_eta_isentropic(self, p_outlet: float, inputs: Inputs) -> float:\n \"\"\"\n Get the isentropic efficiency.\n\n Args:\n p_outlet (float): High pressure value.\n inputs (Inputs): Inputs for the calculation.\n\n Returns:\n float: Isentropic efficiency.\n \"\"\"\n raise NotImplementedError(\"Re-implement this function to use it\")\n\n def get_eta_mech(self, inputs: Inputs) -> float:\n \"\"\"\n Get the mechanical efficiency including motor and inverter efficiencies.\n\n Args:\n inputs (Inputs): Inputs for the calculation.\n\n Returns:\n float: Mechanical efficiency including motor and inverter efficiencies.\n \"\"\"\n raise NotImplementedError(\"Re-implement this function to use it\")\n\n def get_p_outlet(self) -> float:\n \"\"\"\n Get the outlet pressure.\n\n Returns:\n float: Outlet pressure.\n \"\"\"\n assert self.state_outlet is not None, \"You have to calculate the outlet state first.\"\n return self.state_outlet.p\n\n def get_n_absolute(self, n: float) -> float:\n \"\"\"\n Return given relative n as absolute rounds/sec based on self.N_max.\n\n Args:\n n (float): Relative compressor speed between 0 and 1.\n\n Returns:\n float: Absolute compressor frequency in rounds/sec.\n \"\"\"\n return self.N_max * n\n\n def calc_state_outlet(self, p_outlet: float, inputs: Inputs, fs_state: FlowsheetState):\n \"\"\"\n Calculate the output state based on the high pressure level and the provided inputs.\n The state is automatically set as the outlet state of this component.\n\n Args:\n p_outlet (float): High pressure value.\n inputs (Inputs): Inputs for calculation.\n fs_state (FlowsheetState): Flowsheet state.\n \"\"\"\n state_outlet_isentropic = self.med_prop.calc_state(\"PS\", p_outlet, self.state_inlet.s)\n eta_is = self.get_eta_isentropic(p_outlet=p_outlet, inputs=inputs)\n h_outlet = (\n self.state_inlet.h + 
(state_outlet_isentropic.h - self.state_inlet.h) /\n eta_is\n )\n fs_state.set(name=\"eta_is\", value=eta_is, unit=\"%\", description=\"Isentropic efficiency\")\n self.state_outlet = self.med_prop.calc_state(\"PH\", p_outlet, h_outlet)\n\n def calc_m_flow(self, inputs: Inputs, fs_state: FlowsheetState) -> float:\n \"\"\"\n Calculate the refrigerant mass flow rate.\n\n Args:\n inputs (Inputs): Inputs for the calculation.\n fs_state (FlowsheetState): Flowsheet state.\n\n Returns:\n float: Refrigerant mass flow rate.\n \"\"\"\n lambda_h = self.get_lambda_h(inputs=inputs)\n V_flow_ref = (\n lambda_h *\n self.V_h *\n self.get_n_absolute(inputs.n)\n )\n self.m_flow = self.state_inlet.d * V_flow_ref\n fs_state.set(name=\"lambda_h\", value=lambda_h, unit=\"%\", description=\"Volumetric efficiency\")\n fs_state.set(name=\"V_flow_ref\", value=V_flow_ref, unit=\"m3/s\", description=\"Refrigerant volume flow rate\")\n fs_state.set(name=\"m_flow_ref\", value=self.m_flow, unit=\"kg/s\", description=\"Refrigerant mass flow rate\")\n return self.m_flow\n\n def calc_electrical_power(self, inputs: Inputs, fs_state: FlowsheetState) -> float:\n \"\"\"\n Calculate the electrical power consumed by the compressor based on an adiabatic energy balance.\n\n Args:\n inputs (Inputs): Inputs for the calculation.\n fs_state (FlowsheetState): Flowsheet state.\n\n Returns:\n float: Electrical power consumed.\n \"\"\"\n # Heat flow in the compressor\n P_t = self.m_flow * (self.state_outlet.h - self.state_inlet.h)\n # Electrical power consumed\n eta_mech = self.get_eta_mech(inputs=inputs)\n P_el = P_t / eta_mech\n fs_state.set(name=\"eta_mech\", value=eta_mech, unit=\"-\", description=\"Mechanical efficiency\")\n return P_el"
},
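
Compressor leaves the efficiency models abstract; a minimal constant-efficiency subclass (purely illustrative, not a class shipped with vclibpy) is enough to use the helpers defined above:

from vclibpy.components.compressors import Compressor

class ConstantEffCompressor(Compressor):
    """Illustrative compressor with fixed efficiencies."""

    def get_lambda_h(self, inputs):
        return 0.9   # constant volumetric efficiency

    def get_eta_isentropic(self, p_outlet, inputs):
        return 0.7   # constant isentropic efficiency

    def get_eta_mech(self, inputs):
        return 0.95  # combined mechanical, motor and inverter efficiency

compressor = ConstantEffCompressor(N_max=120, V_h=19e-6)
print(compressor.get_n_absolute(0.5))  # 60.0 rounds/sec
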
{
"identifier": "ExpansionValve",
"path": "vclibpy/components/expansion_valves/expansion_valve.py",
"snippet": "class ExpansionValve(BaseComponent, abc.ABC):\n \"\"\"Base class for an expansion valve.\n\n Args:\n A (float): Cross-sectional area of the expansion valve.\n \"\"\"\n\n def __init__(self, A):\n super().__init__()\n self.A = A # Cross-sectional area of the expansion valve\n\n @abc.abstractmethod\n def calc_m_flow_at_opening(self, opening) -> float:\n \"\"\"\n Calculate the mass flow rate for the given opening.\n\n Args:\n opening (float): Opening of valve between 0 and 1\n\n Returns:\n float: Mass flow rate in kg/s\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def calc_opening_at_m_flow(self, m_flow, **kwargs) -> float:\n \"\"\"\n Calculate the opening for the given mass flow rate\n\n Args:\n m_flow (float): Mass flow rate in kg/s\n **kwargs: Possible keyword arguments for child classes\n\n Returns:\n float: Opening\n \"\"\"\n raise NotImplementedError\n\n def calc_outlet(self, p_outlet: float):\n \"\"\"\n Calculate isenthalpic expansion valve.\n\n Args:\n p_outlet (float): Outlet pressure level\n \"\"\"\n self.state_outlet = self.med_prop.calc_state(\"PH\", p_outlet, self.state_inlet.h)"
},
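
ExpansionValve only fixes the isenthalpic outlet calculation; a simple Bernoulli-orifice subclass (an illustration, not necessarily how vclibpy's own valves are modelled) shows how the two abstract methods relate:

import math
from vclibpy.components.expansion_valves import ExpansionValve

class BernoulliValve(ExpansionValve):
    """Illustrative orifice model: m_flow = opening * A * sqrt(2 * rho * dp)."""

    def calc_m_flow_at_opening(self, opening) -> float:
        # Assumes the flowsheet has already set state_inlet and state_outlet.
        dp = self.state_inlet.p - self.state_outlet.p
        return opening * self.A * math.sqrt(2 * self.state_inlet.d * dp)

    def calc_opening_at_m_flow(self, m_flow, **kwargs) -> float:
        dp = self.state_inlet.p - self.state_outlet.p
        return m_flow / (self.A * math.sqrt(2 * self.state_inlet.d * dp))
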
{
"identifier": "ThermodynamicState",
"path": "vclibpy/media/states.py",
"snippet": "class ThermodynamicState:\n \"\"\"\n Represents a thermodynamic state within a cycle.\n\n Notes:\n Does not necessarily need to have all state variables defined!\n\n Args:\n p (float): Pressure at the state in Pa.\n T (float): Temperature at the state in K.\n u (float): Inner energy at the state in J/kg.\n h (float): Enthalpy at the state in J/kg.\n s (float): Entropy at the state in J/(kg * K).\n v (float): Specific volume at the state in m^3/kg.\n q (float): Quality at the state (between 0 and 1).\n d (float): Density at the state in kg/m^3.\n\n Methods:\n __init__: Initializes the state class.\n __str__: Provides a string representation of the state.\n get_pretty_print: Formats the state with names, units, and descriptions.\n \"\"\"\n\n def __init__(self,\n p=None,\n T=None,\n u=None,\n h=None,\n s=None,\n v=None,\n q=None,\n d=None):\n \"\"\"\n Initializes a thermodynamic state.\n\n Args:\n p (float): Pressure at the state in Pa.\n T (float): Temperature at the state in K.\n u (float): Inner energy at the state in J/kg.\n h (float): Enthalpy at the state in J/kg.\n s (float): Entropy at the state in J/(kg * K).\n v (float): Specific volume at the state in m^3/kg.\n q (float): Quality at the state (between 0 and 1).\n d (float): Density at the state in kg/m^3.\n\n Notes:\n If only v or d is provided, the other attribute will be calculated. If both are given and they are similar,\n an error will be raised.\n \"\"\"\n self.p = p\n self.T = T\n self.u = u\n self.h = h\n self.s = s\n self.v = v\n self.q = q\n self.d = d\n # Define density\n if v and d:\n if not round(1/v, 4) == round(d, 4):\n raise ValueError(\"At current state d and v do not match\", d, v)\n elif v:\n self.d = 1/v\n elif d:\n self.v = 1/d\n\n def __str__(self):\n \"\"\"\n Returns a string representation of the state.\n \"\"\"\n return \";\".join([f\"{k}={v}\" for k, v in self.__dict__.items()])\n\n def get_pretty_print(self):\n \"\"\"\n Provides a formatted representation of the state with names, units, and descriptions.\n \"\"\"\n _container = VariableContainer()\n _container.__class__.__name__ = self.__class__.__name__\n _container.set(name=\"p\", value=self.p, unit=\"Pa\", description=\"Pressure\")\n _container.set(name=\"T\", value=self.T, unit=\"K\", description=\"Temperature\")\n _container.set(name=\"u\", value=self.u, unit=\"J/kg\", description=\"Inner energy\")\n _container.set(name=\"h\", value=self.h, unit=\"J/kg\", description=\"Enthalpy\")\n _container.set(name=\"s\", value=self.s, unit=\"J/(kg*K)\", description=\"Entropy\")\n _container.set(name=\"v\", value=self.v, unit=\"m^3/kg\", description=\"Specific volume\")\n _container.set(name=\"q\", value=self.q, unit=\"-\", description=\"Quality\")\n _container.set(name=\"d\", value=self.d, unit=\"kg/m^3\", description=\"Density\")\n return str(_container)"
}
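
ThermodynamicState fills in density from specific volume (and vice versa) and rejects inconsistent pairs; a short example:

from vclibpy.media import ThermodynamicState

state = ThermodynamicState(p=1e5, T=300, v=0.05)
print(state.d)  # 20.0 kg/m^3, computed as 1/v

ThermodynamicState(v=0.05, d=20.0)    # consistent pair, accepted
# ThermodynamicState(v=0.05, d=25.0)  # would raise ValueError, since d != 1/v
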
] | import abc
import logging
import numpy as np
from copy import deepcopy
from vclibpy.flowsheets import BaseCycle
from vclibpy.datamodels import Inputs, FlowsheetState
from vclibpy.components.compressors import Compressor
from vclibpy.components.expansion_valves import ExpansionValve
from vclibpy.media import ThermodynamicState | 8,798 |
logger = logging.getLogger(__name__)
class BaseVaporInjection(BaseCycle, abc.ABC):
"""
Partial cycle with vapor injection, using
two separated compressors and expansion valves.
Notes
-----
See parent docstring for info on further assumptions and parameters.
"""
flowsheet_name = "VaporInjectionPhaseSeparator"
def __init__(
self,
high_pressure_compressor: Compressor,
low_pressure_compressor: Compressor,
|
logger = logging.getLogger(__name__)
class BaseVaporInjection(BaseCycle, abc.ABC):
"""
Partial cycle with vapor injection, using
two separated compressors and expansion valves.
Notes
-----
See parent docstring for info on further assumptions and parameters.
"""
flowsheet_name = "VaporInjectionPhaseSeparator"
def __init__(
self,
high_pressure_compressor: Compressor,
low_pressure_compressor: Compressor, | high_pressure_valve: ExpansionValve, | 4 | 2023-11-30 12:54:59+00:00 | 12k |
BiQiWHU/CMFormer | train_net.py | [
{
"identifier": "add_maskformer2_config",
"path": "mask2former/config.py",
"snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = \"MultiScaleMaskedTransformerDecoder\"\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75"
},
{
"identifier": "COCOInstanceNewBaselineDatasetMapper",
"path": "mask2former/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py",
"snippet": "class COCOInstanceNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(str(self.tfm_gens))\n )\n\n self.img_format = image_format\n self.is_train = is_train\n \n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # TODO: get padding mask\n # by feeding a \"segmentation mask\" to the same transforms\n padding_mask = np.ones(image.shape[:2])\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n # the crop transformation has default padding value 0 for segmentation\n padding_mask = transforms.apply_segmentation(padding_mask)\n padding_mask = ~ padding_mask.astype(bool)\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n dataset_dict[\"padding_mask\"] = torch.as_tensor(np.ascontiguousarray(padding_mask))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n # Let's always keep mask\n # if not self.mask_on:\n # anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n # NOTE: does not support BitMask due to augmentation\n # Current BitMask cannot handle empty objects\n instances = utils.annotations_to_instances(annos, image_shape)\n # After transforms such as cropping are 
applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n # Need to filter empty instances first (due to augmentation)\n instances = utils.filter_empty_instances(instances)\n # Generate masks from polygon\n h, w = instances.image_size\n # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n if hasattr(instances, 'gt_masks'):\n gt_masks = instances.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n instances.gt_masks = gt_masks\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict"
},
{
"identifier": "COCOPanopticNewBaselineDatasetMapper",
"path": "mask2former/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py",
"snippet": "class COCOPanopticNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n crop_gen: crop augmentation\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(\n str(self.tfm_gens)\n )\n )\n\n self.img_format = image_format\n self.is_train = is_train\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n instances.gt_boxes = Boxes(torch.zeros((0, 4)))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n 
instances.gt_boxes = masks.get_bounding_boxes()\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict"
},
{
"identifier": "MaskFormerInstanceDatasetMapper",
"path": "mask2former/data/dataset_mappers/mask_former_instance_dataset_mapper.py",
"snippet": "class MaskFormerInstanceDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for instance segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n aug_input = T.AugInput(image)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n\n # transform instnace masks\n assert \"annotations\" in dataset_dict\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"keypoints\", None)\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n if len(annos):\n assert \"segmentation\" in annos[0]\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image.shape[:2]))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary 
segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n # pad image\n image = F.pad(image, padding_size, value=128).contiguous()\n # pad mask\n masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n # Prepare per-category binary masks\n instances = Instances(image_shape)\n instances.gt_classes = classes\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))\n else:\n masks = BitMasks(torch.stack(masks))\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict"
},
{
"identifier": "MaskFormerPanopticDatasetMapper",
"path": "mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py",
"snippet": "class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n super().__init__(\n is_train,\n augmentations=augmentations,\n image_format=image_format,\n ignore_label=ignore_label,\n size_divisibility=size_divisibility,\n )\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # semantic segmentation\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n # panoptic segmentation\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n else:\n pan_seg_gt = None\n segments_info = None\n\n if pan_seg_gt is None:\n raise ValueError(\n \"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n if sem_seg_gt is not None:\n sem_seg_gt = aug_input.sem_seg\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n pan_seg_gt = torch.as_tensor(pan_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n pan_seg_gt = F.pad(\n pan_seg_gt, padding_size, value=0\n ).contiguous() # 0 is 
the VOID panoptic label\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Pemantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n pan_seg_gt = pan_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict"
},
{
"identifier": "MaskFormerSemanticDatasetMapper",
"path": "mask2former/data/dataset_mappers/mask_former_semantic_dataset_mapper.py",
"snippet": "class MaskFormerSemanticDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop_CategoryAreaConstraint(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,\n cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if 
sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Semantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict"
},
{
"identifier": "SemanticSegmentorWithTTA",
"path": "mask2former/test_time_augmentation.py",
"snippet": "class SemanticSegmentorWithTTA(nn.Module):\n \"\"\"\n A SemanticSegmentor with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=1):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n super().__init__()\n if isinstance(model, DistributedDataParallel):\n model = model.module\n self.cfg = cfg.clone()\n\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n def __call__(self, batched_inputs):\n \"\"\"\n Same input/output format as :meth:`SemanticSegmentor.forward`\n \"\"\"\n\n def _maybe_read_image(dataset_dict):\n ret = copy.copy(dataset_dict)\n if \"image\" not in ret:\n image = read_image(ret.pop(\"file_name\"), self.model.input_format)\n image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW\n ret[\"image\"] = image\n if \"height\" not in ret and \"width\" not in ret:\n ret[\"height\"] = image.shape[1]\n ret[\"width\"] = image.shape[2]\n return ret\n\n processed_results = []\n for x in batched_inputs:\n result = self._inference_one_image(_maybe_read_image(x))\n processed_results.append(result)\n return processed_results\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n\n final_predictions = None\n count_predictions = 0\n for input, tfm in zip(augmented_inputs, tfms):\n count_predictions += 1\n with torch.no_grad():\n if final_predictions is None:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions = self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions = self.model([input])[0].pop(\"sem_seg\")\n else:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions += self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions += self.model([input])[0].pop(\"sem_seg\")\n\n final_predictions = final_predictions / count_predictions\n return {\"sem_seg\": final_predictions}\n\n def _get_augmented_inputs(self, input):\n augmented_inputs = self.tta_mapper(input)\n tfms = [x.pop(\"transforms\") for x in augmented_inputs]\n return augmented_inputs, tfms"
},
{
"identifier": "InstanceSegEvaluator",
"path": "mask2former/evaluation/instance_evaluation.py",
"snippet": "class InstanceSegEvaluator(COCOEvaluator):\n \"\"\"\n Evaluate AR for object proposals, AP for instance detection/segmentation, AP\n for keypoint detection outputs using COCO's metrics.\n See http://cocodataset.org/#detection-eval and\n http://cocodataset.org/#keypoints-eval to understand its metrics.\n The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means\n the metric cannot be computed (e.g. due to no predictions made).\n\n In addition to COCO, this evaluator is able to support any bounding box detection,\n instance segmentation, or keypoint detection dataset.\n \"\"\"\n\n def _eval_predictions(self, predictions, img_ids=None):\n \"\"\"\n Evaluate predictions. Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n tasks = self._tasks or self._tasks_from_predictions(coco_results)\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id\n # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())\n # num_classes = len(all_contiguous_ids)\n # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1\n\n reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}\n for result in coco_results:\n category_id = result[\"category_id\"]\n # assert category_id < num_classes, (\n # f\"A prediction has class={category_id}, \"\n # f\"but the dataset only has {num_classes} classes and \"\n # f\"predicted class id should be in [0, {num_classes - 1}].\"\n # )\n assert category_id in reverse_id_mapping, (\n f\"A prediction has class={category_id}, \"\n f\"but the dataset only has class ids in {dataset_id_to_contiguous_id}.\"\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\n \"Evaluating predictions with {} COCO API...\".format(\n \"unofficial\" if self._use_fast_impl else \"official\"\n )\n )\n for task in sorted(tasks):\n assert task in {\"bbox\", \"segm\", \"keypoints\"}, f\"Got unknown task: {task}!\"\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api,\n coco_results,\n task,\n kpt_oks_sigmas=self._kpt_oks_sigmas,\n use_fast_impl=self._use_fast_impl,\n img_ids=img_ids,\n max_dets_per_image=self._max_dets_per_image,\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res"
}
] | from shapely.errors import ShapelyDeprecationWarning
from collections import OrderedDict
from typing import Any, Dict, List, Set
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import (
DefaultTrainer,
default_argument_parser,
default_setup,
launch,
)
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
SemSegEvaluator,
verify_results,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
from mask2former import (
COCOInstanceNewBaselineDatasetMapper,
COCOPanopticNewBaselineDatasetMapper,
InstanceSegEvaluator,
MaskFormerInstanceDatasetMapper,
MaskFormerPanopticDatasetMapper,
MaskFormerSemanticDatasetMapper,
SemanticSegmentorWithTTA,
add_maskformer2_config,
)
import warnings
import copy
import itertools
import logging
import os
import torch
import detectron2.utils.comm as comm | 10,728 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
MaskFormer Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
try:
# ignore ShapelyDeprecationWarning from fvcore
warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning)
except Exception:
pass
os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets'
# MaskFormer
class Trainer(DefaultTrainer):
"""
Extension of the Trainer class adapted to MaskFormer.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each
builtin dataset. For your own dataset, you can simply create an
evaluator manually in your script and do not have to worry about the
hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
# semantic segmentation
if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
)
# instance segmentation
if evaluator_type == "coco":
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
# panoptic segmentation
if evaluator_type in [
"coco_panoptic_seg",
"ade20k_panoptic_seg",
"cityscapes_panoptic_seg",
"mapillary_vistas_panoptic_seg",
]:
if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON:
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
# COCO
if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
# Mapillary Vistas
if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
MaskFormer Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
try:
# ignore ShapelyDeprecationWarning from fvcore
warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning)
except Exception:
pass
os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets'
# MaskFormer
class Trainer(DefaultTrainer):
"""
Extension of the Trainer class adapted to MaskFormer.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each
builtin dataset. For your own dataset, you can simply create an
evaluator manually in your script and do not have to worry about the
hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
# semantic segmentation
if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
)
# instance segmentation
if evaluator_type == "coco":
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
# panoptic segmentation
if evaluator_type in [
"coco_panoptic_seg",
"ade20k_panoptic_seg",
"cityscapes_panoptic_seg",
"mapillary_vistas_panoptic_seg",
]:
if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON:
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
# COCO
if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
# Mapillary Vistas
if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: | evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) | 7 | 2023-11-29 15:26:53+00:00 | 12k |
soumik-kanad/diffssl | finetune.py | [
{
"identifier": "DiffSSLModelFeedbackFusion",
"path": "ssl_diff/ssl_model_feedback_fusion.py",
"snippet": "class DiffSSLModelFeedbackFusion(nn.Module):\n\n \"\"\" SSL model with feedback loop between the decoder features of the UNet and the \n encoder features of the UNet\n \"\"\"\n def __init__(self, encoder, diffusion, head, device, mode='freeze', \n feedback_arch=\"C_B_R_C\", use_feedback=False, feedback_b_list=None, first_fw_b_list=None, second_fw_b_list=None):\n \n super().__init__()\n self.encoder = encoder\n self.diffusion = diffusion\n self.head = head\n self.use_feedback = use_feedback\n self.feedback_b_list = feedback_b_list\n self.first_fw_b_list = first_fw_b_list\n self.second_fw_b_list = second_fw_b_list\n self.mode = mode\n assert self.mode in ['freeze', 'update', 'mult_fpn', 'add_fpn', 'multi_scale_freeze', \"finetune\"], f\"Mode {self.mode} not supported\"\n \n if self.mode == 'freeze' and not use_feedback: \n for param in self.encoder.parameters():\n param.requires_grad = False\n else:\n # including fusion finetune, feedback finetune, feedback freeze\n # print(\"=======Freezed param=======\")\n frozen_decoder_idx = max(self.first_fw_b_list + self.second_fw_b_list) - 19 # -19 to convert block idx to decoder idx\n for name, param in self.encoder.named_parameters():\n if name.startswith(\"out.\"):\n param.requires_grad = False\n # print(name)\n elif name.startswith(\"output_blocks\"):\n if int(name.split(\".\")[1]) >= frozen_decoder_idx:\n param.requires_grad = False\n # print(name)\n\n self.device = device \n \n if use_feedback:\n \"\"\" \n generate feedback layers\n Feedback Architecture: feedback_arch = \"C_B_R_C\" = Conv, BN, ReLU, Conv\n \"\"\"\n feedback_layers = []\n for feedback_b in self.feedback_b_list:\n in_dim = DM_FEAT_DIM_DICT[feedback_b]\n out_dim = DM_FEAT_DIM_DICT[38-feedback_b]\n sequential_model_lst = self.make_layers(feedback_arch, in_dim, out_dim)\n feedback_layers.append(nn.Sequential(*sequential_model_lst))\n self.feedback_layers = nn.ModuleList(feedback_layers)\n \n def make_layers(self, feedback_arch, in_dim, out_dim):\n sequential_model_lst = [] \n for j in range(len(feedback_arch)):\n if feedback_arch[j] == \"Res\":\n \"\"\" Use first block to change in_dim to out_dim and then the rest operate on out_dim \"\"\"\n if j == 0: # if the first resblock\n sequential_model_lst.append(ResBlock(in_dim, dropout=0.0, out_channels=out_dim, use_conv=False))\n else: # if the last resblock\n sequential_model_lst.append(ResBlock(out_dim, dropout=0.0, out_channels=out_dim, use_conv=False))\n elif feedback_arch[j] == \"R\":\n sequential_model_lst.append(nn.ReLU(inplace=True))\n elif feedback_arch[j] == \"B\":\n sequential_model_lst.append(nn.BatchNorm2d(out_dim))\n elif feedback_arch[j] == \"C\":\n \"\"\" Use first conv to change in_dim to out_dim and then the rest operate on out_dim \"\"\"\n if j == 0:\n sequential_model_lst.append(nn.Conv2d(in_dim, out_dim, kernel_size=1, stride=1, padding=0, bias=False))\n else:\n sequential_model_lst.append(nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=1, padding=1, groups=out_dim, bias=False))\n elif feedback_arch[j] == \"C2\":\n \"\"\" Operate on in_dim the entire time and then for the last conv, change in_dim to out_dim \"\"\"\n if j == len(feedback_arch) - 1:\n sequential_model_lst.append(nn.Conv2d(in_dim, out_dim, kernel_size=1, stride=1, padding=0, bias=False))\n else:\n sequential_model_lst.append(nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=1, padding=1, groups=in_dim, bias=False))\n elif feedback_arch[j] == \"S\": \n sequential_model_lst.append(nn.SiLU(inplace=True))\n elif feedback_arch[j] == \"G\":\n # 
want to use this as a group norm layer\n sequential_model_lst.append(nn.GroupNorm(num_groups=32, num_channels=out_dim, dtype=self.encoder.dtype))\n return sequential_model_lst\n\n \n def generate_feedback(self, features):\n \"\"\" generate feedback features from decoder features \"\"\"\n feedback_features = []\n for idx, b in enumerate(self.feedback_b_list):\n feedback_features.append(self.feedback_layers[idx](features[b-1]))\n return feedback_features\n\n def forward(self, x, t, unet_model_kwargs={}):\n first_fw_feat = []\n second_fw_feat = []\n for t_ in t:\n t_ = t_*torch.ones(x.shape[0],).long().to(self.device)\n x_start = x.to(self.device)\n x_start = x_start.type(torch.float16) if self.use_fp16 else x_start.type(torch.float32)\n noise = torch.randn_like(x_start)\n\n x_t = self.diffusion.q_sample(x_start, t_, noise=noise)\n\n # encoder_features = self.encoder.get_encoder_features(x_t, self.diffusion._scale_timesteps(t), **unet_model_kwargs)\n # print([x.shape for x in encoder_features])\n\n \"\"\" extract encoder features and decoder features depending on the mode \"\"\"\n \n if self.use_feedback:\n with torch.no_grad():\n # TODO : getting all features wastes GPU memory\n encoder_features, _, mid_feature, decoder_features = self.encoder.get_all_features(x_t, \n self.diffusion._scale_timesteps(t_), 0,\n ['encoder_features', 'resume_encoder_feature', 'mid_feature', 'decoder_features'], \n **unet_model_kwargs)\n features = encoder_features + mid_feature + decoder_features\n first_fw_feat.append([features[b-1].detach().float() for b in self.first_fw_b_list])\n else:\n block_feat_lst = self.encoder.get_multiple_features(x_t, \n self.diffusion._scale_timesteps(t_),\n block_num_lst = self.first_fw_b_list, \n **unet_model_kwargs)\n first_fw_feat.append([block_feat.float() for block_feat in block_feat_lst])\n \n \n\n \n if self.use_feedback: # use feedback \n \"\"\" generate feedback features from decoder features \"\"\"\n feedback_features = self.generate_feedback(features)\n feedback_features = feedback_features[::-1] # reverse the list of feedback features \n\n \"\"\" generate the final features based on the mode \"\"\"\n block_feat_list = self.encoder.get_multiple_features_with_specified_feedback(x=x_t, \n timesteps=self.diffusion._scale_timesteps(t_), \n block_num_lst=self.second_fw_b_list, \n feedback_features=feedback_features, # list of features [0: len(input_blocks) - feedback_starting_point]\n feedback_b_list=self.feedback_b_list,\n **unet_model_kwargs)\n second_fw_feat.append([block_feat.float() for block_feat in block_feat_list])\n\n x = self.head(self.first_fw_b_list, first_fw_feat, self.second_fw_b_list, second_fw_feat, t)\n \n return x\n \n def convert_to_fp16(self):\n \"\"\"\n Convert the torso of the model to float16.\n \"\"\"\n self.use_fp16 = True\n self.encoder.convert_to_fp16()\n if self.use_feedback:\n for idx in range(len(self.feedback_b_list)):\n self.feedback_layers[idx].apply(convert_module_to_f16)\n\n def convert_to_fp32(self):\n \"\"\"\n Convert the torso of the model to float32.\n \"\"\"\n self.use_fp16 = False \n self.encoder.convert_to_fp32()\n if self.use_feedback:\n for idx in range(len(self.feedback_b_list)):\n self.feedback_layers[idx].apply(convert_module_to_f32)"
},
{
"identifier": "Head",
"path": "ssl_diff/head.py",
"snippet": "class Head(nn.Module):\n def __init__(self, args, feature_dim_dict, feature_size_dict):\n super().__init__()\n self.fcs = nn.ModuleList()\n self.pool = nn.AdaptiveAvgPool2d(args.pre_pool_size)\n feature_dims = feature_dim_dict[args.first_fw_b_list[0]] * args.pre_pool_size * args.pre_pool_size\n if args.head_arc == '':\n self.fcs.append(nn.Linear(feature_dims, args.num_classes))\n else:\n if '_' in args.head_arc:\n hidden_dims = args.head_arc.split('_')\n self.fcs.append(nn.Linear(feature_dims, int(hidden_dims[0])))\n last_hidden = int(hidden_dims[0])\n for hidden_dim in hidden_dims[1:]:\n self.fcs.append(nn.Linear(last_hidden, int(hidden_dim)))\n last_hidden = int(hidden_dim)\n self.fcs.append(nn.Linear(last_hidden, args.num_classes))\n else:\n self.fcs.append(nn.Linear(feature_dims, int(args.head_arc)))\n self.fcs.append(nn.Linear(int(args.head_arc), args.num_classes))\n \n def forward(self, first_fw_b_list, first_fw_feat, second_fw_b_list, second_fw_feat, t_list):\n x = first_fw_feat[0][0]\n x = self.pool(x)\n x = torch.flatten(x, start_dim=1)\n if len(self.fcs) == 1:\n return self.fcs[0](x)\n else:\n for fc in self.fcs[:-1]:\n x = nn.functional.relu(fc(x))\n return self.fcs[-1](x)"
},
{
"identifier": "AttentionFusion",
"path": "ssl_diff/attention_fusion.py",
"snippet": "class AttentionFusion(nn.Module):\n def __init__(self, args, feature_dim_dict, fature_size_dict=None):\n super(AttentionFusion, self).__init__()\n attention_dims = int(args.fusion_arc.split(',')[0].strip().split(':')[2])\n pre_layer = {}\n for b in set(args.first_fw_b_list + args.second_fw_b_list):\n feat_size = min(fature_size_dict[b], args.pre_pool_size)\n norm = nn.BatchNorm2d(feature_dim_dict[b]) if args.norm_type == \"batch\" else nn.LayerNorm([feature_dim_dict[b], feat_size, feat_size])\n pre_layer[str(b)] = nn.Sequential(\n nn.AdaptiveAvgPool2d(feat_size),\n norm,\n nn.Conv2d(feature_dim_dict[b], attention_dims, 1),\n LambdaLayer(lambda x: rearrange(x, 'b c h w -> b (h w) c')),\n )\n self.pre_layer = nn.ModuleDict(pre_layer) \n\n self.intra_inter_block_attention = AttentionHead(args.fusion_arc.split(\"/\")[0])\n self.feature_dims = attention_dims * len(args.t_list)\n self.head = nn.Linear(self.feature_dims, args.num_classes)\n\n def forward(self, first_fw_b_list, first_fw_feat, second_fw_b_list, second_fw_feat, t_list):\n if t_list is None: t_list = [0] # for other than Diffusion Model\n inter_noise_step_feat = []\n for t_idx, t in enumerate(t_list):\n block_feat = []\n for b_idx, b in enumerate(first_fw_b_list):\n x = self.pre_layer[str(b)](first_fw_feat[t_idx][b_idx])\n block_feat.append(x)\n for b_idx, b in enumerate(second_fw_b_list):\n x = self.pre_layer[str(b)](second_fw_feat[t_idx][b_idx])\n block_feat.append(x)\n x = torch.concat(block_feat, dim=1)\n # print(\"DEBUG: intra_inter_block_feat.in.shape\", x.shape)\n x = self.intra_inter_block_attention(x)\n # print(\"DEBUG: intra_inter_block_feat.out.shape\", x.shape)\n inter_noise_step_feat.append(x)\n x = torch.concat(inter_noise_step_feat, dim=1)\n # print(\"DEBUG: inter_noise_feat.shape\", x.shape)\n x = self.head(x)\n return x"
},
{
"identifier": "DM_FEAT_DIM_DICT",
"path": "ssl_diff/const.py",
"snippet": "DM_FEAT_DIM_DICT = {}"
},
{
"identifier": "DM_FEAT_SIZE_DICT",
"path": "ssl_diff/const.py",
"snippet": "DM_FEAT_SIZE_DICT = {}"
},
{
"identifier": "load_data",
"path": "guided_diffusion/image_datasets.py",
"snippet": "def load_data(\n *,\n data_dir,\n batch_size,\n image_size,\n class_cond=False,\n deterministic=False,\n random_crop=False,\n random_flip=True,\n):\n \"\"\"\n For a dataset, create a generator over (images, kwargs) pairs.\n\n Each images is an NCHW float tensor, and the kwargs dict contains zero or\n more keys, each of which map to a batched Tensor of their own.\n The kwargs dict can be used for class labels, in which case the key is \"y\"\n and the values are integer tensors of class labels.\n\n :param data_dir: a dataset directory.\n :param batch_size: the batch size of each returned pair.\n :param image_size: the size to which images are resized.\n :param class_cond: if True, include a \"y\" key in returned dicts for class\n label. If classes are not available and this is true, an\n exception will be raised.\n :param deterministic: if True, yield results in a deterministic order.\n :param random_crop: if True, randomly crop the images for augmentation.\n :param random_flip: if True, randomly flip the images for augmentation.\n \"\"\"\n if not data_dir:\n raise ValueError(\"unspecified data directory\")\n all_files = _list_image_files_recursively(data_dir)\n classes = None\n if class_cond:\n # Assume classes are the first part of the filename,\n # before an underscore.\n # class_names = [bf.basename(path).split(\"_\")[0] for path in all_files]\n # Assume classes are the parent directory name\n class_names = [bf.basename(bf.dirname(path)) for path in all_files] \n sorted_classes = {x: i for i, x in enumerate(sorted(set(class_names)))}\n classes = [sorted_classes[x] for x in class_names]\n dataset = ImageDataset(\n image_size,\n all_files,\n classes=classes,\n shard=get_rank(),\n num_shards=get_world_size(),\n random_crop=random_crop,\n random_flip=random_flip,\n )\n if deterministic:\n loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=False, num_workers=1, drop_last=True\n )\n else:\n loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=True\n )\n return loader"
},
{
"identifier": "dist_util",
"path": "guided_diffusion/dist_util.py",
"snippet": "GPUS_PER_NODE = 8\nSETUP_RETRY_COUNT = 3\ndef setup_dist():\ndef is_main_process():\ndef get_world_size():\ndef get_rank():\ndef dev():\ndef load_state_dict(path, **kwargs):\ndef sync_params(params):\ndef _find_free_port():"
},
{
"identifier": "load_data",
"path": "guided_diffusion/image_datasets.py",
"snippet": "def load_data(\n *,\n data_dir,\n batch_size,\n image_size,\n class_cond=False,\n deterministic=False,\n random_crop=False,\n random_flip=True,\n):\n \"\"\"\n For a dataset, create a generator over (images, kwargs) pairs.\n\n Each images is an NCHW float tensor, and the kwargs dict contains zero or\n more keys, each of which map to a batched Tensor of their own.\n The kwargs dict can be used for class labels, in which case the key is \"y\"\n and the values are integer tensors of class labels.\n\n :param data_dir: a dataset directory.\n :param batch_size: the batch size of each returned pair.\n :param image_size: the size to which images are resized.\n :param class_cond: if True, include a \"y\" key in returned dicts for class\n label. If classes are not available and this is true, an\n exception will be raised.\n :param deterministic: if True, yield results in a deterministic order.\n :param random_crop: if True, randomly crop the images for augmentation.\n :param random_flip: if True, randomly flip the images for augmentation.\n \"\"\"\n if not data_dir:\n raise ValueError(\"unspecified data directory\")\n all_files = _list_image_files_recursively(data_dir)\n classes = None\n if class_cond:\n # Assume classes are the first part of the filename,\n # before an underscore.\n # class_names = [bf.basename(path).split(\"_\")[0] for path in all_files]\n # Assume classes are the parent directory name\n class_names = [bf.basename(bf.dirname(path)) for path in all_files] \n sorted_classes = {x: i for i, x in enumerate(sorted(set(class_names)))}\n classes = [sorted_classes[x] for x in class_names]\n dataset = ImageDataset(\n image_size,\n all_files,\n classes=classes,\n shard=get_rank(),\n num_shards=get_world_size(),\n random_crop=random_crop,\n random_flip=random_flip,\n )\n if deterministic:\n loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=False, num_workers=1, drop_last=True\n )\n else:\n loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=True\n )\n return loader"
},
{
"identifier": "create_named_schedule_sampler",
"path": "guided_diffusion/resample.py",
"snippet": "def create_named_schedule_sampler(name, diffusion):\n \"\"\"\n Create a ScheduleSampler from a library of pre-defined samplers.\n\n :param name: the name of the sampler.\n :param diffusion: the diffusion object to sample for.\n \"\"\"\n if name == \"uniform\":\n return UniformSampler(diffusion)\n elif name == \"loss-second-moment\":\n return LossSecondMomentResampler(diffusion)\n else:\n raise NotImplementedError(f\"unknown schedule sampler: {name}\")"
},
{
"identifier": "model_and_diffusion_defaults",
"path": "guided_diffusion/script_util.py",
"snippet": "def model_and_diffusion_defaults():\n \"\"\"\n Defaults for image training.\n \"\"\"\n res = model_defaults()\n res.update(diffusion_defaults())\n return res"
},
{
"identifier": "create_model_and_diffusion",
"path": "guided_diffusion/script_util.py",
"snippet": "def create_model_and_diffusion(\n image_size,\n class_cond,\n learn_sigma,\n num_channels,\n num_res_blocks,\n channel_mult,\n num_heads,\n num_head_channels,\n num_heads_upsample,\n attention_resolutions,\n dropout,\n diffusion_steps,\n noise_schedule,\n timestep_respacing,\n use_kl,\n predict_xstart,\n rescale_timesteps,\n rescale_learned_sigmas,\n use_checkpoint,\n use_scale_shift_norm,\n resblock_updown,\n use_fp16,\n use_new_attention_order,\n):\n model = create_model(\n image_size,\n num_channels,\n num_res_blocks,\n channel_mult=channel_mult,\n learn_sigma=learn_sigma,\n class_cond=class_cond,\n use_checkpoint=use_checkpoint,\n attention_resolutions=attention_resolutions,\n num_heads=num_heads,\n num_head_channels=num_head_channels,\n num_heads_upsample=num_heads_upsample,\n use_scale_shift_norm=use_scale_shift_norm,\n dropout=dropout,\n resblock_updown=resblock_updown,\n use_fp16=use_fp16,\n use_new_attention_order=use_new_attention_order,\n )\n diffusion = create_gaussian_diffusion(\n steps=diffusion_steps,\n learn_sigma=learn_sigma,\n noise_schedule=noise_schedule,\n use_kl=use_kl,\n predict_xstart=predict_xstart,\n rescale_timesteps=rescale_timesteps,\n rescale_learned_sigmas=rescale_learned_sigmas,\n timestep_respacing=timestep_respacing,\n )\n return model, diffusion"
},
{
"identifier": "args_to_dict",
"path": "guided_diffusion/script_util.py",
"snippet": "def args_to_dict(args, keys):\n return {k: getattr(args, k) for k in keys}"
},
{
"identifier": "add_dict_to_argparser",
"path": "guided_diffusion/script_util.py",
"snippet": "def add_dict_to_argparser(parser, default_dict):\n for k, v in default_dict.items():\n v_type = type(v)\n if v is None:\n v_type = str\n elif isinstance(v, bool):\n v_type = str2bool\n parser.add_argument(f\"--{k}\", default=v, type=v_type)"
}
] | import argparse
import torch
import numpy as np
import sys
import os
import glob
import torch.distributed as dist
import wandb
from torch.nn.parallel import DistributedDataParallel as DDP
from tqdm import tqdm
from ssl_diff import DiffSSLModelFeedbackFusion
from ssl_diff import AttentionFusion, Head
from ssl_diff import DM_FEAT_DIM_DICT,DM_FEAT_SIZE_DICT
from guided_diffusion.image_datasets import load_data
from guided_diffusion import dist_util #, logger
from guided_diffusion.image_datasets import load_data
from guided_diffusion.resample import create_named_schedule_sampler
from guided_diffusion.script_util import (
model_and_diffusion_defaults,
create_model_and_diffusion,
args_to_dict,
add_dict_to_argparser,
) | 7,868 | targets = extra["y"].to(dist_util.dev())
output = model(imgs, args.t_list)
pred = torch.argmax(output, dim=1)
# print("Pred:", pred)
# print("Targets:", targets)
num_correct += (pred == targets).sum()
total += pred.shape[0]
# print(dist_util.get_rank(), total, num_correct)
# print("Acc now:", num_correct/total)
all_num_correct = sync_tensor_across_gpus(torch.tensor(num_correct).to(dist_util.dev()).reshape(1))
all_total = sync_tensor_across_gpus(torch.tensor(total).to(dist_util.dev()).reshape(1))
if dist_util.is_main_process():
num_correct = all_num_correct.sum().item()
total = all_total.sum().item()
if args.use_wandb:
wandb.log({f"Accuracy/{split}": num_correct / total, "epoch": epoch})
print(f'{split} accuracy: {num_correct / total}, Num correct: {num_correct}, Total: {total}')
def create_argparser():
defaults = dict(
# dataset
data_dir="",
val_data_dir="",
num_classes=50,
# training setting
schedule_sampler="uniform",
weight_decay=0.0,
lr_anneal_steps=0,
epochs = 50,
lr=1e-2, # use 1e-2 for freeze, 1e-3 for finetune
batch_size=16,
microbatch=-1, # -1 disables microbatches
ema_rate="0.9999", # comma-separated list of EMA values
use_fp16=False,
fp16_scale_growth=1e-3,
mode='freeze', # "freeze" or "finetune" for backbone
# feedback&fusion
head_type="attention", # "linear" or "attention"
head_arc='', #can be "h1_h2_h3" to use mlp head(when head_type=="linear")
norm_type="", # ["batch", "layer", ""],
pre_pool_size=16, # pooling size before attention or linear head
fusion_arc="", # architecture of attention head
feedback_arch='C_B_R', # architecture for feedback network(Conv-BatchNorm-ReLU)
checkpoint_path='', # encoder path
# add distributed training args
dist_url='env://',
dist_backend='nccl',
world_size=1,
# log
output_dir='./output',
resume_checkpoint="",
log_interval=10,
save_interval=10000,
eval_interval=5,
only_eval=False,
wandb_run_name=None,
use_wandb=True,
)
defaults.update(model_and_diffusion_defaults())
parser = argparse.ArgumentParser()
parser.add_argument("--local-rank", type=int, default=0,
help="For distributed training.")
parser.add_argument("--t_list", nargs='+', required=True, type=int, help="list of noise step t to use")
parser.add_argument("--first_fw_b_list", nargs='+', required=True, type=int, help="list of feature block to use from first forward(-1 if not used)")
parser.add_argument("--second_fw_b_list", nargs='+', required=True, type=int, help="list of feature block to use from second forward(=feedback)(-1 if not used)")
parser.add_argument("--feedback_b_list", nargs='+', required=True, type=int, help="list of feature block for feedback(-1 if not used)")
add_dict_to_argparser(parser, defaults)
return parser
def main():
print('Reading args')
args = create_argparser().parse_args()
args.device = 'cuda'
if args.first_fw_b_list[0] == -1:
args.first_fw_b_list = []
if args.second_fw_b_list[0] == -1:
args.second_fw_b_list = []
use_feedback = len(args.second_fw_b_list) > 0
if args.feedback_b_list[0] == -1:
args.feedback_b_list = []
assert use_feedback==False, "blocks for feedback are not specified"
if args.head_type == "linear":
assert len(args.first_fw_b_list) == 1 and len(args.t_list) == 1 and len(args.second_fw_b_list) == 0, "linear head cannot be used for feedback/fusion"
print('Setting up dist')
# Setup CUDA, GPU & distributed training
args.num_gpus = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
args.distributed = args.num_gpus > 1
# args.distributed = True
args.device = torch.device(args.device)
if args.distributed:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.local_rank = int(os.environ['LOCAL_RANK'])
print("")
print("Init distributed training on local rank {} ({}), world size {}".format(args.local_rank, int(os.environ["LOCAL_RANK"]), args.num_gpus))
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(
backend='nccl', init_method=args.dist_url, world_size=args.world_size, rank=args.rank
)
local_rank = int(os.environ["LOCAL_RANK"])
args.device = torch.device("cuda", local_rank)
torch.distributed.barrier()
if dist_util.is_main_process():
print(f'args: {args}')
exist_ok = len(glob.glob(os.path.join(args.output_dir, "*"))) == 0 or (args.output_dir == os.path.dirname(args.resume_checkpoint)) or (os.path.basename(args.output_dir) == "debug")
os.makedirs(args.output_dir, exist_ok=exist_ok)
print('Creating model')
if args.head_type =="linear":
|
def train(model, lr, e, bs, train_dataloader, test_dataloader, args, checkpoint_dict=None):
# optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# apply optimizer only on the head and feedback layers
use_feedback = len(args.second_fw_b_list) > 0
if args.distributed:
optimized_params_lst = [{'params': model.module.head.parameters()}]
if use_feedback:
optimized_params_lst.append({'params': model.module.feedback_layers.parameters()})
if args.mode == 'update':
optimized_params_lst.append({'params': model.module.update_blocks.parameters()})
if args.mode == 'add_fpn' or args.mode == 'mult_fpn':
optimized_params_lst.append({'params': model.module.fpn_blocks.parameters()})
if args.mode == "finetune":
optimized_params_lst.append({'params': model.module.encoder.parameters()})
else:
optimized_params_lst = [{'params': model.head.parameters()}]
if use_feedback:
optimized_params_lst.append({'params': model.feedback_layers.parameters()})
if args.mode == 'update':
optimized_params_lst.append({'params': model.update_blocks.parameters()})
if args.mode == 'add_fpn' or args.mode == 'mult_fpn':
optimized_params_lst.append({'params': model.fpn_blocks.parameters()})
if args.mode == "finetune":
optimized_params_lst.append({'params': model.encoder.parameters()})
optimizer = torch.optim.SGD(optimized_params_lst, lr=lr)
loss_fn = torch.nn.CrossEntropyLoss()
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 7, 0.1)
if checkpoint_dict is not None:
print(f"Loading model, optimizer, and scheduler from checkpoint from!")
model.module.head.load_state_dict(checkpoint_dict['model_head'])
if use_feedback:
print("Loading feedback layers")
model.module.feedback_layers.load_state_dict(checkpoint_dict['model_feedback'])
if args.mode == 'update':
print("Loading update blocks")
model.module.update_blocks.load_state_dict(checkpoint_dict['model_update'])
elif args.mode == 'add_fpn' or args.mode == 'mult_fpn':
print("Loading fpn blocks")
model.module.fpn_blocks.load_state_dict(checkpoint_dict['model_fpn'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
scheduler.load_state_dict(checkpoint_dict['scheduler'])
start_epoch = checkpoint_dict['epoch']
else:
start_epoch = 0
losses = []
model.train()
batch_num = 0
for i in range(start_epoch, e):
for batch in (tqdm(train_dataloader, total=len(train_dataloader))):
# # measure execution time in pytorch
# start = torch.cuda.Event(enable_timing=True)
# end = torch.cuda.Event(enable_timing=True)
# start.record()
imgs, extra = batch #next(train_dataloader)
imgs = imgs.to(dist_util.dev())
targets = extra["y"].to(dist_util.dev())
# end.record()
# # Waits for everything to finish running
# torch.cuda.synchronize()
# print("Inputs: ", start.elapsed_time(end))
# start.record()
output = model(imgs, args.t_list)
# end.record()
# # Waits for everything to finish running
# torch.cuda.synchronize()
# print("Forward: ", start.elapsed_time(end))
# start.record()
#calculate loss
loss = loss_fn(output, targets)
# end.record()
# # Waits for everything to finish running
# torch.cuda.synchronize()
# print("Loss: ", start.elapsed_time(end))
#backprop
# start.record()
optimizer.zero_grad()
loss.backward()
# store 'module.encoder.time_embed.0.bias' weight
            # import pdb; pdb.set_trace()
# print(old - model.module.encoder.time_embed[0].bias.clone().detach())
# old = model.module.encoder.time_embed[0].bias.clone().detach()
optimizer.step()
# end.record()
# # Waits for everything to finish running
# torch.cuda.synchronize()
# print("Backward: ", start.elapsed_time(end))
# start.record()
if len(losses) == 100:
losses = losses[1:]
losses.append(loss.item())
if dist_util.is_main_process():
if (batch_num + 1) % 100 == 0:
print(f'Epoch: {i+1}/{e}, Batch Num: {batch_num+1}: Loss: {np.mean(losses):0.6f}', flush=True)
if args.use_wandb:
wandb.log({"Loss/train": np.mean(losses), "epoch": (batch_num+1) / len(train_dataloader)})
batch_num += 1
# end.record()
# # Waits for everything to finish running
# torch.cuda.synchronize()
# print("Logging: ", start.elapsed_time(end))
scheduler.step()
if (i + 1) % args.eval_interval == 0:
test(model, test_dataloader, args, 'Val (Test)', i+1)
# Save checkpoint every epoch
if dist_util.is_main_process():
save_file = os.path.join(args.output_dir, f'epoch_latest.pth')
print(f"Saving checkpoint @ Epoch: {i+1} to {save_file}")
save_dict ={
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
'epoch': i+1
}
save_dict['model_head'] = model.module.head.state_dict()
if use_feedback:
save_dict['model_feedback'] = model.module.feedback_layers.state_dict()
if args.mode == 'update':
save_dict['model_update'] = model.module.update_blocks.state_dict()
elif args.mode == 'add_fpn' or args.mode == 'mult_fpn':
save_dict['model_fpn'] = model.module.fpn_blocks.state_dict()
# torch.save(save_dict, save_file)
torch.save(save_dict, os.path.join(args.output_dir, f'latest.pth'))
# https://discuss.pytorch.org/t/ddp-evaluation-gather-output-loss-and-stuff-how-to/130593/2
def sync_tensor_across_gpus(t):
# t needs to have dim 0 for torch.cat below.
# if not, you need to prepare it.
if t is None:
return None
group = dist.group.WORLD
group_size = torch.distributed.get_world_size(group)
gather_t_tensor = [torch.zeros_like(t) for _ in
range(group_size)]
    dist.all_gather(gather_t_tensor, t)  # this works with the nccl backend when tensors need to be on the gpu.
    # for the gloo and mpi backends, tensors need to be on the cpu. this also works on a single machine with
    # multiple gpus. for multiple nodes, you should use dist.all_gather_multigpu; both have the
    # same definition... see [here](https://pytorch.org/docs/stable/distributed.html).
    # somewhere on the same page, it is mentioned that dist.all_gather_multigpu is meant more for
    # multi-node setups. still don't see the benefit of all_gather_multigpu; the working example in
    # the docs is vague...
return torch.cat(gather_t_tensor, dim=0)
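# Editor's note: the gather-then-sum pattern above is only used below to add up
# scalar correct/total counts. As a minimal alternative sketch (not part of the
# original script, assuming the default process group is already initialized),
# the same reduction can be done in place with dist.all_reduce; `correct`,
# `total`, and `device` are hypothetical inputs.
def _sum_counts_across_gpus_sketch(correct, total, device):
    counts = torch.tensor([correct, total], dtype=torch.long, device=device)
    # After the SUM all_reduce, every rank holds the global totals, so no
    # concatenation or per-rank bookkeeping is needed for simple counters.
    dist.all_reduce(counts, op=dist.ReduceOp.SUM)
    return counts[0].item(), counts[1].item()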
def test(model, dataloader, args, split='Test', epoch=0):
model.eval()
num_correct = 0
total = 0
num_val_batches = len(dataloader)
with torch.no_grad():
for batch in tqdm(dataloader, total=num_val_batches):
imgs, extra = batch
imgs = imgs.to(dist_util.dev())
targets = extra["y"].to(dist_util.dev())
output = model(imgs, args.t_list)
pred = torch.argmax(output, dim=1)
# print("Pred:", pred)
# print("Targets:", targets)
num_correct += (pred == targets).sum()
total += pred.shape[0]
# print(dist_util.get_rank(), total, num_correct)
# print("Acc now:", num_correct/total)
all_num_correct = sync_tensor_across_gpus(torch.tensor(num_correct).to(dist_util.dev()).reshape(1))
all_total = sync_tensor_across_gpus(torch.tensor(total).to(dist_util.dev()).reshape(1))
if dist_util.is_main_process():
num_correct = all_num_correct.sum().item()
total = all_total.sum().item()
if args.use_wandb:
wandb.log({f"Accuracy/{split}": num_correct / total, "epoch": epoch})
print(f'{split} accuracy: {num_correct / total}, Num correct: {num_correct}, Total: {total}')
def create_argparser():
defaults = dict(
# dataset
data_dir="",
val_data_dir="",
num_classes=50,
# training setting
schedule_sampler="uniform",
weight_decay=0.0,
lr_anneal_steps=0,
epochs = 50,
lr=1e-2, # use 1e-2 for freeze, 1e-3 for finetune
batch_size=16,
microbatch=-1, # -1 disables microbatches
ema_rate="0.9999", # comma-separated list of EMA values
use_fp16=False,
fp16_scale_growth=1e-3,
mode='freeze', # "freeze" or "finetune" for backbone
# feedback&fusion
head_type="attention", # "linear" or "attention"
head_arc='', #can be "h1_h2_h3" to use mlp head(when head_type=="linear")
norm_type="", # ["batch", "layer", ""],
pre_pool_size=16, # pooling size before attention or linear head
fusion_arc="", # architecture of attention head
feedback_arch='C_B_R', # architecture for feedback network(Conv-BatchNorm-ReLU)
checkpoint_path='', # encoder path
# add distributed training args
dist_url='env://',
dist_backend='nccl',
world_size=1,
# log
output_dir='./output',
resume_checkpoint="",
log_interval=10,
save_interval=10000,
eval_interval=5,
only_eval=False,
wandb_run_name=None,
use_wandb=True,
)
defaults.update(model_and_diffusion_defaults())
parser = argparse.ArgumentParser()
parser.add_argument("--local-rank", type=int, default=0,
help="For distributed training.")
parser.add_argument("--t_list", nargs='+', required=True, type=int, help="list of noise step t to use")
parser.add_argument("--first_fw_b_list", nargs='+', required=True, type=int, help="list of feature block to use from first forward(-1 if not used)")
parser.add_argument("--second_fw_b_list", nargs='+', required=True, type=int, help="list of feature block to use from second forward(=feedback)(-1 if not used)")
parser.add_argument("--feedback_b_list", nargs='+', required=True, type=int, help="list of feature block for feedback(-1 if not used)")
add_dict_to_argparser(parser, defaults)
return parser
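# Editor's note: an illustrative, hypothetical parse of the required list-valued
# flags; -1 is the sentinel for "unused" that main() strips out below. This
# sketch is not part of the original script and all values shown are made up.
def _example_parse_args_sketch():
    parser = create_argparser()
    return parser.parse_args([
        "--t_list", "50", "150",              # noise steps to extract features at
        "--first_fw_b_list", "8", "12",       # feature blocks from the first forward pass
        "--second_fw_b_list", "-1",           # no second (feedback) forward pass
        "--feedback_b_list", "-1",            # no feedback blocks
        "--data_dir", "/hypothetical/train",  # placeholder dataset path
    ])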
def main():
print('Reading args')
args = create_argparser().parse_args()
args.device = 'cuda'
if args.first_fw_b_list[0] == -1:
args.first_fw_b_list = []
if args.second_fw_b_list[0] == -1:
args.second_fw_b_list = []
use_feedback = len(args.second_fw_b_list) > 0
if args.feedback_b_list[0] == -1:
args.feedback_b_list = []
assert use_feedback==False, "blocks for feedback are not specified"
if args.head_type == "linear":
assert len(args.first_fw_b_list) == 1 and len(args.t_list) == 1 and len(args.second_fw_b_list) == 0, "linear head cannot be used for feedback/fusion"
print('Setting up dist')
# Setup CUDA, GPU & distributed training
args.num_gpus = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
args.distributed = args.num_gpus > 1
# args.distributed = True
args.device = torch.device(args.device)
if args.distributed:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.local_rank = int(os.environ['LOCAL_RANK'])
print("")
print("Init distributed training on local rank {} ({}), world size {}".format(args.local_rank, int(os.environ["LOCAL_RANK"]), args.num_gpus))
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(
backend='nccl', init_method=args.dist_url, world_size=args.world_size, rank=args.rank
)
local_rank = int(os.environ["LOCAL_RANK"])
args.device = torch.device("cuda", local_rank)
torch.distributed.barrier()
if dist_util.is_main_process():
print(f'args: {args}')
exist_ok = len(glob.glob(os.path.join(args.output_dir, "*"))) == 0 or (args.output_dir == os.path.dirname(args.resume_checkpoint)) or (os.path.basename(args.output_dir) == "debug")
os.makedirs(args.output_dir, exist_ok=exist_ok)
print('Creating model')
if args.head_type =="linear": | head = Head(args, DM_FEAT_DIM_DICT,DM_FEAT_SIZE_DICT) | 1 | 2023-11-29 17:46:18+00:00 | 12k |
Uli-Z/autoPDFtagger | autoPDFtagger/AIAgents_OpenAI_pdf.py | [
{
"identifier": "AIAgent_OpenAI",
"path": "autoPDFtagger/AIAgents.py",
"snippet": "class AIAgent_OpenAI(AIAgent):\n def __init__(self, \n model=\"gpt-3.5-turbo-1106\", \n system_message=\"You are a helpful assistant\"):\n super().__init__()\n \n self.api_key = api_key\n self.client = OpenAI(api_key=self.api_key)\n self.set_model(model)\n self.messages = []\n self.add_message(system_message, role=\"system\")\n\n # Cost-Control\n self.max_tokens=4096\n self.cost = 0\n\n\n def add_message(self, content, role=\"user\"):\n self.messages.append({\"role\": role, \"content\": content})\n\n @tenacity.retry(\n wait=tenacity.wait_random_exponential(min=5, max=60), \n stop=tenacity.stop_after_attempt(6))\n def send_request(self,\n temperature=0.7,\n response_format=\"text\" # Alt: \"object-json\"\n ):\n logging.debug(\"Trying to send API-Request\")\n # Temporäres Ändern des Logging-Levels\n original_level = logging.getLogger().getEffectiveLevel()\n logging.getLogger().setLevel(logging.ERROR) # Ändere das Logging-Level auf ERROR\n\n try:\n encoding = tiktoken.encoding_for_model('gpt-3.5-turbo')\n\n\n if response_format:\n response = self.client.chat.completions.create(\n model=self.model,\n messages=self.messages,\n response_format={\"type\": response_format},\n temperature=temperature,\n max_tokens=self.max_tokens\n ) \n else: \n response = self.client.chat.completions.create(\n model=self.model,\n messages=self.messages,\n temperature=temperature,\n max_tokens=self.max_tokens\n ) \n\n # Logging Data in seperate file if log_file is set\n self.write_to_log_file(\n \"API-REQUEST:\\n\" \n + pprint.pformat(self.messages) \n + \"\\n\\nAPI-ANSWER:\\n\" \n + pprint.pformat(response))\n\n self.cost += self.get_costs(response.usage.prompt_tokens, response.usage.completion_tokens)\n\n \n logging.getLogger().setLevel(original_level)\n\n return self.clean_json(response.choices[0].message.content)\n \n except Exception as e: \n logging.error(e)\n # Restore original logging level\n logging.getLogger().setLevel(original_level)\n raise e\n\n def get_costs(self, token_input, token_output):\n if self.model in OpenAI_model_pricelist:\n cost_per_token_input, cost_per_token_output, limit = OpenAI_model_pricelist[self.model]\n total_cost_input = token_input * cost_per_token_input / 1000\n total_cost_output = token_output * cost_per_token_output / 1000\n return total_cost_input + total_cost_output\n else:\n raise ValueError(\"Model '\" + self.model + \"' not found in the price list.\")\n\n def set_model(self, model):\n if model in OpenAI_model_pricelist:\n self.model = model\n else:\n raise ValueError(\"Model '\" + model + \"' not available.\")"
},
{
"identifier": "OpenAI_model_pricelist",
"path": "autoPDFtagger/AIAgents.py",
"snippet": "LANGUAGE = config['DEFAULT']['language']\nclass AIAgent:\nclass AIAgent_OpenAI(AIAgent):\n def __init__(self): \n def send_request(self, user_message):\n def clean_json(self, json_text):\n def write_to_log_file(self, text):\n def __init__(self, \n model=\"gpt-3.5-turbo-1106\", \n system_message=\"You are a helpful assistant\"):\n def add_message(self, content, role=\"user\"):\n def send_request(self,\n temperature=0.7,\n response_format=\"text\" # Alt: \"object-json\"\n ):\n def get_costs(self, token_input, token_output):\n def set_model(self, model):"
},
{
"identifier": "config",
"path": "autoPDFtagger/config.py",
"snippet": ""
},
{
"identifier": "PDFDocument",
"path": "autoPDFtagger/PDFDocument.py",
"snippet": "class PDFDocument:\n \"\"\"\n Class for handling operations on PDF documents.\n Includes reading, analyzing, and extracting information from PDF files.\n \"\"\"\n def __init__(self, path, base_directory):\n\n # Validate and initialize file paths\n if not os.path.exists(path):\n raise ValueError(f\"File {path} does not exist\")\n if not os.path.exists(base_directory):\n raise ValueError(f\"Basedirectory {base_directory} does not exist\")\n self.file_name = os.path.basename(path)\n self.folder_path_abs = os.path.dirname(os.path.abspath(path))\n self.base_directory_abs = os.path.abspath(base_directory)\n self.relative_path = os.path.relpath(self.folder_path_abs, self.base_directory_abs)\n\n # Initialize parameters for analysis\n self.summary = \"\"\n self.summary_confidence = 0\n self.title = \"\"\n self.title_confidence = 0\n self.creation_date = \"\"\n self.creation_date_confidence = 0\n self.creator = \"\"\n self.creator_confidence = 0\n self.tags = []\n self.tags_confidence = []\n self.importance = None\n self.importance_confidence = 0\n\n # Analyze document\n self.modification_date = self.get_modification_date()\n self.pages = []\n self.images = []\n self.images_already_analyzed = False\n self.image_coverage = None\n self.pdf_text = \"\"\n\n def get_absolute_path(self):\n return os.path.join(self.folder_path_abs, self.file_name)\n\n # Get text stored inside the document\n def get_pdf_text(self):\n if not self.pdf_text:\n self.pdf_text = self.read_ocr()\n return self.pdf_text\n \n\n def analyze_file(self):\n \"\"\"\n Performs an analysis of the document. \n It extracts the date, title, and tags from the document's filename and relative path.\n \"\"\"\n # Extract the creation date from the file name\n self.extract_date_from_filename()\n\n # Extract the title from the file name\n self.extract_title_from_filename()\n\n # Extract tags from the relative path\n self.extract_tags_from_relative_path()\n\n # Extract useful information from Metadata\n self.extract_metadata()\n \n def save_to_file(self, new_file_path):\n \"\"\"\n Saves the current state of the PDF document to a new file.\n This includes updating the metadata based on the current attributes of the object.\n \"\"\"\n # Ensure the directory for the new file exists\n os.makedirs(os.path.dirname(new_file_path), exist_ok=True)\n\n # Open the existing PDF document\n pdf_document = fitz.open(self.get_absolute_path())\n\n # Update the metadata of the PDF document\n metadata = pdf_document.metadata\n metadata['title'] = self.title\n metadata['summary'] = self.summary\n metadata['author'] = self.creator\n metadata['keywords'] = ', '.join(self.tags)\n\n # Storing additional information about confidences in keyword-list\n tags_confidence_str = ','.join([str(conf) for conf in self.tags_confidence])\n metadata['keywords'] = f\"{metadata['keywords']} - Metadata automatically updated by autoPDFtagger, title_confidence={self.title_confidence}, summary_confidence={self.summary_confidence}, creation_date_confidence={self.creation_date_confidence}, creator_confidence={self.creator_confidence}, tag_confidence={tags_confidence_str}\"\n\n if self.creation_date:\n # Konvertiere das Datum in das PDF-Format\n # Annahme: Die Zeitzone ist UTC\n utc_creation_date = self.creation_date.astimezone(pytz.utc)\n metadata['creationDate'] = utc_creation_date.strftime(\"D:%Y%m%d%H%M%S+00'00'\")\n\n pdf_document.set_metadata(metadata)\n \n # Save the updated document to the new file path\n pdf_document.save(new_file_path)\n pdf_document.close()\n 
logging.info(f\"PDF saved: {new_file_path}\")\n\n\n def to_dict(self):\n \"\"\"\n Converts the PDF document's data into a dictionary format.\n This includes paths, text content, metadata, and analyzed information.\n \"\"\"\n pdf_dict = {\n \"folder_path_abs\": os.path.dirname(self.get_absolute_path()),\n \"relative_path\": self.relative_path,\n \"base_directory_abs\": self.base_directory_abs,\n \"file_name\": self.file_name,\n \"summary\": self.summary,\n \"summary_confidence\": self.summary_confidence,\n \"title\": self.title,\n \"title_confidence\": self.title_confidence,\n \"creation_date\": self.get_creation_date_as_str(),\n \"creation_date_confidence\": self.creation_date_confidence,\n \"creator\": self.creator,\n \"creator_confidence\": self.creator_confidence,\n \"tags\": self.tags,\n \"tags_confidence\": self.tags_confidence,\n \"importance\": self.importance,\n \"importance_confidence\": self.importance_confidence\n }\n return pdf_dict\n\n\n def to_api_json(self):\n \"\"\"\n Converts selected attributes of the PDF document into a JSON string.\n This JSON representation can be used for API interactions.\n \"\"\"\n return json.dumps({\n \"summary\": self.summary,\n \"summary_confidence\": self.summary_confidence,\n \"title\": self.title,\n \"title_confidence\": self.title_confidence,\n \"creation_date\": self.get_creation_date_as_str(),\n \"creation_date_confidence\": self.creation_date_confidence,\n \"creator\": self.creator,\n \"creator_confidence\": self.creator_confidence,\n \"tags\": self.tags,\n \"tags_confidence\": self.tags_confidence,\n \"importance\": self.importance,\n \"importance_confidence\": self.importance_confidence\n })\n\n def read_ocr(self):\n \"\"\"\n Reads and extracts text from all pages of the PDF document.\n Cleans the text by removing non-readable characters and replacing line breaks.\n \"\"\"\n try:\n pdf_document = fitz.open(self.get_absolute_path())\n\n # Initialize text extraction\n pdf_text = \"\"\n for page_num in range(len(pdf_document)):\n page = pdf_document[page_num]\n page_text = page.get_text(\"text\")\n pdf_text += page_text\n\n # Clean text by removing unwanted characters and line breaks\n pdf_text = pdf_text.replace('\\n', ' ').replace('\\r', ' ')\n pdf_text = re.sub(r'[^a-zA-Z0-9 .:äöüÄÖÜß/]+', '', pdf_text)\n\n pdf_document.close()\n #logging.debug(f\"Extracted text from {self.file_name}:\\n{self.pdf_text}\\n----------------\\n\")\n return pdf_text\n\n except Exception as e:\n logging.error(f\"Failed to extract text from {self.file_name}:\\nError Message: {e}\")\n return None\n\n\n def create_thumbnail(self, thumbnail_filename, max_width=64):\n \"\"\"\n Creates a thumbnail image of the first page of the PDF document.\n The thumbnail is saved as a PNG image.\n \"\"\"\n try:\n pdf_path = self.get_absolute_path()\n pdf_document = fitz.open(pdf_path)\n \n # Select the first page for the thumbnail\n page = pdf_document[0]\n\n # Create a pixmap object from the page and save as a PNG image\n pix = page.get_pixmap(dpi=50)\n pix.save(thumbnail_filename)\n pdf_document.close()\n\n logging.info(f\"Thumbnail created: {thumbnail_filename}\")\n return\n except Exception as e:\n logging.error(f\"Error creating thumbnail: {e}\")\n return None\n\n \n def get_png_image_base64_by_xref(self, xref):\n \"\"\"\n Extracts a PNG image from the PDF using its xref (cross-reference) and encodes it in base64.\n This method is useful for extracting and transmitting images in a format suitable for web use.\n \"\"\"\n logging.debug(f\"Extracting Image {xref} from Document 
{self.file_name}\")\n try:\n pdf_path = self.get_absolute_path()\n pdf_fitz = fitz.open(pdf_path)\n\n # Create a pixmap (image) object from the PDF based on the provided xref\n pix = fitz.Pixmap(pdf_fitz, xref)\n\n # Convert the pixmap object to PNG bytes and then encode to base64\n img_bytes = pix.tobytes(\"png\")\n encoded_image = base64.b64encode(img_bytes).decode()\n\n pdf_fitz.close()\n logging.debug(\"Returning \" + str(len(encoded_image)) + \" character base_64\")\n return encoded_image\n\n except Exception as e:\n logging.error(f\"Error extracting PNG image by xref: {e}\")\n return None\n\n\n def get_modification_date(self):\n try:\n modification_date = os.path.getmtime(self.get_absolute_path())\n modification_date = datetime.fromtimestamp(modification_date)\n return modification_date\n except Exception as e:\n return None\n\n def extract_date_from_filename(self):\n \"\"\"\n Extracts the creation date from the file name using predefined regular expressions.\n Sets the creation date of the document if a matching date format is found.\n \"\"\"\n for regex, date_format in date_formats.items():\n date_match = re.search(regex, self.file_name)\n if date_match:\n date_string = date_match.group()\n try:\n # Parse the date string according to the matched format\n date_object = datetime.strptime(date_string, date_format)\n # Set the creation date with a high confidence level\n self.set_creation_date(date_object.strftime(\"%Y-%m-%d\"), 8)\n return\n except ValueError:\n # Continue searching if the current format does not match\n continue\n\n # Return None if no date format matches\n return None\n\n\n def extract_title_from_filename(self):\n \"\"\"\n Extracts the title from the file name by removing date information and unwanted characters.\n Sets the title of the document with a moderate confidence level.\n \"\"\"\n # Remove date from the file name\n file_name = self.file_name\n for regex in date_formats:\n file_name = re.sub(regex, '', file_name).strip()\n\n # Remove additional characters and use the rest as the title\n file_name = re.sub(r'[^\\w\\s.-]', '', file_name)\n file_name = re.sub(r'^-|\\.pdf$', '', file_name)\n\n # Set the extracted file name as the title\n self.set_title(file_name, 2)\n\n\n def extract_tags_from_relative_path(self):\n \"\"\"\n Extracts tags from the relative path of the PDF file. 
\n Tags are derived from the directory names in the path.\n Sets the extracted tags with a moderate confidence level.\n \"\"\"\n # Split the relative path by the directory separator and clean up tags\n tags = self.relative_path.split(os.path.sep)\n tags = [tag.strip() for tag in tags if tag.strip()] # Remove empty entries\n tags = [tag.strip() for tag in tags if tag != \".\"]\n # Set extracted tags if any are found\n if tags:\n self.tags = tags\n self.tags_confidence = [6] * len(tags) # Moderate confidence for each tag\n\n def extract_metadata(self):\n \"\"\"\n Extracts metadata such as title, summary, and keywords from the PDF document.\n Updates the class attributes based on the extracted metadata.\n \"\"\"\n try:\n pdf_document = fitz.open(self.get_absolute_path())\n metadata = pdf_document.metadata\n\n # Default confidence value\n default_confidence = 5\n\n def extract_confidence(pattern, text, default_confidence):\n \"\"\"Extracts a confidence value using regex or returns the default value.\"\"\"\n match = re.search(pattern, text)\n return float(match.group(1)) if match else default_confidence\n\n\n # Extract confidence values\n keywords = metadata.get('keywords', '')\n title_conf = extract_confidence(r\"title_confidence=(\\d+\\.?\\d*)\", keywords, default_confidence)\n summary_conf = extract_confidence(r\"summary_confidence=(\\d+\\.?\\d*)\", keywords, default_confidence)\n creation_date_conf = extract_confidence(r\"creation_date_confidence=(\\d+\\.?\\d*)\", keywords, default_confidence)\n creator_conf = extract_confidence(r\"creator_confidence=(\\d+\\.?\\d*)\", keywords, default_confidence)\n tags_conf_str = extract_confidence(r\"tag_confidence=([\\d,.]+)\", keywords, '')\n\n # Set metadata values if not empty\n if metadata.get('title'):\n self.set_title(metadata['title'], title_conf)\n if metadata.get('summary'):\n self.set_summary(metadata['summary'], summary_conf)\n if metadata.get('creationDate'):\n self.set_creation_date(metadata['creationDate'], creation_date_conf)\n if metadata.get('author'):\n self.set_creator(metadata['author'], creator_conf)\n\n # Process tag confidence values\n tag_confidences = [float(conf) for conf in tags_conf_str.split(',')] if tags_conf_str else [default_confidence]\n keywords = metadata.get('keywords', '').split(', ')[:len(tag_confidences)]\n self.set_tags(keywords, tag_confidences)\n\n pdf_document.close()\n\n except Exception as e:\n logging.error(f\"Error extracting metadata from {self.file_name}: {e}\")\n traceback.print_exc()\n\n\n def analyze_document_images(self):\n \"\"\"\n Analyzes images in the PDF document. 
It calculates the total image area and page area\n and counts the number of words on each page.\n \"\"\"\n if self.images_already_analyzed:\n return\n \n pdf_path = self.get_absolute_path()\n pdf_document = fitz.open(pdf_path)\n\n self.images = []\n self.pages = []\n self.total_image_area = 0\n self.total_page_area = 0\n\n word_regex = re.compile(r'[a-zA-ZäöüÄÖÜß]{3,}')\n\n for page_num, page in enumerate(pdf_document):\n page_images, page_image_area, max_img_xref = self.analyze_page_images(page)\n page_data = self.analyze_page_data(page, page_num, max_img_xref)\n\n # Append image and page data to respective lists\n self.images.append(page_images)\n self.pages.append(page_data)\n\n # Accumulate total image and page areas\n self.total_image_area += page_image_area\n self.total_page_area += page_data['page_area']\n\n # Extract and count words on the page\n page_text = page.get_text(\"text\")\n page_data['words_count'] = len(word_regex.findall(page_text))\n\n pdf_document.close()\n\n # Calculate the percentage of the document covered by images\n self.image_coverage = (self.total_image_area / self.total_page_area) * 100 if self.total_page_area > 0 else 0 \n self.images_already_analyzed = True\n\n\n def analyze_page_data(self, page, page_num, max_img_xref):\n \"\"\"\n Analyzes basic data of a page such as dimensions and area.\n \"\"\"\n page_area = page.rect.width * page.rect.height\n return {\n \"page_number\": page_num + 1,\n \"width\": page.rect.width,\n \"height\": page.rect.height,\n \"page_area\": page_area,\n \"max_img_xref\": max_img_xref\n }\n\n def analyze_page_images(self, page):\n \"\"\"\n Analyzes images on a page, extracting details and calculating the total image area.\n \"\"\"\n page_images = []\n page_image_area = 0\n images = page.get_images(full=True)\n max_img_area = 0\n max_img_xref = None\n\n for img in images:\n image_data = self.extract_image_data(page, img)\n page_images.append(image_data)\n page_image_area += image_data['area']\n\n # Überprüfe, ob dieses Bild die größte Flächenabdeckung hat\n if image_data['area'] > max_img_area:\n max_img_area = image_data['area']\n max_img_xref = image_data['xref']\n\n return page_images, page_image_area, max_img_xref\n\n def extract_image_data(self, page, img):\n \"\"\"\n Extracts data of a single image, including dimensions, area, and coverage percentage.\n \"\"\"\n xref, img_area, rect = img[0], 0, None\n img_rects = page.get_image_rects(xref)\n if img_rects:\n rect = img_rects[0]\n img_area = rect.width * rect.height\n\n pix = fitz.Pixmap(page.parent, xref)\n return {\n \"xref\": xref,\n \"width\": rect.width if rect else 0,\n \"height\": rect.height if rect else 0,\n \"original_width\": pix.width,\n \"original_height\": pix.height,\n \"area\": img_area,\n \"page_coverage_percent\": (img_area / page.rect.width * page.rect.height) * 100 if rect else 0\n }\n \n\n\n def set_title(self, title, confidence):\n \"\"\"\n Sets the title of the document with a given confidence level.\n The title is updated only if the new confidence level is equal to or higher than the current level.\n \"\"\"\n if confidence >= self.title_confidence:\n self.title = title\n self.title_confidence = confidence\n else: logging.info(\"Title not set due to lower confidence-level\")\n\n def set_creation_date(self, creation_date, confidence):\n \"\"\"\n Sets the creation date of the document with a given confidence level.\n The date is validated against predefined formats and updated only if the new confidence is high enough.\n \"\"\"\n if not creation_date:\n 
self.creation_date = None\n return\n \n date_obj = None\n for regex, date_format in date_formats.items():\n if re.match(regex, creation_date):\n try:\n date_obj = datetime.strptime(creation_date, date_format)\n break\n except ValueError:\n continue # Try the next format if the current one does not match\n\n if date_obj:\n if confidence >= self.creation_date_confidence:\n self.creation_date = date_obj\n self.creation_date_confidence = confidence\n else: logging.info(\"Creation date not set due to lower confidence-level\")\n \n\n \n def set_summary(self, summary, confidence):\n \"\"\"\n Sets the summary of the document with a given confidence level.\n The summary is updated only if the new confidence level is equal to or higher than the current level.\n \"\"\"\n if confidence >= self.summary_confidence:\n self.summary = summary\n self.summary_confidence = confidence\n else: logging.info(\"Summary not set due to lower confidence-level\")\n\n def set_creator(self, creator, confidence):\n \"\"\"\n Sets the creator of the document with a given confidence level.\n The creator is updated only if the new confidence level is equal to or higher than the current level.\n \"\"\"\n if confidence >= self.creator_confidence:\n self.creator = creator\n self.creator_confidence = confidence\n \n else: logging.info(\"Creator not set due to lower confidence-level\")\n\n def set_importance(self, importance, confidence):\n \"\"\"\n Sets the importance of the document with a given confidence level.\n The importance is updated only if the new confidence level is equal to or higher than the current level.\n \"\"\"\n if confidence >= self.importance_confidence:\n self.importance = importance\n self.importance_confidence = confidence\n else: logging.info(\"Importance not set due to lower confidence-level\")\n\n def set_tags(self, tag_list, confidence_list):\n \"\"\"\n Sets tags for the document with corresponding confidence levels.\n Validates the length of the tag and confidence lists and updates the tags only if the \n confidence level is high enough.\n \"\"\"\n if len(tag_list) != len(confidence_list):\n raise ValueError(\"Length of tag_list and confidence_list must be equal.\")\n\n for tag, confidence in zip(tag_list, confidence_list):\n self.tags.append(tag)\n self.tags_confidence.append(confidence)\n\n def set_from_json(self, input_json):\n \"\"\"\n Updates the PDFDocument object's attributes from a JSON string.\n The JSON string should represent a dictionary of attribute values.\n \"\"\"\n try:\n # Convert the JSON string into a Python dictionary\n input_dict = json.loads(input_json)\n\n # Update values in the PDFDocument object using the dictionary\n self.set_from_dict(input_dict)\n return True\n except Exception as e:\n logging.error(f\"Error while processing the JSON string: {e}\")\n traceback.print_exc()\n return None\n\n\n def set_from_dict(self, input_dict):\n \"\"\"\n Updates the attributes of the PDFDocument object from a given dictionary.\n The dictionary should contain key-value pairs corresponding to the attributes of the PDFDocument.\n \"\"\"\n\n # Update the title if provided in the input dictionary\n if 'title' in input_dict and 'title_confidence' in input_dict:\n self.set_title(input_dict['title'], input_dict['title_confidence'])\n\n # Update the summary if provided in the input dictionary\n if 'summary' in input_dict and 'summary_confidence' in input_dict:\n self.set_summary(input_dict['summary'], input_dict['summary_confidence'])\n\n # Update the creation date if provided in the input 
dictionary\n if 'creation_date' in input_dict and 'creation_date_confidence' in input_dict:\n self.set_creation_date(input_dict['creation_date'], input_dict['creation_date_confidence'])\n\n # Update the creator if provided in the input dictionary\n if 'creator' in input_dict and 'creator_confidence' in input_dict:\n self.set_creator(input_dict['creator'], input_dict['creator_confidence'])\n\n\n # Update the importance if provided in the input dictionary\n if 'importance' in input_dict and 'importance_confidence' in input_dict:\n self.set_importance(input_dict['importance'], input_dict['importance_confidence'])\n\n # Update the tags if provided in the input dictionary\n if 'tags' in input_dict and 'tags_confidence' in input_dict:\n self.set_tags(input_dict['tags'], input_dict['tags_confidence'])\n\n\n\n def get_confidence_if_tag_exists(self, tag):\n \"\"\"\n Returns the confidence level of a given tag if it exists in the tags list.\n Returns False if the tag is not present.\n \"\"\"\n if tag in self.tags:\n index = self.tags.index(tag)\n return self.tags_confidence[index]\n return False\n\n # Calculate a single number to represent the overall confidence\n # of the documents metadata to be uses to sort and filter documents. \n # In the future we need to find a more sophisticated method...\n def get_confidence_index(self):\n # Tage average of all confidences excluding tags\n average = (\n self.creation_date_confidence\n + self.title_confidence\n + self.summary_confidence\n + self.importance_confidence\n + self.creator_confidence\n ) / 5\n # Title and creation date are most relevant, they define the lower limit\n return min(average, self.title_confidence, self.creation_date_confidence)\n\n def add_parent_tags_recursive(self, tag_hierarchy: dict):\n \"\"\"\n Recursively adds parent tags from a tag hierarchy.\n The hierarchy should be provided in a nested dictionary format.\n Returns the highest confidence level found in the hierarchy.\n \"\"\"\n confidence = 0\n for tag in tag_hierarchy: \n # Recursively process the child tags\n confidence_new = self.add_parent_tags_recursive(tag_hierarchy[tag])\n if confidence_new:\n self.set_tags([tag], [confidence_new])\n # Update the confidence level with the highest value\n confidence = max(confidence_new, confidence, self.get_confidence_if_tag_exists(tag))\n return confidence\n\n def apply_tag_replacements(self, replacements):\n \"\"\"\n Applies tag replacements based on a given replacement mapping.\n The method updates tags, their confidence, and specificity values accordingly.\n \"\"\"\n # Create a mapping from original to new tags, ignoring empty replacements\n replacement_dict = {rep['original']: rep['replacement'] for rep in replacements}\n\n # Structure to store the updated confidence and specificity for each tag\n tag_info = {}\n\n for i, tag in enumerate(self.tags):\n # Determine the new tag, default to the original tag if no replacement is found\n new_tag = replacement_dict.get(tag, tag)\n if new_tag != \"\": \n # Access or default confidence and specificity values\n confidence = self.tags_confidence[i] if i < len(self.tags_confidence) else 0\n\n # Update with the latest values for confidence and specificity\n tag_info[new_tag] = {\n 'confidence': confidence\n }\n\n # Update the class attributes with the final lists\n self.tags = list(tag_info.keys())\n self.tags_confidence = [info['confidence'] for info in tag_info.values()]\n \n def get_short_description(self):\n return (\n \"Filename: \" + self.file_name + \", \"\n + \"Path: \" + 
self.relative_path + \"\\n\"\n + \"Content: \" + self.get_pdf_text()\n )\n\n def has_sufficient_information(self, threshold=7): \n return self.get_confidence_index() >= threshold\n \n def get_creation_date_as_str(self):\n return self.creation_date.strftime(\"%Y-%m-%d\") if self.creation_date else None\n \n def get_image_number(self): \n self.analyze_document_images()\n return len(self.images)\n\n def create_new_filename(self, format_str=\"%Y-%m-%d-{CREATOR}-{TITLE}.pdf\"):\n \"\"\"\n Creates a new filename based on a specified format.\n The format can include date formatting strings and {TITLE} as a placeholder for the document title.\n If no format is provided, the default format \"YY-MM-DD-{TITLE}.pdf\" is used.\n \"\"\"\n # Replace date parts in the format with the actual date\n if self.creation_date:\n date_str = self.creation_date.strftime(format_str)\n else:\n # If no creation date is available, use the modification date\n date_str = self.modification_date.strftime(format_str)\n\n # Replace {TITLE} with the document title\n new_filename = date_str.replace('{TITLE}', self.title)\n new_filename = new_filename.replace('{CREATOR}', self.creator)\n # Store the new filename\n self.new_file_name = new_filename\n return self"
}
] | from autoPDFtagger.AIAgents import AIAgent_OpenAI
from autoPDFtagger.AIAgents import OpenAI_model_pricelist
from autoPDFtagger.config import config
from autoPDFtagger.PDFDocument import PDFDocument
import logging
import json
import pprint
import re
import copy
import tiktoken | 10,135 | try:
pdf_document.set_from_json(response)
except Exception as e:
logging.error("API-Call for image analysis failed")
logging.error(e)
if pdf_document.has_sufficient_information():
logging.info("Document information sufficient. Proceeding with next document.")
return pdf_document
else:
logging.info("Still lacking information, looking for more images")
logging.info("No more images found.")
return pdf_document
def process_images_by_page(self, pdf_document: PDFDocument):
for page in pdf_document.pages:
logging.debug(f"Checking Page {page['page_number']} looking for largest image")
# Skip page if no images are present
if 'max_img_xref' not in page or not page['max_img_xref']:
logging.debug("Page not analyzed: (no images)")
continue
            # Get the largest image of the page (assuming it to be the scan image)
image_base64 = pdf_document.get_png_image_base64_by_xref(page['max_img_xref'])
# Send it to GPT
logging.info("Asking AI for analyzing scanned page")
response = self.send_image_request(pdf_document, [image_base64])
try:
pdf_document.set_from_json(response)
except Exception as e:
logging.error("API-Call for image analysis failed")
logging.error(e)
# Only proceed if the information about the document is still insufficient
if pdf_document.has_sufficient_information():
logging.info("Document information sufficient, proceeding.")
return pdf_document
else:
logging.info("Still lacking information, looking for more pages")
logging.info("No more pages available.")
return pdf_document
# TEXT-Analysis
class AIAgent_OpenAI_pdf_text_analysis(AIAgent_OpenAI):
def __init__(self):
system_message = (
"You are a helpful assistant analyzing OCR outputs. It's important "
"to remember that these outputs may represent only a part of the document. "
"Provide the following information:\n"
"1. Creation date of the document.\n"
"2. A short title of 3-4 words.\n"
"3. A meaningful summary of 3-4 sentences.\n"
"4. Creator/Issuer\n"
"5. Suitable keywords/tags related to the content.\n"
"6. Rate the importance of the document on a scale from 0 (unimportant) to "
"10 (vital).\n"
"7. Rate your confidence for each of the above points on a scale "
"from 0 (no information, text not readable) over 5 (possibly right, but only "
"few hints about the content of the whole document) to 10 (very sure). "
"You always answer in {LANGUAGE} language. For gathering information, "
"you use the given filename, pathname and OCR-analyzed text. "
"If you are seeing a blank document, your title-confidence is alway 0."
"You always answer in a specified JSON-Format like in this example:\n"
"{\n"
" 'summary': '[summary]',\n"
" 'summary_confidence': [number],\n"
" 'title': '[title]',\n"
" 'title_confidence': [number],\n"
" 'creation_date': '[Date YY-mm-dd]',\n"
" 'creation_date_confidence': [number],\n"
" 'creator': '[creator name]',\n"
" 'creator_confidence': [number],\n"
" 'tags': ['[tag 1]', '[tag 2]', ...],\n"
" 'tags_confidence': [[confidence tag 1], [confidence tag 2]],\n"
" 'importance': [number],\n"
" 'importance_confidence': [number]\n"
"}"
)
# Parent constructor
super().__init__(model="gpt-4-1106-preview", system_message=system_message)
self.response_format="json_object"
# Main working function to get info about a PDFDocument by
# sending a GPT-API-Request
def analyze_text(self, pdf_document: PDFDocument):
# Step 1: Analyze the number of potentially meaningful words
# to decide which model to use
# GPT-3.5 is good enough for long texts and much cheaper.
# Especially in shorter texts, GPT-4 gives much more high-quality answers
word_count = len([word for word in re.split(r'\W+', pdf_document.get_pdf_text()) if len(word) >= 3])
model_choice = "gpt-3.5-turbo-1106" if word_count > 100 else "gpt-4-1106-preview"
#model_choice = "gpt-4-1106-preview" # for test purposes
logging.debug("Opting for " + model_choice)
self.set_model(model_choice)
message = ("Analyze following OCR-Output. Try to imagine as many valuable keywords and categories as possible. "
"Imagine additional keywords thinking of a wider context and possible categories in an archive system. "
f"Use {LANGUAGE} Language. Answer in the given pattern (JSON): "
+ pdf_document.get_short_description()
)
# in case of very long text, we have to shorten it depending on
# the specific token-limit of the actual model
# Optimize request data
# Convert message-list to str
request_test_str = pprint.pformat(self.messages)
# estimate token-number for request
num_tokens = num_tokens_from_string(request_test_str)
# estimate 500 Tokens for answer
tokens_required = num_tokens + 500
# max tokens of the actual model stored in price list table
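        # Editor's note (illustrative only, not the original continuation): if the
        # request were to exceed the model's context window, one hypothetical way to
        # trim the text with tiktoken before sending would be:
        #   enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
        #   toks = enc.encode(message)
        #   message = enc.decode(toks[:token_budget])  # token_budget: assumed limit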
| # Here, the task-specific AI agents for text analysis,
# image analysis, and keyword/tag analysis are specified.
# The general logic for API communication with OpenAI is
# established in the parent classes AIAgent and AIAgent_OpenAI
# (see AIAgents.py).
api_key = config['OPENAI-API']['API-Key']
LANGUAGE = config['DEFAULT']['language']
# IMAGE-Analysis
class AIAgent_OpenAI_pdf_image_analysis(AIAgent_OpenAI):
def __init__(self):
system_message = f"""
You are a helpful assistant analyzing images inside of documents.
Based on the shown images, provide the following information:\n
1. Creation date of the document.\n
2. A short title of 3-4 words.\n
3. A short summary of 3-4 sentences.\n
4. Creator/Issuer\n
5. Suitable keywords/tags related to the content.\n
6. Rate the importance of the document on a scale from 0 (unimportant) to 10 (vital).\n
7. Rate your confidence for each of the above points on a scale from 0 (no information)
over 5 (possibly right, but only few hints) to 10 (very sure).
You always answer in {LANGUAGE} language. For gathering information,
you use the given filename, pathname and ocr-analyzed text. You always
answer in a specified JSON-Format which is given in the question. """
# calling parent class constructor
super().__init__(model="gpt-4-vision-preview", system_message=system_message)
self.response_format="json_object"
# Main function of this class: Try to extract relevant metadata
# out of a PDFDocument (pdf_document) by analyzing their images
# and return result as a json-string
def analyze_images(self, pdf_document: PDFDocument):
pdf_document.analyze_document_images()
# Prevent modifying the original document
working_doc = copy.deepcopy(pdf_document)
# For the general requirement of this function,
# I have different approaches, which vary depending
# on the structure of the document. In the case of a
# scanned document, at least the first page should be
# completely analyzed, as it is where most of the relevant
# information (title, summary, date) can be expected.
# Subsequent pages should only be examined for cost reasons
# if the collected information is insufficient. This approach
# is implemented in the function process_images_by_page.
# In cases where the document is not a scanned one, but an
# originally digital document, images contained within it
# (of a certain minimum size) can also hold valuable information.
# These images can be analyzed in blocks (currently 3 at a time)
# by GPT. We start with the largest images. At least one block
# will be analyzed. Additional blocks only if the quality of the
# metadata is not yet sufficient (similar to process_images_by_pages).
# This approach is specified by the function process_images_by_size.
# No scanned document
if pdf_document.image_coverage < 100:
logging.info("Analyzing smaller Images")
            # Sort all images above a certain minimum size and, starting with the
            # largest, send them to GPT-Vision in groups (currently three at a time)
            # in separate requests, until either all images have been analyzed or
            # has_sufficient_information returns true
working_doc = self.process_images_by_size(working_doc)
# Scanned document
if pdf_document.image_coverage >= 100:
            # Present every page containing fewer than 100 words to GPT-Vision as an image
logging.info("Recognizing scanned document")
working_doc = self.process_images_by_page(working_doc)
return working_doc.to_api_json()
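    # Editor's note: a minimal, hypothetical usage sketch for this class; the agent
    # returns updated metadata as a JSON string, which a caller could merge back
    # into the document via set_from_json. `doc` is an assumed PDFDocument instance.
    #
    #   agent = AIAgent_OpenAI_pdf_image_analysis()
    #   doc.set_from_json(agent.analyze_images(doc))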
    # A generic function to ask GPT to analyze a list of images (list_images_base64)
# in context of information of a PDFDocument (document)
# The decision regarding the selection of images and their
# extraction from the document is made separately, therefore
# these must be passed as additional parameters.
def send_image_request(self, document: PDFDocument, list_images_base64):
logging.info("Asking GPT-Vision for analysis of " + str(len(list_images_base64)) + " Images found in " + document.get_absolute_path())
user_message = (
"Analyze following Images which are found in a document. "
"Please extend the existing information by keeping their JSON-Format: "
+ document.to_api_json() +
" Try to imagine as many valuable keywords and categories as possible. "
"Imagine additional keywords thinking of a wider context and possible categories in an archive system. "
"Answer in JSON-Format corresponding to given input."
)
message_content = [
{
"type": "text",
"text": user_message
}]
# Add individual images to the message
for base64_image in list_images_base64:
image_content = {
"type": "image_url",
"image_url": {
"url": f"data:image/png;base64,{base64_image}"
}
}
message_content.append(image_content)
self.add_message(message_content)
try:
response = super().send_request(temperature=0.2, response_format = None, )
return response
except Exception as e:
logging.error("API-Call failed")
logging.error(e)
return None
def process_images_by_size(self, pdf_document: PDFDocument):
# Create a list of all images from each page
all_images = [image for page in pdf_document.images for image in page]
# Sort images by pixel count (width x height)
sorted_images = sorted(all_images, key=lambda img: img["original_width"] * img["original_height"], reverse=True)
# Filter out images smaller than 90000 pixels (e.g., less than 300x300)
relevant_images = [img for img in sorted_images if img["original_width"] * img["original_height"] >= 90000]
# Process images in groups of 3
for i in range(0, len(relevant_images), 3):
group = relevant_images[i:i + 3]
base64_group = []
for image in group:
base64_group.append(pdf_document.get_png_image_base64_by_xref(image['xref']))
# Call ai_analyze_images with the group of images
response = self.send_image_request(pdf_document, base64_group)
try:
pdf_document.set_from_json(response)
except Exception as e:
logging.error("API-Call for image analysis failed")
logging.error(e)
if pdf_document.has_sufficient_information():
logging.info("Document information sufficient. Proceeding with next document.")
return pdf_document
else:
logging.info("Still lacking information, looking for more images")
logging.info("No more images found.")
return pdf_document
def process_images_by_page(self, pdf_document: PDFDocument):
for page in pdf_document.pages:
logging.debug(f"Checking Page {page['page_number']} looking for largest image")
# Skip page if no images are present
if 'max_img_xref' not in page or not page['max_img_xref']:
logging.debug("Page not analyzed: (no images)")
continue
            # Get the largest image of the page (assuming it to be the scan image)
image_base64 = pdf_document.get_png_image_base64_by_xref(page['max_img_xref'])
# Send it to GPT
logging.info("Asking AI for analyzing scanned page")
response = self.send_image_request(pdf_document, [image_base64])
try:
pdf_document.set_from_json(response)
except Exception as e:
logging.error("API-Call for image analysis failed")
logging.error(e)
# Only proceed if the information about the document is still insufficient
if pdf_document.has_sufficient_information():
logging.info("Document information sufficient, proceeding.")
return pdf_document
else:
logging.info("Still lacking information, looking for more pages")
logging.info("No more pages available.")
return pdf_document
# TEXT-Analysis
class AIAgent_OpenAI_pdf_text_analysis(AIAgent_OpenAI):
def __init__(self):
system_message = (
"You are a helpful assistant analyzing OCR outputs. It's important "
"to remember that these outputs may represent only a part of the document. "
"Provide the following information:\n"
"1. Creation date of the document.\n"
"2. A short title of 3-4 words.\n"
"3. A meaningful summary of 3-4 sentences.\n"
"4. Creator/Issuer\n"
"5. Suitable keywords/tags related to the content.\n"
"6. Rate the importance of the document on a scale from 0 (unimportant) to "
"10 (vital).\n"
"7. Rate your confidence for each of the above points on a scale "
"from 0 (no information, text not readable) over 5 (possibly right, but only "
"few hints about the content of the whole document) to 10 (very sure). "
"You always answer in {LANGUAGE} language. For gathering information, "
"you use the given filename, pathname and OCR-analyzed text. "
"If you are seeing a blank document, your title-confidence is alway 0."
"You always answer in a specified JSON-Format like in this example:\n"
"{\n"
" 'summary': '[summary]',\n"
" 'summary_confidence': [number],\n"
" 'title': '[title]',\n"
" 'title_confidence': [number],\n"
" 'creation_date': '[Date YY-mm-dd]',\n"
" 'creation_date_confidence': [number],\n"
" 'creator': '[creator name]',\n"
" 'creator_confidence': [number],\n"
" 'tags': ['[tag 1]', '[tag 2]', ...],\n"
" 'tags_confidence': [[confidence tag 1], [confidence tag 2]],\n"
" 'importance': [number],\n"
" 'importance_confidence': [number]\n"
"}"
)
# Parent constructor
super().__init__(model="gpt-4-1106-preview", system_message=system_message)
self.response_format="json_object"
# Main working function to get info about a PDFDocument by
# sending a GPT-API-Request
def analyze_text(self, pdf_document: PDFDocument):
# Step 1: Analyze the number of potentially meaningful words
# to decide which model to use
# GPT-3.5 is good enough for long texts and much cheaper.
# Especially in shorter texts, GPT-4 gives much more high-quality answers
word_count = len([word for word in re.split(r'\W+', pdf_document.get_pdf_text()) if len(word) >= 3])
model_choice = "gpt-3.5-turbo-1106" if word_count > 100 else "gpt-4-1106-preview"
#model_choice = "gpt-4-1106-preview" # for test purposes
logging.debug("Opting for " + model_choice)
self.set_model(model_choice)
message = ("Analyze following OCR-Output. Try to imagine as many valuable keywords and categories as possible. "
"Imagine additional keywords thinking of a wider context and possible categories in an archive system. "
f"Use {LANGUAGE} Language. Answer in the given pattern (JSON): "
+ pdf_document.get_short_description()
)
# in case of very long text, we have to shorten it depending on
# the specific token-limit of the actual model
# Optimize request data
# Convert message-list to str
request_test_str = pprint.pformat(self.messages)
# estimate token-number for request
num_tokens = num_tokens_from_string(request_test_str)
# estimate 500 Tokens for answer
tokens_required = num_tokens + 500
# max tokens of the actual model stored in price list table | diff_to_max = tokens_required - OpenAI_model_pricelist[self.model][2] | 1 | 2023-12-04 09:30:53+00:00 | 12k |
PopicLab/insilicoSV | insilicosv/simulate.py | [
{
"identifier": "utils",
"path": "insilicosv/utils.py",
"snippet": "class NestedDict(defaultdict):\nclass OverlapEvents:\n def __call__(self):\ndef is_overlapping(event_ranges, addition, called_from_helper=False, strictly_partial=False):\ndef fail_if_any_overlapping(arr):\ndef validate_symbols(source, target):\ndef remove_file(file):\ndef reset_file(filename):\ndef generate_seq(length):\ndef percent_N(seq):\ndef complement(seq):\ndef divergence(seq, divergence_prob=None):\ndef get_sv_config_identifier(sv_config):\n def __init__(self, config, allow_chroms=None):\n def get_num_overlap_counts(self, config):\n def parse_bed_file(self, bed_fname, allow_chroms=None, allow_types=None):\n def get_single_element_interval(self, sv_config_id, sv_config, partial_overlap):\n def populate_alu_pairs(self, svs_config):\n def get_alu_mediated_interval(self, sv_config_id):\n def remove_alu_from_overlap_dict(self, chrom, start, end):\n def midpoint(start, end):\n def get_intrvl_len(chr, st, end):\n def elt_type_is_allowed(self, elt_type):\n def get_partially_overlapping_interval(elt_chrom, elt_start, elt_stop, sv_min, sv_max):\n def draw_from_unif(a, b):\n def decrement_counts(self, sv_config_id, input_elt_type, partial_overlap):\n def __getitem__(self, sv_config_id, minsize, maxsize, elt_type=None, partial_overlap=False):"
},
{
"identifier": "FormatterIO",
"path": "insilicosv/processing.py",
"snippet": "class FormatterIO:\n def __init__(self, par_file):\n self.bedpe_counter = 1\n self.par_file = par_file\n self.config = None\n\n @staticmethod\n def run_checks_randomized(config):\n \"\"\"\n check method for yaml given with SVs given for randomized placement on reference\n \"\"\"\n config_svs = config['variant_sets']\n for config_sv in config_svs:\n if \"avoid_intervals\" in config_sv:\n continue\n elif \"type\" not in config_sv:\n raise Exception(\"\\\"Type\\\" attribute must be specified! For custom transformations, enter in \\\"Custom\\\"\")\n elif config_sv[\"type\"] == \"SNP\": # SNP events are only specified by count (size is deterministic)\n if \"number\" in config_sv and isinstance(config_sv[\"number\"], int) and config_sv[\"number\"] > 0:\n continue\n else:\n raise Exception(\"Number (of type int > 0) is a required parameter for all SVs\")\n if \"min_length\" not in config_sv:\n raise Exception(\"Min length must be specified on all SVs!\")\n if \"max_length\" not in config_sv:\n raise Exception(\"Max length must be specified on all SVs!\")\n if \"number\" not in config_sv:\n raise Exception(\"Number is a required parameter for all SVs\")\n\n elif \"type\" in config_sv and not isinstance(config_sv[\"type\"], str):\n raise Exception(\"Invalid {} type for SV \\'type\\' attribute, str expected\".format(type(config_sv[\"type\"])))\n valid_optional_par = [\"fail_if_placement_issues\", \"max_tries\", \"generate_log_file\", \"filter_small_chr\",\n \"prioritize_top\", \"homozygous_only\", \"reference\"] # valid arguments within sim_settings\n for parameter in config['sim_settings']:\n if parameter not in valid_optional_par:\n raise Exception(\"\\\"{}\\\" is an invalid argument under sim_settings\".format(parameter))\n valid_keys = [\"sim_settings\", \"variant_sets\", \"overlap_events\", \"avoid_intervals\"] # valid arguments at the top level\n for key in config:\n if key not in valid_keys:\n raise Exception(\"Unknown argument \\\"{}\\\"\".format(key))\n\n def postproc_config_dict(self):\n if 'sim_settings' not in self.config.keys():\n raise Exception(\"Must include \\'sim_settings\\' sections specifying at least \\'reference\\' path\")\n if \"filter_small_chr\" in self.config.keys() and not isinstance(self.config[\"filter_small_chr\"], int):\n raise Exception(\"Must provide value of type int to \\'filter_small_chr\\'\")\n if \"reference\" not in self.config[\"sim_settings\"]:\n raise Exception(\"Must include reference FASTA file in \\'reference\\' field of \\'sim_settings\\'\")\n elif self.config[\"sim_settings\"][\"reference\"].split(\".\")[-1] not in [\"fa\", \"fna\", \"fasta\"]:\n raise Exception(\"Input reference must be of type .fa, .fna, or .fasta\")\n if \"vcf_path\" not in self.config[\"variant_sets\"][0]:\n self.run_checks_randomized(self.config)\n for config_sv in self.config['variant_sets']:\n if \"vcf_path\" in config_sv:\n continue\n # SV event length specification - not applicable for SNPs\n if config_sv[\"type\"] != \"SNP\":\n if not isinstance(config_sv[\"min_length\"], list) or not isinstance(config_sv[\"max_length\"], list):\n raise Exception(\"Must provide entries of type list to \\'min_length\\' and \\'max_length\\'\")\n else:\n config_sv[\"length_ranges\"] = list(zip(config_sv[\"min_length\"], config_sv[\"max_length\"]))\n assert all(max_len >= min_len >= 0 for (min_len, max_len) in config_sv[\"length_ranges\"]), \"Max length must be >= min length for all SVs! 
Also ensure that all length values are >= 0.\"\n if \"divergence_prob\" in config_sv:\n if config_sv[\"type\"] != \"DIVERGENCE\":\n raise Exception(\"divergence_prob can only be given for event type DIVERGENCE\")\n else:\n assert isinstance(config_sv[\"divergence_prob\"], int) or isinstance(config_sv[\"divergence_prob\"], float), \\\n \"Must give \\'divergence_prob\\'\"\n assert 1 >= config_sv[\"divergence_prob\"] > 0, \"divergence_prob must be in (0,1]\"\n\n config_sv[\"type\"] = Variant_Type(config_sv[\"type\"])\n if config_sv[\"type\"] != Variant_Type.Custom:\n config_sv[\"source\"] = None\n config_sv[\"target\"] = None\n\n # setting default values for sim_settings fields\n if 'max_tries' not in self.config['sim_settings']:\n self.config['sim_settings']['max_tries'] = 50\n if 'fail_if_placement_issues' not in self.config['sim_settings']:\n self.config['sim_settings']['fail_if_placement_issues'] = False\n\n def yaml_to_var_list(self):\n try:\n with open(self.par_file) as yaml_file:\n self.config = yaml.full_load(yaml_file)\n except:\n raise Exception(\"YAML File {} failed to be open\".format(self.par_file))\n self.postproc_config_dict()\n\n def write_to_file(self, sv, bedfile, source_s, source_e, target_s, target_e, transform, event, sv_id):\n assert (not event.symbol.startswith(Symbols.DIS.value))\n if transform == Operations.INS.value:\n transform_length = event.length\n else:\n transform_length = source_e - source_s\n if event.length > 0:\n with open(bedfile, \"a\") as fout:\n row = [str(event.source_chr),\n str(source_s),\n str(source_e),\n str(event.source_chr),\n str(target_s),\n str(target_e),\n transform,\n str(transform_length),\n '%d/%d' % (int(sv.hap[0]), int(sv.hap[1])),\n sv.name,\n str(sv_id)]\n fout.write(\"\\t\".join(row) + \"\\n\")\n\n @staticmethod\n def symbol_is_inversion(symbol):\n return any(c.islower() for c in symbol)\n\n @staticmethod\n def export_insertions(chr, start_pos, seq, ins_fasta):\n \"\"\"\n Exports foreign insertion sequences to separate fasta file, append only\n \"\"\"\n with open(ins_fasta, \"a\") as fout_ins:\n fout_ins.write(\">{}_{}\\n\".format(chr, start_pos))\n fout_ins.write(\"{}\\n\".format(seq))\n\n @staticmethod\n def get_event_target_operation(ev, target_events_dict, source_events_dict):\n \"\"\"\n determines target interval and operation for multi-source events\n \"\"\"\n # A -> A'\n if ev + Symbols.DUP.value in target_events_dict.keys():\n trg_sym = ev + Symbols.DUP.value\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), \\\n Operations.DUP.value if ev in target_events_dict.keys() else Operations.TRA.value\n # A -> a'\n elif ev.lower() + Symbols.DUP.value in target_events_dict.keys():\n trg_sym = ev.lower() + Symbols.DUP.value\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), Operations.INVDUP.value\n # A -> a\n elif ev.lower() in target_events_dict.keys():\n trg_sym = ev.lower()\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), Operations.INV.value\n # A -> A* (in the case of a custom event in which an event is divergently duplicated)\n elif ev + Symbols.DIV.value in target_events_dict.keys():\n trg_sym = ev + Symbols.DIV.value\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), Operations.DIV.value\n # A -> A (insertion if source A is undefined, identity otherwise)\n elif ev in target_events_dict.keys():\n return (target_events_dict[ev].start, target_events_dict[ev].end), \\\n Operations.INS.value if 
source_events_dict[ev].start is None else Operations.IDENTITY.value\n # A -> [none]\n elif ev not in [sym[0] for sym in target_events_dict.keys()]:\n return (source_events_dict[ev].start, source_events_dict[ev].end), Operations.DEL.value\n # otherwise unknown mapping\n else:\n return (source_events_dict[ev].start, source_events_dict[ev].end), Operations.UNDEFINED.value\n\n @staticmethod\n def postprocess_record_params(sv, sv_record_info):\n \"\"\"\n arrange the bed_record parameter dictionaries in order of ascending source interval start position\n and assign order values to the relevant entries\n \"\"\"\n # for TRA/INS/DUP events with the same target position, 'order' describes the order in which they\n # are compiled (i.e., the order in which they appear in the target sequence)\n order = 0\n ins_pos = None\n for block in sv.target_symbol_blocks:\n for target_event in block:\n if target_event.symbol.startswith(Symbols.DIS.value) or \\\n target_event.symbol in sv_record_info.keys(): # <- prevent collision with A' and A if both in target\n continue\n src_sym = target_event.symbol[0].upper()\n if sv_record_info[src_sym]['transform'] in NONZERO_ORDER_OPERATIONS:\n if ins_pos is None:\n ins_pos = sv_record_info[src_sym]['target_s']\n order += 1\n elif sv_record_info[src_sym]['target_s'] == ins_pos:\n order += 1\n else:\n ins_pos = None\n order = 0\n # sv_record_info[src_sym]['order'] = order\n return sorted([params for params in sv_record_info.values()], key=lambda params: params['source_s'])\n\n def export_to_bedpe(self, svs, bedfile, ins_fasta=None, reset_file=True):\n if reset_file:\n utils.reset_file(bedfile)\n if ins_fasta:\n utils.reset_file(ins_fasta)\n for sv_id, sv in enumerate(svs):\n # SVs with multiple source events will be split into multiple bed records (one for each)\n if len(sv.events_dict) == 1:\n ev = list(sv.sv_blocks.target_events_dict.values())[0] if sv.type == Variant_Type.INS\\\n else list(sv.events_dict.values())[0]\n op = self.get_event_target_operation(ev.symbol, sv.sv_blocks.target_events_dict, sv.events_dict)[1]\n record_info = {'source_s': ev.start, 'source_e': ev.end, 'target_s': ev.start, 'target_e': ev.end,\n 'transform': op, 'sv': sv, 'event': ev, 'bedfile': bedfile, 'sv_id': sv_id + 1}\n self.write_to_file(**record_info)\n if op == Operations.INS.value:\n self.export_insertions(sv.start_chr, ev.start, ev.source_frag, ins_fasta)\n else:\n # multiple source events: source intervals taken from the source events\n # and target intervals taken from corresponding target events (if no match, then deletion)\n sv_record_info = {}\n for ev in sv.events_dict.values():\n if ev.symbol.startswith(Symbols.DIS.value):\n continue\n sv_record_info[ev.symbol] = {'source_s': ev.start, 'source_e': ev.end, 'sv': sv, 'event': ev, 'bedfile': bedfile, 'sv_id': sv_id + 1}\n (target_s, target_e), operation = self.get_event_target_operation(ev.symbol, sv.sv_blocks.target_events_dict, sv.events_dict)\n sv_record_info[ev.symbol]['target_s'] = target_s\n sv_record_info[ev.symbol]['target_e'] = target_e\n sv_record_info[ev.symbol]['transform'] = operation\n for param_dict in self.postprocess_record_params(sv, sv_record_info):\n self.write_to_file(**param_dict)\n\n def export_to_vcf(self, svs, stats, vcffile):\n with open(vcffile, \"w\") as vcf:\n vcf.write(\"##fileformat=VCFv4.2\\n\")\n for chrm, chrm_len in stats.chr_lengths.items():\n vcf.write(\"##contig=<ID=%s,length=%d>\\n\" % (chrm, chrm_len))\n vcf.write(\"#%s\\n\" % \"\\t\".join([\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", 
\"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\",\n \"SAMPLE\"]))\n # *** This will throw an error with pysam version 0.18, need 0.16.0.1\n vcf_file = pysam.VariantFile(vcffile)\n vcf_file.header.info.add('END', number=1, type='Integer', description=\"End position of the variant \"\n \"described in this record\")\n vcf_file.header.info.add('CIPOS', number=2, type='Integer', description=\"Confidence interval around POS for \"\n \"imprecise variants\")\n vcf_file.header.info.add('CIEND', number=2, type='Integer', description=\"Confidence interval around END for \"\n \"imprecise variants\")\n vcf_file.header.info.add('SVTYPE', number=1, type='String', description=\"Type of structural variant\")\n vcf_file.header.info.add('SVLEN', number=1, type='Integer', description=\"Length of structural variant\")\n vcf_file.header.info.add('SVMETHOD', number=1, type='String', description=\"SV detection method\")\n vcf_file.header.info.add('TARGET', number=1, type='Integer', description=\"Target location for divergent repeat\")\n vcf_file.header.info.add('OVERLAP_EV', number=1, type='String', description=\"Bool. indicator for the event being\"\n \"placed at an overlap_events interval\")\n vcf_file.header.formats.add('GT', number=1, type='String', description=\"Genotype\")\n\n vcf_out_file = pysam.VariantFile(vcffile, 'w', header=vcf_file.header)\n\n for sv in svs:\n zyg = (int(sv.hap[0]), int(sv.hap[1]))\n dispersion_target = None\n if sv.type in DISPERSION_TYPES:\n source_event = sv.events_dict[Symbols.REQUIRED_SOURCE.value]\n disp_event = sv.events_dict['_1']\n rec_start = source_event.start\n rec_end = source_event.end\n if disp_event.start == source_event.end:\n dispersion_target = disp_event.end\n else:\n dispersion_target = disp_event.start\n else:\n rec_start = min([frag[1] for frag in sv.changed_fragments])\n rec_end = max(frag[2] for frag in sv.changed_fragments)\n if dispersion_target is not None:\n info_field = {'SVTYPE': sv.type.value, 'SVLEN': rec_end - rec_start, 'TARGET': dispersion_target}\n else:\n if sv.type == Variant_Type.INS:\n # special case of simple INS: sv length \\neq (sv end - sv start)\n # **pysam will delete END fields that are equal to POS, therefore INS records won't have an END\n rec_end += 1\n info_field = {'SVTYPE': sv.type.value, 'SVLEN': sv.events_dict[Symbols.REQUIRED_SOURCE.value].length}\n else:\n info_field = {'SVTYPE': sv.type.value, 'SVLEN': rec_end - rec_start}\n if sv.overlap_event is not None:\n info_field['OVERLAP_EV'] = sv.overlap_event[3]\n\n vcf_record = vcf_out_file.header.new_record(contig=sv.start_chr, start=rec_start, stop=rec_end,\n alleles=['N', '<%s>' % sv.type.value], id=sv.type.value,\n info=info_field,\n qual=100, filter='PASS',\n samples=[{'GT': zyg}])\n vcf_out_file.write(vcf_record)\n\n vcf_out_file.close()\n\n def export_variants_to_fasta(self, id, edits, fasta_out, fasta_file, verbose=False):\n \"\"\"\n Exports list of changes from simulator to fasta file\n\n id: chr_id to apply edits to\n edits: list with elements of the form (start, end, new_frag)\n fasta_out: Fasta file to export changes to\n fasta_file: FastaFile with access to reference\n \"\"\"\n with open(fasta_out, \"a\") as fout_export:\n if id not in fasta_file.references:\n raise KeyError(\"ID {} not found in inputted fasta file\".format(id))\n if verbose:\n print(\"New ID: \", id)\n fout_export.write(\">\" + str(id) + \"\\n\")\n chr_variants = list(edits)\n chr_variants.sort()\n chr_variants.append([fasta_file.get_reference_length(id), fasta_file.get_reference_length(id), \"\"])\n pos = 
0\n for variant in chr_variants:\n var_start, var_end = variant[0], variant[1]\n while pos < var_start:\n appropriate_buffer = MAX_BUFFER_SIZE if var_start - pos > MAX_BUFFER_SIZE else var_start - pos\n c = fasta_file.fetch(id, pos, pos + appropriate_buffer)\n fout_export.write(c)\n pos += appropriate_buffer\n assert (pos == var_start), \"Replacement fragment about to be inserted at position {} instead of var_start {}\".format(pos, var_start)\n fout_export.write(variant[2])\n pos = var_end\n fout_export.write(\"\\n\")\n\n def close(self):\n self.fin_export1.close()\n self.fin_export2.close()"
},
{
"identifier": "collect_args",
"path": "insilicosv/processing.py",
"snippet": "def collect_args():\n parser = argparse.ArgumentParser(description='insilicoSV is a software to design and simulate complex structural variants, both novel and known.')\n parser.add_argument(\"config\", help=\"YAML config file\")\n parser.add_argument(\"-r\", \"--root\", action=\"store\", metavar=\"DIR\", dest=\"root_dir\", help=\"root directory for all files given\")\n parser.add_argument(\"--random_seed\", type=int,\n help=\"if non-zero, random seed for random number generation\")\n\n args = parser.parse_args()\n output_dir = os.path.dirname(args.config)\n args_dict = {\"config\": args.config, \"ins_fasta\": os.path.join(output_dir, \"sim.insertions.fa\"),\n \"hap1\": os.path.join(output_dir, \"sim.hapA.fa\"), \"hap2\": os.path.join(output_dir, \"sim.hapB.fa\"),\n \"bedpe\": os.path.join(output_dir, \"sim.bed\"), \"stats\": os.path.join(output_dir, \"sim.stats.txt\"),\n \"log_file\": os.path.join(output_dir, \"sim.log\"), \"random_seed\": args.random_seed}\n if args.root_dir:\n for key, curr_path in args_dict.items():\n args_dict[key] = os.path.join(args.root_dir, curr_path)\n return args_dict"
},
{
"identifier": "Structural_Variant",
"path": "insilicosv/structural_variant.py",
"snippet": "class Structural_Variant:\n def __init__(self, sv_type, mode, length_ranges=None, source=None, target=None, vcf_rec=None, ref_fasta=None,\n overlap_event=None, div_prob=None):\n \"\"\"\n sv_type: Enum either specifying one of the prewritten classes or a Custom transformation\n mode: flag to indicate whether constructor called in fixed or randomized mode\n length_ranges: list containing tuple(s) (min_length, max_length) or singleton int\n source: tuple representing source sequence, optional\n target: tuple representing target sequence, optional\n vcf_rec: (fixed mode) vcf record giving sv information that will instantiate the event\n ref_fasta: for extracting reference frag for vcf records in fixed mode initialization\n overlap_event: (chr, start, end, elt_type) tuple representing the genome element that this SV is meant to overlap, optional\n \"\"\"\n self.type = sv_type\n if self.type != Variant_Type.Custom:\n self.source, self.target = SV_KEY[self.type]\n self.name = self.type.name\n else:\n self.source, self.target = tuple(source), tuple(target)\n self.name = str(\"\".join(self.source)) + \">\" + str(\"\".join(self.target))\n\n utils.validate_symbols(self.source, self.target)\n self.source_unique_char, self.target_unique_char = Structural_Variant.reformat_seq(\n self.source), Structural_Variant.reformat_seq(self.target)\n\n self.start = None\n self.end = None\n self.start_chr = None\n self.req_space = None # required space for SV, sum of event lengths\n self.source_events = [] # list of Event classes for every symbol in source sequence\n self.events_dict = dict() # maps every unique symbol in source and target to an Event class\n self.changed_fragments = [] # list recording new fragments to be placed in the output ref\n self.dispersion_flip = False # orientation of dispersion event\n self.insseq_from_rec = None # space to store INSSEQ for fixed-mode INS event\n self.overlap_event = overlap_event # <- element tuple of form (chrom, start, end, type) (optional)\n self.div_prob = div_prob # <- divergence probability param (optionally given for type == DIVERGENCE)\n\n if self.type in DISPERSION_TYPES:\n if random.randint(0, 1):\n self.dispersion_flip = True\n if mode == 'randomized':\n self.initialize_events(length_ranges)\n else:\n self.initialize_events_fixed(vcf_rec, ref_fasta)\n\n self.sv_blocks = Blocks(self)\n self.target_symbol_blocks = self.sv_blocks.target_blocks\n\n if mode == 'randomized':\n self.active = False\n self.ishomozygous = Zygosity.UNDEFINED\n self.hap = [False, False]\n else:\n # manual call to assign_locations in fixed mode\n self.assign_locations(self.start)\n\n def __repr__(self):\n return \"<SV transformation \\\"{}\\\" -> \\\"{}\\\" taking up {} non-dispersion spaces>\".format(\n ''.join(self.source), ''.join(self.target),\n sum([event.length for event in self.source_events if not event.symbol.startswith(Symbols.DIS.value)]))\n\n @staticmethod\n def reformat_seq(transformation):\n # transformation: tuple, user inputted source and target\n # if dispersion events exist in transformation, tag on unique ids to make them distinct as they all are \"_\"\n unique_transform = []\n unique_id = 1\n for component in transformation:\n if component != Symbols.DIS.value and component != Symbols.DUP.value and component != Symbols.DIV.value:\n unique_transform.append(component)\n elif component == Symbols.DUP.value: # duplication event case, need to group together symbol and duplication marking\n unique_transform[-1] += Symbols.DUP.value\n elif component == 
Symbols.DIV.value: # divergence event case, want to keep track of interval needing modification\n unique_transform[-1] += Symbols.DIV.value\n else: # dispersion event case, component = dispersion\n unique_transform.append(component + str(unique_id))\n unique_id += 1\n return tuple(unique_transform)\n\n def get_event_frag(self, event, symbol):\n # event: source event from events_dict\n # symbol: target symbol\n decode_funcs = {\"invert\": lambda string: utils.complement(string[::-1]),\n \"identity\": lambda string: string,\n \"complement\": utils.complement,\n \"diverge\": lambda string: utils.divergence(string,\n divergence_prob=(1 if self.type == Variant_Type.SNP\n else self.div_prob))}\n if any(c.islower() for c in symbol):\n return decode_funcs[\"invert\"](event.source_frag)\n elif symbol[-1] == Symbols.DIV.value:\n return decode_funcs[\"diverge\"](event.source_frag)\n else:\n return event.source_frag\n\n def initialize_events(self, lengths):\n \"\"\"\n Initializes event classes and creates a mapping of symbol to event\n\n lengths: list of tuples specifying min and max length for events within SV\n -> returns list of events in source sequence\n \"\"\"\n all_symbols = []\n for ele in self.source_unique_char + self.target_unique_char:\n if len(ele) > 0 and (len(ele) == 1 or ele.startswith(Symbols.DIS.value)) and ele.upper() not in all_symbols:\n all_symbols.append(ele.upper())\n all_symbols.sort()\n\n # symbols_dict: (key = symbol, value = (chosen length, length range))\n symbols_dict = dict()\n if len(lengths) > 1: # values given by user represents custom ranges for each event symbol of variant in lexicographical order\n assert (len(lengths) == len(all_symbols)), \\\n \"Number of lengths entered does not match the number of symbols (remember foreign insertions and dispersions) present!\"\n for idx, symbol in enumerate(all_symbols):\n symbols_dict[symbol] = (random.randint(lengths[idx][0], lengths[idx][1]), lengths[idx])\n elif len(lengths) == 1: # value given by user represents length (same range) of each event within variant in lexicographical order\n for symbol in all_symbols:\n symbols_dict[symbol] = (random.randint(lengths[0][0], lengths[0][1]), lengths[0])\n else:\n raise Exception(\"Lengths parameter expects at least one tuple\")\n\n ovlp_frag = random.choice([frag for frag in self.source_unique_char if frag[0] != '_']) if self.overlap_event is not None else None\n for idx, symbol in enumerate(all_symbols):\n if self.overlap_event is not None and symbol == ovlp_frag:\n ovlp_event_len = int(self.overlap_event[2]) - int(self.overlap_event[1])\n event = Event(self, ovlp_event_len, (ovlp_event_len, ovlp_event_len), symbol,\n start=int(self.overlap_event[1]), end=int(self.overlap_event[2]))\n else:\n event = Event(self, symbols_dict[symbol][0], symbols_dict[symbol][1], symbol)\n self.events_dict[symbol] = event\n\n for symbol in self.source_unique_char:\n self.source_events.append(self.events_dict[symbol])\n\n if self.dispersion_flip:\n self.source_events = self.source_events[::-1]\n\n self.req_space = sum([event.length for event in self.source_events])\n\n def initialize_events_fixed(self, vcf_record, ref_fasta):\n # initialization method for SV read in from vcf\n source_len = vcf_record.stop - vcf_record.start if 'SVLEN' not in vcf_record.info else vcf_record.info['SVLEN']\n for symbol in self.source_unique_char:\n if symbol == Symbols.REQUIRED_SOURCE.value:\n source_ev = Event(self, source_len, (source_len, source_len), symbol)\n source_ev.start = vcf_record.start\n source_ev.end = 
vcf_record.stop\n source_ev.source_chr = vcf_record.chrom\n source_ev.source_frag = ref_fasta.fetch(source_ev.source_chr, source_ev.start, source_ev.end) if \\\n vcf_record.id != Variant_Type.SNP.value else vcf_record.ref\n self.events_dict[symbol] = source_ev\n if symbol.startswith(Symbols.DIS.value):\n self.dispersion_flip = vcf_record.info['TARGET'] < vcf_record.start\n disp_len = vcf_record.info['TARGET'] - vcf_record.stop if not self.dispersion_flip else \\\n vcf_record.start - vcf_record.info['TARGET']\n disp_ev = Event(self, disp_len, (disp_len, disp_len), symbol)\n disp_ev.start = vcf_record.stop if not self.dispersion_flip else vcf_record.info['TARGET']\n disp_ev.end = vcf_record.info['TARGET'] if not self.dispersion_flip else vcf_record.start\n disp_ev.source_chr = vcf_record.chrom\n disp_ev.source_frag = ref_fasta.fetch(disp_ev.source_chr, disp_ev.start, disp_ev.end)\n self.events_dict[symbol] = disp_ev\n # storing the insertion seq for INSs/SNPs with an insertion sequence / alt allele given in the vcf\n if vcf_record.info['SVTYPE'] == 'SNP':\n # NB: only supporting SNP records with a single allele ALT reported\n self.insseq_from_rec = vcf_record.alts[0]\n if vcf_record.info['SVTYPE'] == 'INS':\n if 'INSSEQ' in vcf_record.info:\n self.insseq_from_rec = vcf_record.info['INSSEQ'][0]\n source_ev = Event(self, source_len, (source_len, source_len), Symbols.REQUIRED_SOURCE.value)\n self.events_dict[Symbols.REQUIRED_SOURCE.value] = source_ev\n self.start = vcf_record.start\n self.end = self.start\n else:\n self.start = self.events_dict[Symbols.REQUIRED_SOURCE.value].start\n self.end = self.events_dict[Symbols.REQUIRED_SOURCE.value].end if '_1' not in self.events_dict.keys() else self.events_dict['_1'].end\n self.start_chr = vcf_record.chrom\n\n # handling for divergent repeat simulation logic (div_dDUPs placed into R1 need to correspond to dDUPs in R2)\n if self.type == Variant_Type.div_dDUP:\n self.target_unique_char = (\"A\", \"_1\", \"A'\")\n\n self.active = True\n if vcf_record.samples[0]['GT'] == (1, 1):\n self.ishomozygous = Zygosity.HOMOZYGOUS\n self.hap = [True, True]\n else:\n self.ishomozygous = Zygosity.HETEROZYGOUS\n self.hap = random.choice([[True, False], [False, True]])\n\n def assign_locations(self, start_pos):\n \"\"\"\n assign events start and end positions (once target blocks are populated and in the right order)\n \"\"\"\n for block in self.target_symbol_blocks:\n for ev in block:\n ev.source_chr = self.start_chr\n # if the event is one also found in the source, place it at the location given in events_dict\n if ev.symbol.upper() in self.events_dict.keys() or ev.symbol[-1] == Symbols.DIV.value:\n source_ev = self.events_dict[ev.symbol.upper() if ev.symbol.upper() in self.events_dict.keys() else ev.symbol[0]]\n ev.start = source_ev.start\n ev.end = source_ev.end\n ev.source_frag = self.get_event_frag(source_ev, ev.symbol) if self.insseq_from_rec is None else self.insseq_from_rec\n\n # everything that wasn't assigned above will be modeled as insertion fragment placed at the nearest event boundary\n target_events = [ev for bl in self.target_symbol_blocks for ev in bl]\n # singleton event that's novel (i.e., INS)\n if len(target_events) == 1 and target_events[0].start is None:\n ev = target_events[0]\n ev.start = start_pos\n ev.end = start_pos\n source_event = self.events_dict[ev.symbol[0].upper()]\n if self.insseq_from_rec is not None:\n ev.source_frag = self.insseq_from_rec\n else:\n ev.source_frag = self.get_event_frag(source_event, ev.symbol)\n if ev.source_frag is 
None:\n ev.source_frag = utils.generate_seq(ev.length)\n else:\n for i in range(len(target_events)):\n ev = target_events[i]\n if ev.start is None:\n if i == 0:\n # if the first event is novel, set start/end to the start of the nearest event\n j = i + 1\n while target_events[j].start is None:\n j += 1\n ev.start = target_events[j].start\n ev.end = target_events[j].start\n else:\n ev.start = target_events[i - 1].end\n ev.end = target_events[i - 1].end\n source_event = self.events_dict[ev.symbol[0].upper()]\n ev.source_frag = self.get_event_frag(source_event, ev.symbol)\n\n self.sv_blocks.generate_target_events_dict()\n\n def change_fragment(self):\n \"\"\"\n Takes the mapping of symbols to events and the target sequence to construct a replacement sequence for the reference fragment\n \"\"\"\n changed_fragments = []\n assert (self.start is not None and self.end is not None), \"Undefined SV start for {}\".format(\n self)\n block_start = None\n block_end = None\n\n if self.target_symbol_blocks == [[]]: # special case: simple deletion -- len(target_symbol_blocks) == 0\n changed_fragments.append([self.start_chr, self.start, self.end, ''])\n else:\n for idx, block in enumerate(self.target_symbol_blocks):\n new_frag = ''\n if len(block) == 0:\n # TRA branch: delete the A-length interval on the opposite side of the dispersion as our A'\n if idx == 0:\n del_len = len(self.target_symbol_blocks[2][0].source_frag)\n disp_ev = self.target_symbol_blocks[1][0]\n block_start = disp_ev.start - del_len\n block_end = disp_ev.start\n else:\n del_len = len(self.target_symbol_blocks[idx - 2][0].source_frag)\n disp_ev = self.target_symbol_blocks[idx - 1][0]\n block_start = disp_ev.end\n block_end = block_start + del_len\n changed_fragments.append([self.start_chr, block_start, block_end, new_frag])\n continue\n if block[0].symbol.startswith(Symbols.DIS.value):\n continue\n for i in range(len(block)):\n ev = block[i]\n new_frag += ev.source_frag\n if i == 0:\n block_start = ev.start\n if i == len(block) - 1:\n block_end = ev.end\n changed_fragments.append([self.start_chr, block_start, block_end, new_frag])\n # DEL logic: for all source events whose symbol doesn't appear (in any form) in the target symbols,\n # create a deletion fragment over that interval\n target_symbols = [ev.symbol[0].upper() for bl in self.target_symbol_blocks for ev in bl]\n for source_sym in self.events_dict.keys():\n if not source_sym.startswith(Symbols.DIS.value) and source_sym not in target_symbols:\n del_ev = self.events_dict[source_sym]\n changed_fragments.append([del_ev.source_chr, del_ev.start, del_ev.end, ''])\n\n self.changed_fragments = changed_fragments\n self.clean_event_storage()\n\n def clean_event_storage(self):\n # remove source fragments from events to save space as they are no longer needed\n for event in self.events_dict.values():\n if event.symbol in self.source_unique_char: # do not clean out insertion fragments as they'll need to be exported later\n event.source_frag = \"Removed\""
},
{
"identifier": "Event",
"path": "insilicosv/structural_variant.py",
"snippet": "class Event:\n \"\"\"\n represents the symbols, also known as the \"events,\" within a SV transformation\n \"\"\"\n\n def __init__(self, sv_parent, length, length_range, symbol, source_frag=None, start=None, end=None):\n self.sv_parent = sv_parent # sv_parent: Structural_Variant, event is always part of larger SV\n self.length = length\n self.length_range = length_range\n self.symbol = symbol # refers to symbol in SV's transformation\n self.source_chr = None\n self.source_frag = None if not source_frag else source_frag\n self.start = start\n self.end = end\n\n def __repr__(self):\n return \"<Event {}>\".format({\"length\": self.length, \"symbol\": self.symbol, \"start\": self.start, \"end\": self.end,\n \"source_chr\": self.source_chr})"
}
] | import random
import logging
import time
from insilicosv import utils
from os import write
from insilicosv.processing import FormatterIO, collect_args
from pysam import FastaFile
from pysam import VariantFile
from insilicosv.constants import *
from insilicosv.structural_variant import Structural_Variant, Event
from collections import defaultdict | 10,773 | self.avg_len[0] += event.length
self.avg_len[1] += 1
self.avg_len = self.avg_len[0] // self.avg_len[1] if self.avg_len[1] != 0 else 0
def export_data(self, fileout):
"""
Exports all collected data to entered file
fileout: Location to export stats file
"""
def write_item(fout, name, item, prefix=""):
fout.write("{}{}: {}\n".format(prefix, str(name), str(item)))
with open(fileout, "w") as fout:
fout.write("===== Overview =====\n")
write_item(fout, "SVs successfully simulated", str(self.active_svs) + "/" + str(self.total_svs))
for sv_type in self.sv_types:
write_item(fout, sv_type, self.sv_types[sv_type], prefix="\t- ")
write_item(fout, "Homozygous SVs", self.num_homozygous)
write_item(fout, "Heterozygous SVs", self.num_heterozygous)
write_item(fout, "Average length of SV symbols/components (excluding dispersions)", self.avg_len)
write_item(fout, "Min length of non-dispersion event", self.min_event_len)
write_item(fout, "Max length of non-dispersion event", self.max_event_len)
for id in self.chr_ids:
fout.write("\n===== {} =====\n".format(id))
write_item(fout, "Length of sequence", self.chr_lengths[id])
write_item(fout, "Total impacted length of reference chromosome", self.len_frags_chr[id])
class SV_Simulator:
def __init__(self, par_file, log_file=None):
"""
par_file: file location to configuration file (.yaml)
log_file: location to store log file with diagnostic information if config parameters indicate so
"""
global time_start
print("Setting up Simulator...")
self.formatter = FormatterIO(par_file)
self.formatter.yaml_to_var_list()
config = self.formatter.config
self.ref_file = config['sim_settings']['reference']
self.ref_fasta = FastaFile(self.ref_file)
self.svs_config = config['variant_sets']
self.sim_settings = config['sim_settings']
if log_file and "generate_log_file" in self.sim_settings.keys():
logging.basicConfig(filename=log_file, filemode="w", level=logging.DEBUG,
format='[%(name)s: %(levelname)s - %(asctime)s] %(message)s')
self.log_to_file("YAML Configuration: {}".format(config))
# get all chromosome ids
self.order_ids = self.ref_fasta.references
self.len_dict = dict() # stores mapping with key = chromosome, value = chromosome length
for id in self.order_ids:
chrom_len = self.ref_fasta.get_reference_length(id)
if 'filter_small_chr' in self.sim_settings and chrom_len < self.sim_settings['filter_small_chr']:
print("Filtering chromosome {}: Length of {} below threshold of {}".format(id, chrom_len, self.sim_settings['filter_small_chr']))
else:
self.len_dict[id] = chrom_len
print("Length of chromosome {}: {}".format(id, self.len_dict[id]))
# initialize stats file to be generated after all edits and exporting are finished
self.stats = StatsCollection(self.order_ids, self.len_dict)
self.mode = "randomized"
self.vcf_path = None
if "vcf_path" in self.svs_config[0]:
self.mode = "fixed"
self.vcf_path = self.svs_config[0]["vcf_path"]
self.svs = []
self.event_ranges = defaultdict(list)
if "avoid_intervals" in config:
# extract {chrom: [(start, end)]} intervals from vcf, add intervals from vcf to event range
self.extract_vcf_event_intervals(config["avoid_intervals"])
self.overlap_events = None if "overlap_events" not in config.keys() \
else utils.OverlapEvents(config, allow_chroms=self.order_ids)
self.initialize_svs()
print("Finished Setting up Simulator in {} seconds\n".format(time.time() - time_start))
time_start = time.time()
def __repr__(self):
return "All structural variants entered into simulator: {}".format(self.svs)
def log_to_file(self, info, key="DEBUG"):
# only logs to file if config setting indicates so
key_to_func = {"DEBUG": logging.debug, "WARNING": logging.warning}
if "generate_log_file" in self.sim_settings and self.sim_settings["generate_log_file"]:
key_to_func[key](info)
def get_rand_chr(self, check_size=None, fixed_chrom=None):
# random assignment of SV to a chromosome (unless we have a predetermined chromosome for this event)
valid_chrs = self.order_ids
if check_size is not None:
valid_chrs = [chrom for chrom, chr_size in self.len_dict.items() if chr_size >= check_size]
if len(valid_chrs) == 0:
raise Exception("SVs are too big for the reference!")
rand_id = valid_chrs[random.randint(0, len(valid_chrs) - 1)] if fixed_chrom is None else fixed_chrom
chr_len = self.len_dict[rand_id]
chr_event_ranges = self.event_ranges[rand_id]
assert rand_id is not None
return rand_id, chr_len, chr_event_ranges
def extract_vcf_event_intervals(self, vcf_path):
vcf = VariantFile(vcf_path)
for rec in vcf.fetch():
self.event_ranges[rec.chrom].append((rec.start, rec.stop))
def process_vcf(self, vcf_path):
# process vcf containing SVs to be added (deterministically) to reference
active_svs_total = 0
time_start_local = 0
vcf = VariantFile(vcf_path)
for rec in vcf.fetch():
svtype = Variant_Type(rec.info['SVTYPE']) if 'SVTYPE' in rec.info else Variant_Type(rec.id)
self.event_ranges[rec.chrom].append((rec.start, rec.stop))
|
time_start = time.time()
class StatsCollection:
"""collection of information for stats file, if requested"""
def __init__(self, chr_ids, chr_lens):
self.num_heterozygous = 0
self.num_homozygous = 0
self.total_svs = 0
self.active_svs = 0
self.active_events_chr = dict()
self.chr_ids = chr_ids
self.chr_lengths = chr_lens
self.avg_len = [0, 0] # Average length of SV events/components
self.len_frags_chr = dict() # Lengths of altered fragments within chromosome
self.sv_types = dict()
for id in self.chr_ids:
self.len_frags_chr[id] = 0
self.active_events_chr[id] = 0
def get_info(self, svs):
"""
collects all information for stats file after all edits are completed
svs: list of Structural_Variant objects
-> return None
"""
self.total_svs = len(svs)
self.min_event_len = min([event.length for sv in svs if sv.active for key, event in sv.events_dict.items() if
not event.symbol.startswith(Symbols.DIS.value)])
self.max_event_len = max([event.length for sv in svs if sv.active for key, event in sv.events_dict.items() if
not event.symbol.startswith(Symbols.DIS.value)])
for sv in svs:
if sv.active:
self.active_svs += 1
if sv.name in self.sv_types:
self.sv_types[sv.name] += 1
else:
self.sv_types[sv.name] = 1
if sv.hap[0] and sv.hap[1]: # homozygous SV
self.num_homozygous += 1
else: # heterozygous SV
self.num_heterozygous += 1
# add up the lengths of impacted regions on the reference
for frag in sv.changed_fragments:
self.len_frags_chr[frag[0]] += frag[2] - frag[1]
# count up average length of non-dispersion events
for symbol in sv.events_dict:
if not symbol.startswith(Symbols.DIS.value):
event = sv.events_dict[symbol]
self.avg_len[0] += event.length
self.avg_len[1] += 1
self.avg_len = self.avg_len[0] // self.avg_len[1] if self.avg_len[1] != 0 else 0
def export_data(self, fileout):
"""
Exports all collected data to entered file
fileout: Location to export stats file
"""
def write_item(fout, name, item, prefix=""):
fout.write("{}{}: {}\n".format(prefix, str(name), str(item)))
with open(fileout, "w") as fout:
fout.write("===== Overview =====\n")
write_item(fout, "SVs successfully simulated", str(self.active_svs) + "/" + str(self.total_svs))
for sv_type in self.sv_types:
write_item(fout, sv_type, self.sv_types[sv_type], prefix="\t- ")
write_item(fout, "Homozygous SVs", self.num_homozygous)
write_item(fout, "Heterozygous SVs", self.num_heterozygous)
write_item(fout, "Average length of SV symbols/components (excluding dispersions)", self.avg_len)
write_item(fout, "Min length of non-dispersion event", self.min_event_len)
write_item(fout, "Max length of non-dispersion event", self.max_event_len)
for id in self.chr_ids:
fout.write("\n===== {} =====\n".format(id))
write_item(fout, "Length of sequence", self.chr_lengths[id])
write_item(fout, "Total impacted length of reference chromosome", self.len_frags_chr[id])
class SV_Simulator:
def __init__(self, par_file, log_file=None):
"""
par_file: file location to configuration file (.yaml)
log_file: location to store log file with diagnostic information if config parameters indicate so
"""
global time_start
print("Setting up Simulator...")
self.formatter = FormatterIO(par_file)
self.formatter.yaml_to_var_list()
config = self.formatter.config
self.ref_file = config['sim_settings']['reference']
self.ref_fasta = FastaFile(self.ref_file)
self.svs_config = config['variant_sets']
self.sim_settings = config['sim_settings']
if log_file and "generate_log_file" in self.sim_settings.keys():
logging.basicConfig(filename=log_file, filemode="w", level=logging.DEBUG,
format='[%(name)s: %(levelname)s - %(asctime)s] %(message)s')
self.log_to_file("YAML Configuration: {}".format(config))
# get all chromosome ids
self.order_ids = self.ref_fasta.references
self.len_dict = dict() # stores mapping with key = chromosome, value = chromosome length
for id in self.order_ids:
chrom_len = self.ref_fasta.get_reference_length(id)
if 'filter_small_chr' in self.sim_settings and chrom_len < self.sim_settings['filter_small_chr']:
print("Filtering chromosome {}: Length of {} below threshold of {}".format(id, chrom_len, self.sim_settings['filter_small_chr']))
else:
self.len_dict[id] = chrom_len
print("Length of chromosome {}: {}".format(id, self.len_dict[id]))
# initialize stats file to be generated after all edits and exporting are finished
self.stats = StatsCollection(self.order_ids, self.len_dict)
self.mode = "randomized"
self.vcf_path = None
if "vcf_path" in self.svs_config[0]:
self.mode = "fixed"
self.vcf_path = self.svs_config[0]["vcf_path"]
self.svs = []
self.event_ranges = defaultdict(list)
if "avoid_intervals" in config:
# extract {chrom: [(start, end)]} intervals from vcf, add intervals from vcf to event range
self.extract_vcf_event_intervals(config["avoid_intervals"])
self.overlap_events = None if "overlap_events" not in config.keys() \
else utils.OverlapEvents(config, allow_chroms=self.order_ids)
self.initialize_svs()
print("Finished Setting up Simulator in {} seconds\n".format(time.time() - time_start))
time_start = time.time()
def __repr__(self):
return "All structural variants entered into simulator: {}".format(self.svs)
def log_to_file(self, info, key="DEBUG"):
# only logs to file if config setting indicates so
key_to_func = {"DEBUG": logging.debug, "WARNING": logging.warning}
if "generate_log_file" in self.sim_settings and self.sim_settings["generate_log_file"]:
key_to_func[key](info)
def get_rand_chr(self, check_size=None, fixed_chrom=None):
# random assignment of SV to a chromosome (unless we have a predetermined chromosome for this event)
valid_chrs = self.order_ids
if check_size is not None:
valid_chrs = [chrom for chrom, chr_size in self.len_dict.items() if chr_size >= check_size]
if len(valid_chrs) == 0:
raise Exception("SVs are too big for the reference!")
rand_id = valid_chrs[random.randint(0, len(valid_chrs) - 1)] if fixed_chrom is None else fixed_chrom
chr_len = self.len_dict[rand_id]
chr_event_ranges = self.event_ranges[rand_id]
assert rand_id is not None
return rand_id, chr_len, chr_event_ranges
def extract_vcf_event_intervals(self, vcf_path):
vcf = VariantFile(vcf_path)
for rec in vcf.fetch():
self.event_ranges[rec.chrom].append((rec.start, rec.stop))
def process_vcf(self, vcf_path):
# process vcf containing SVs to be added (deterministically) to reference
active_svs_total = 0
time_start_local = 0
vcf = VariantFile(vcf_path)
for rec in vcf.fetch():
svtype = Variant_Type(rec.info['SVTYPE']) if 'SVTYPE' in rec.info else Variant_Type(rec.id)
self.event_ranges[rec.chrom].append((rec.start, rec.stop)) | sv = Structural_Variant(sv_type=svtype, mode='fixed', vcf_rec=rec, ref_fasta=self.ref_fasta) | 3 | 2023-12-01 14:39:20+00:00 | 12k |
BiQiWHU/BWG | train_net.py | [
{
"identifier": "add_maskformer2_config",
"path": "mask2former/config.py",
"snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = \"MultiScaleMaskedTransformerDecoder\"\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75"
},
{
"identifier": "COCOInstanceNewBaselineDatasetMapper",
"path": "mask2former/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py",
"snippet": "class COCOInstanceNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(str(self.tfm_gens))\n )\n\n self.img_format = image_format\n self.is_train = is_train\n \n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # TODO: get padding mask\n # by feeding a \"segmentation mask\" to the same transforms\n padding_mask = np.ones(image.shape[:2])\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n # the crop transformation has default padding value 0 for segmentation\n padding_mask = transforms.apply_segmentation(padding_mask)\n padding_mask = ~ padding_mask.astype(bool)\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n dataset_dict[\"padding_mask\"] = torch.as_tensor(np.ascontiguousarray(padding_mask))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n # Let's always keep mask\n # if not self.mask_on:\n # anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n # NOTE: does not support BitMask due to augmentation\n # Current BitMask cannot handle empty objects\n instances = utils.annotations_to_instances(annos, image_shape)\n # After transforms such as cropping are 
applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n # Need to filter empty instances first (due to augmentation)\n instances = utils.filter_empty_instances(instances)\n # Generate masks from polygon\n h, w = instances.image_size\n # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n if hasattr(instances, 'gt_masks'):\n gt_masks = instances.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n instances.gt_masks = gt_masks\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict"
},
{
"identifier": "COCOPanopticNewBaselineDatasetMapper",
"path": "mask2former/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py",
"snippet": "class COCOPanopticNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n crop_gen: crop augmentation\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(\n str(self.tfm_gens)\n )\n )\n\n self.img_format = image_format\n self.is_train = is_train\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n instances.gt_boxes = Boxes(torch.zeros((0, 4)))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n 
instances.gt_boxes = masks.get_bounding_boxes()\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict"
},
{
"identifier": "MaskFormerInstanceDatasetMapper",
"path": "mask2former/data/dataset_mappers/mask_former_instance_dataset_mapper.py",
"snippet": "class MaskFormerInstanceDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for instance segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n aug_input = T.AugInput(image)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n\n # transform instnace masks\n assert \"annotations\" in dataset_dict\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"keypoints\", None)\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n if len(annos):\n assert \"segmentation\" in annos[0]\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image.shape[:2]))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary 
segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n # pad image\n image = F.pad(image, padding_size, value=128).contiguous()\n # pad mask\n masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n # Prepare per-category binary masks\n instances = Instances(image_shape)\n instances.gt_classes = classes\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))\n else:\n masks = BitMasks(torch.stack(masks))\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict"
},
{
"identifier": "MaskFormerPanopticDatasetMapper",
"path": "mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py",
"snippet": "class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n super().__init__(\n is_train,\n augmentations=augmentations,\n image_format=image_format,\n ignore_label=ignore_label,\n size_divisibility=size_divisibility,\n )\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # semantic segmentation\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n # panoptic segmentation\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n else:\n pan_seg_gt = None\n segments_info = None\n\n if pan_seg_gt is None:\n raise ValueError(\n \"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n if sem_seg_gt is not None:\n sem_seg_gt = aug_input.sem_seg\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n pan_seg_gt = torch.as_tensor(pan_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n pan_seg_gt = F.pad(\n pan_seg_gt, padding_size, value=0\n ).contiguous() # 0 is 
the VOID panoptic label\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Pemantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n pan_seg_gt = pan_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict"
},
{
"identifier": "MaskFormerSemanticDatasetMapper",
"path": "mask2former/data/dataset_mappers/mask_former_semantic_dataset_mapper.py",
"snippet": "class MaskFormerSemanticDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop_CategoryAreaConstraint(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,\n cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if 
sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Semantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict"
},
{
"identifier": "SemanticSegmentorWithTTA",
"path": "mask2former/test_time_augmentation.py",
"snippet": "class SemanticSegmentorWithTTA(nn.Module):\n \"\"\"\n A SemanticSegmentor with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=1):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n super().__init__()\n if isinstance(model, DistributedDataParallel):\n model = model.module\n self.cfg = cfg.clone()\n\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n def __call__(self, batched_inputs):\n \"\"\"\n Same input/output format as :meth:`SemanticSegmentor.forward`\n \"\"\"\n\n def _maybe_read_image(dataset_dict):\n ret = copy.copy(dataset_dict)\n if \"image\" not in ret:\n image = read_image(ret.pop(\"file_name\"), self.model.input_format)\n image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW\n ret[\"image\"] = image\n if \"height\" not in ret and \"width\" not in ret:\n ret[\"height\"] = image.shape[1]\n ret[\"width\"] = image.shape[2]\n return ret\n\n processed_results = []\n for x in batched_inputs:\n result = self._inference_one_image(_maybe_read_image(x))\n processed_results.append(result)\n return processed_results\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n\n final_predictions = None\n count_predictions = 0\n for input, tfm in zip(augmented_inputs, tfms):\n count_predictions += 1\n with torch.no_grad():\n if final_predictions is None:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions = self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions = self.model([input])[0].pop(\"sem_seg\")\n else:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions += self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions += self.model([input])[0].pop(\"sem_seg\")\n\n final_predictions = final_predictions / count_predictions\n return {\"sem_seg\": final_predictions}\n\n def _get_augmented_inputs(self, input):\n augmented_inputs = self.tta_mapper(input)\n tfms = [x.pop(\"transforms\") for x in augmented_inputs]\n return augmented_inputs, tfms"
},
{
"identifier": "InstanceSegEvaluator",
"path": "mask2former/evaluation/instance_evaluation.py",
"snippet": "class InstanceSegEvaluator(COCOEvaluator):\n \"\"\"\n Evaluate AR for object proposals, AP for instance detection/segmentation, AP\n for keypoint detection outputs using COCO's metrics.\n See http://cocodataset.org/#detection-eval and\n http://cocodataset.org/#keypoints-eval to understand its metrics.\n The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means\n the metric cannot be computed (e.g. due to no predictions made).\n\n In addition to COCO, this evaluator is able to support any bounding box detection,\n instance segmentation, or keypoint detection dataset.\n \"\"\"\n\n def _eval_predictions(self, predictions, img_ids=None):\n \"\"\"\n Evaluate predictions. Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n tasks = self._tasks or self._tasks_from_predictions(coco_results)\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id\n # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())\n # num_classes = len(all_contiguous_ids)\n # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1\n\n reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}\n for result in coco_results:\n category_id = result[\"category_id\"]\n # assert category_id < num_classes, (\n # f\"A prediction has class={category_id}, \"\n # f\"but the dataset only has {num_classes} classes and \"\n # f\"predicted class id should be in [0, {num_classes - 1}].\"\n # )\n assert category_id in reverse_id_mapping, (\n f\"A prediction has class={category_id}, \"\n f\"but the dataset only has class ids in {dataset_id_to_contiguous_id}.\"\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\n \"Evaluating predictions with {} COCO API...\".format(\n \"unofficial\" if self._use_fast_impl else \"official\"\n )\n )\n for task in sorted(tasks):\n assert task in {\"bbox\", \"segm\", \"keypoints\"}, f\"Got unknown task: {task}!\"\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api,\n coco_results,\n task,\n kpt_oks_sigmas=self._kpt_oks_sigmas,\n use_fast_impl=self._use_fast_impl,\n img_ids=img_ids,\n max_dets_per_image=self._max_dets_per_image,\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res"
}
] | from shapely.errors import ShapelyDeprecationWarning
from collections import OrderedDict
from typing import Any, Dict, List, Set
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import (
DefaultTrainer,
default_argument_parser,
default_setup,
launch,
)
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
SemSegEvaluator,
verify_results,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
from mask2former import (
COCOInstanceNewBaselineDatasetMapper,
COCOPanopticNewBaselineDatasetMapper,
InstanceSegEvaluator,
MaskFormerInstanceDatasetMapper,
MaskFormerPanopticDatasetMapper,
MaskFormerSemanticDatasetMapper,
SemanticSegmentorWithTTA,
add_maskformer2_config,
)
import warnings
import copy
import itertools
import logging
import os
import torch
import detectron2.utils.comm as comm | 10,728 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
MaskFormer Training Script.
This script is a simplified version of the training script in detectron2/tools.
"""
try:
# ignore ShapelyDeprecationWarning from fvcore
warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning)
except:
pass
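# DETECTRON2_DATASETS tells detectron2 where to find its builtin datasets; the Windows path below is machine-specific.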
os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets'
# MaskFormer
class Trainer(DefaultTrainer):
"""
Extension of the Trainer class adapted to MaskFormer.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each
builtin dataset. For your own dataset, you can simply create an
evaluator manually in your script and do not have to worry about the
hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
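        # A minimal sketch of the manual alternative mentioned in the docstring above:
        # for a custom dataset (the name "my_dataset_val" is hypothetical), a subclass
        # could skip the dispatch below and simply return
        #     DatasetEvaluators([COCOEvaluator("my_dataset_val", output_dir=output_folder)])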
# semantic segmentation
if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
)
# instance segmentation
if evaluator_type == "coco":
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
# panoptic segmentation
if evaluator_type in [
"coco_panoptic_seg",
"ade20k_panoptic_seg",
"cityscapes_panoptic_seg",
"mapillary_vistas_panoptic_seg",
]:
if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON:
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
# COCO
if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
# Mapillary Vistas
if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved | evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) | 7 | 2023-11-29 17:15:46+00:00 | 12k
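The cropped script above stops inside build_evaluator, so the data-loading side is not shown. Below is a minimal, hedged sketch of how the dataset mappers listed in this row's context (MaskFormerSemanticDatasetMapper, MaskFormerInstanceDatasetMapper) are typically attached to such a Trainer via the imported build_detection_train_loader; the config key INPUT.DATASET_MAPPER_NAME and the specific mapper choices are illustrative assumptions, not content taken from this row.

    @classmethod
    def build_train_loader(cls, cfg):
        # Assumed config switch: pick the mapper that matches the training task,
        # then hand it to detectron2's loader builder (imported in this row).
        if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic":
            mapper = MaskFormerSemanticDatasetMapper(cfg, True)
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
            mapper = MaskFormerInstanceDatasetMapper(cfg, True)
        else:
            mapper = None  # fall back to detectron2's default DatasetMapper
        return build_detection_train_loader(cfg, mapper=mapper)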