Dataset columns (type and observed value range):

Column              Type           Range / values
repo_name           stringlengths  7 to 71
file_path           stringlengths  5 to 118
context             list
import_statement    stringlengths  45 to 12.5k
token_num           int64          641 to 99.4k
cropped_code        stringlengths  44 to 17k
all_code            stringlengths  43 to 754k
next_line           stringlengths  2 to 330
gold_snippet_index  int64          0 to 68
created_at          stringlengths  25 to 25
level               stringclasses  9 values
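To make the schema concrete, here is a minimal sketch of how a record with these columns might be loaded and inspected with the Hugging Face datasets library. The dataset id "user/repo-code-completion" is a placeholder, and reading gold_snippet_index as an index into the context list is an assumption inferred from the field names and the sample records below, not something stated by the dataset itself.

# Minimal sketch under the assumptions above: load one record and look at the
# fields that make up a repository-level code-completion example.
from datasets import load_dataset

# Hypothetical dataset id, used only for illustration.
ds = load_dataset("user/repo-code-completion", split="train")
row = ds[0]

# Scalar metadata fields.
print(row["repo_name"], row["file_path"], row["token_num"], row["level"], row["created_at"])

# "context" is a list of cross-file snippets; in the sample records below each
# entry carries an "identifier", a source "path", and a "snippet" body.
for entry in row["context"]:
    print(entry["identifier"], entry["path"], len(entry["snippet"]))

# Assumed usage: the prompt is the imports plus the cropped code, the target is
# next_line, and gold_snippet_index points at the context entry that contains
# the definition needed to predict it.
prompt = row["import_statement"] + "\n" + row["cropped_code"]
target = row["next_line"]
gold_context = row["context"][row["gold_snippet_index"]]
print(target, gold_context["identifier"])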
liebrandapps/FindMyGUI
api.py
[ { "identifier": "AirTag", "path": "airTag.py", "snippet": "class AirTag:\n\n def __init__(self, ctx, jsonFile=None):\n self.log = ctx.log\n self.cfg = ctx.cfg\n self.__id = uuid.uuid4().hex\n self._name = \"\"\n self._privateKey = None\n self._advertisementKey = None\n self._hashedKey = None\n self._needsSave = False\n self._lastSeen = None\n self._latitude = None\n self._longitude = None\n self._history = {}\n self._imgId = \"airtag\"\n if jsonFile is None:\n airTagDir = ctx.cfg.general_airTagDirectory\n airTagSuffix = ctx.cfg.general_airTagSuffix\n self.fileName = join(airTagDir, self.__id + airTagSuffix)\n self._needsSave = True\n else:\n self.fileName = jsonFile\n self.load(jsonFile)\n\n @property\n def id(self):\n return self.__id\n\n def load(self, jsonFile):\n with open(jsonFile) as f:\n dta = json.load(f)\n self._name = dta['name']\n self._privateKey = base64.b64decode(dta['privateKey'])\n self._advertisementKey = base64.b64decode(dta['advertisementKey'])\n s256 = hashlib.sha256()\n s256.update(self._advertisementKey)\n self._hashedKey = base64.b64encode(s256.digest()).decode(\"ascii\")\n if 'id' in dta.keys():\n self.__id = dta['id']\n else:\n self.save()\n if 'lastSeen' in dta.keys():\n self._lastSeen = dta['lastSeen']\n self._longitude = dta['longitude']\n self._latitude = dta['latitude']\n if 'history' in dta.keys():\n self._history = dta['history']\n if 'imgId' in dta.keys():\n self._imgId = dta['imgId']\n self.log.info(f\"Loaded AirTag [{self._name} / {self.__id}] from file {self.fileName}\")\n self._needsSave = False\n\n def save(self):\n toRemove = []\n cutOff = datetime.now() - timedelta(days=self.cfg.general_history)\n for h in self._history.keys():\n if int(h) < cutOff.timestamp():\n toRemove.append(h)\n for r in toRemove:\n del self._history[r]\n j = self.toJSON()\n with open(self.fileName, 'w') as f:\n print(j, file=f)\n self.log.info(f\"Saved AirTag [{self._name} / {self.__id}] to file {self.fileName}\")\n self._needsSave = False\n\n @property\n def needsSave(self):\n return self._needsSave\n\n def toJSON(self):\n return json.dumps(self.toDict(), indent=4)\n\n def toDict(self):\n return {'name': self._name,\n 'privateKey': base64.b64encode(self._privateKey).decode('ascii'),\n 'advertisementKey': base64.b64encode(self._advertisementKey).decode('ascii'),\n 'lastSeen': self._lastSeen,\n 'longitude': self._longitude,\n 'latitude': self._latitude,\n 'history': self._history,\n 'imgId': self._imgId,\n 'id': self.id}\n\n def resolveTag(self, tag):\n value = \"notFound\"\n if tag == '##NAME##':\n value = self._name\n if tag == '##ID##':\n value = self.id\n if tag == '##LASTSEEN##':\n if self._lastSeen is None or int(self._lastSeen) == 0:\n value = \"Never\"\n else:\n value = datetime.utcfromtimestamp(self._lastSeen).strftime('%H:%M:%S %d.%m.%Y')\n return value\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n self._needsSave = self._needsSave or (value != self._name)\n self._name = value\n\n @property\n def privateKey(self):\n return base64.b64encode(self._privateKey).decode('ascii')\n\n @privateKey.setter\n def privateKey(self, value):\n v = base64.b64decode(value)\n self._needsSave = self._needsSave or (v != self._privateKey)\n self._privateKey = v\n\n @property\n def advertisementKey(self):\n return base64.b64encode(self._advertisementKey).decode('ascii')\n\n @advertisementKey.setter\n def advertisementKey(self, value):\n v = base64.b64decode(value)\n self._needsSave = self._needsSave or (v != self._advertisementKey)\n 
self._advertisementKey = v\n\n @property\n def hashedAdvKey(self):\n return self._hashedKey\n\n @property\n def lastSeen(self):\n return self._lastSeen\n\n @property\n def latitude(self):\n return self._latitude\n\n @property\n def longitude(self):\n return self._longitude\n\n def updateLocation(self, when, latitude, longitude):\n if self._lastSeen is None or when > self._lastSeen:\n self._longitude = longitude\n self._latitude = latitude\n self._lastSeen = when\n self._history[when] = {'lat': latitude, 'lon': longitude}\n self._needsSave = True\n\n @property\n def history(self):\n return self._history\n\n @property\n def imgId(self):\n return self._imgId\n\n @imgId.setter\n def imgId(self, value):\n self._needsSave = self._needsSave or value != self.imgId\n self._imgId = value" }, { "identifier": "FindMy", "path": "findmy/request_reports.py", "snippet": "class FindMy:\n\n def __init__(self, ctx):\n self.ctx = ctx\n\n def sha256(self, data):\n digest = hashlib.new(\"sha256\")\n digest.update(data)\n return digest.digest()\n\n def decrypt(self, enc_data, algorithm_dkey, mode):\n decryptor = Cipher(algorithm_dkey, mode, default_backend()).decryptor()\n return decryptor.update(enc_data) + decryptor.finalize()\n\n def decode_tag(self, data):\n latitude = struct.unpack(\">i\", data[0:4])[0] / 10000000.0\n longitude = struct.unpack(\">i\", data[4:8])[0] / 10000000.0\n confidence = int.from_bytes(data[8:9], 'big')\n status = int.from_bytes(data[9:10], 'big')\n return {'lat': latitude, 'lon': longitude, 'conf': confidence, 'status': status}\n\n def getAuth(self, regenerate=False, second_factor='sms'):\n CONFIG_PATH = os.path.dirname(os.path.realpath(__file__)) + \"/auth.json\"\n if os.path.exists(CONFIG_PATH) and not regenerate:\n with open(CONFIG_PATH, \"r\") as f:\n j = json.load(f)\n else:\n mobileme = None\n try:\n mobileme = icloud_login_mobileme(self.ctx, second_factor=second_factor)\n except requests.exceptions.ConnectionError as e:\n msg = f\"[ICLOUD] Anisette Server not running: {str(e)}\"\n self.ctx.errMsg = msg\n self.ctx.log.error(msg)\n if mobileme is None:\n return None\n j = {'dsid': mobileme['dsid'],\n 'searchPartyToken': mobileme['delegates']['com.apple.mobileme']['service-data']['tokens'][\n 'searchPartyToken']}\n with open(CONFIG_PATH, \"w\") as f:\n json.dump(j, f)\n return (j['dsid'], j['searchPartyToken'])\n\n def retrieveLocations(self):\n privkeys = {}\n names = {}\n for tag in self.ctx.airtags.values():\n hashedKey = tag.hashedAdvKey\n privkeys[hashedKey] = tag.privateKey\n names[hashedKey] = tag.name\n\n unixEpoch = int(datetime.datetime.now().strftime('%s'))\n startdate = unixEpoch - (60 * 60 * 24)\n data = {\"search\": [{\"startDate\": startdate * 1000, \"endDate\": unixEpoch * 1000, \"ids\": list(names.keys())}]}\n\n auth = self.getAuth(regenerate=False,\n second_factor='trusted_device' if self.ctx.cfg.general_trustedDevice else 'sms')\n if auth is None:\n return\n r = requests.post(\"https://gateway.icloud.com/acsnservice/fetch\",\n auth=auth,\n headers=generate_anisette_headers(self.ctx.cfg.general_anisetteHost+\":\"+str(self.ctx.cfg.general_anisettePort)),\n json=data)\n res = json.loads(r.content.decode())['results']\n self.ctx.log.info(f'{r.status_code}: {len(res)} reports received.')\n\n ordered = []\n found = set()\n for report in res:\n priv = int.from_bytes(base64.b64decode(privkeys[report['id']]), 'big')\n data = base64.b64decode(report['payload'])\n\n # the following is all copied from https://github.com/hatomist/openhaystack-python, thanks @hatomist!\n 
timestamp = int.from_bytes(data[0:4], 'big') + 978307200\n if timestamp >= startdate:\n eph_key = ec.EllipticCurvePublicKey.from_encoded_point(ec.SECP224R1(), data[5:62])\n shared_key = ec.derive_private_key(priv, ec.SECP224R1(), default_backend()).exchange(ec.ECDH(), eph_key)\n symmetric_key = self.sha256(shared_key + b'\\x00\\x00\\x00\\x01' + data[5:62])\n decryption_key = symmetric_key[:16]\n iv = symmetric_key[16:]\n enc_data = data[62:72]\n tag = data[72:]\n\n decrypted = self.decrypt(enc_data, algorithms.AES(decryption_key), modes.GCM(iv, tag))\n tag = self.decode_tag(decrypted)\n tag['timestamp'] = timestamp\n tag['isodatetime'] = datetime.datetime.fromtimestamp(timestamp).isoformat()\n tag['key'] = names[report['id']]\n tag['goog'] = 'https://maps.google.com/maps?q=' + str(tag['lat']) + ',' + str(tag['lon'])\n for t in self.ctx.airtags.values():\n if report['id'] == t.hashedAdvKey:\n t.updateLocation(timestamp, tag['lat'], tag['lon'])\n found.add(tag['key'])\n ordered.append(tag)\n self.ctx.log.info(f'{len(ordered)} reports used.')\n ordered.sort(key=lambda item: item.get('timestamp'))\n for rep in ordered: print(rep)\n for t in self.ctx.airtags.values():\n if t.needsSave:\n t.save()\n self.ctx.log.info(f'found: {list(found)}')\n self.ctx.log.info(f'missing: {[key for key in names.values() if key not in found]}')\n self.ctx.signInDone = True\n self.ctx.lastLocationUpdate = int(datetime.datetime.now().timestamp())" } ]
import json
import time
import requests.exceptions
from airTag import AirTag
from findmy.request_reports import FindMy
3,610
""" Mark Liebrand 2024 This file is part of FindMyGUI which is released under the Apache 2.0 License See file LICENSE or go to for full license details https://github.com/liebrandapps/FindMyGUI """ class API: def __init__(self, ctx): self.ctx = ctx self.log = ctx.log def call(self, cmd, params=None): self.log.debug(f"[API] Handling API command <{cmd}>") result = {} if cmd == "listTags": result = self._listTags() if cmd == 'getPos': result = self._getPos() if cmd == 'refresh': result = self._refresh() if cmd == 'getTagData': result = self._getTagData(params['id'][0]) if cmd == 'editTag': result = self._editTag(params['id'][0], params['name'][0], params['privateKey'][0], params['advertisementKey'][0], params['imgId'][0]) if cmd == 'addTag': result = self._addTag(params['id'][0], params['name'][0], params['privateKey'][0], params['advertisementKey'][0], params['imgId'][0]) if cmd == 'signInStatus': result = self._signInStatus(int(params['timeStamp'][0])) if cmd == 'creds': result = self._creds(params['userName'][0], params['password'][0]) if cmd == 'auth': result = self._auth(params['ndFactor'][0]) if cmd == 'lastLocationUpdate': result = self._lastLocationUpdate() return json.dumps(result if result is not None else {}) def _listTags(self): dct = {} for id in self.ctx.airtags.keys(): dct[id] = self.ctx.airtags[id].toDict() return dct def _getPos(self): findMy = FindMy(self.ctx) data = findMy.retrieveLocations() return data def _refresh(self): self.ctx.signInDone = False findMy = FindMy(self.ctx) try: data = findMy.retrieveLocations() except requests.exceptions.ConnectTimeout as e: msg = f"[API] Anisette Server not running: {str(e)}" self.ctx.errMsg = msg self.ctx.log.error(msg) data = {"status": "fail", "msg": msg} return data def _getTagData(self, id): self.log.debug(f"[API] Cmds' getTagData parameter is id={id}") if id in self.ctx.airtags.keys(): tag = self.ctx.airtags[id] dct = tag.toDict() dct['status'] = 'ok' else: dct = {'status': 'fail', 'msg': 'tag not found', 'id': id} return dct def _editTag(self, id, name, privKey, advKey, imgId): self.log.debug(f"[API] Cmds' editTag parameter are id={id}, name={name}, private Key={privKey}, " f"advertisementKey={advKey}") if id in self.ctx.airtags.keys(): tag = self.ctx.airtags[id] tag.name = name tag.privateKey = privKey tag.advertisementKey = advKey tag.imgId = imgId if tag.needsSave: tag.save() dct = {'status': 'ok', 'dataChanged': str(tag.needsSave)} else: dct = {'status': 'fail', 'msg': 'tag not found', 'id': id} return dct def _addTag(self, id, name, privKey, advKey, imgId): self.log.debug(f"[API] Cmds' addTag parameter are id={id}, name={name}, private Key={privKey}, " f"advertisementKey={advKey}")
""" Mark Liebrand 2024 This file is part of FindMyGUI which is released under the Apache 2.0 License See file LICENSE or go to for full license details https://github.com/liebrandapps/FindMyGUI """ class API: def __init__(self, ctx): self.ctx = ctx self.log = ctx.log def call(self, cmd, params=None): self.log.debug(f"[API] Handling API command <{cmd}>") result = {} if cmd == "listTags": result = self._listTags() if cmd == 'getPos': result = self._getPos() if cmd == 'refresh': result = self._refresh() if cmd == 'getTagData': result = self._getTagData(params['id'][0]) if cmd == 'editTag': result = self._editTag(params['id'][0], params['name'][0], params['privateKey'][0], params['advertisementKey'][0], params['imgId'][0]) if cmd == 'addTag': result = self._addTag(params['id'][0], params['name'][0], params['privateKey'][0], params['advertisementKey'][0], params['imgId'][0]) if cmd == 'signInStatus': result = self._signInStatus(int(params['timeStamp'][0])) if cmd == 'creds': result = self._creds(params['userName'][0], params['password'][0]) if cmd == 'auth': result = self._auth(params['ndFactor'][0]) if cmd == 'lastLocationUpdate': result = self._lastLocationUpdate() return json.dumps(result if result is not None else {}) def _listTags(self): dct = {} for id in self.ctx.airtags.keys(): dct[id] = self.ctx.airtags[id].toDict() return dct def _getPos(self): findMy = FindMy(self.ctx) data = findMy.retrieveLocations() return data def _refresh(self): self.ctx.signInDone = False findMy = FindMy(self.ctx) try: data = findMy.retrieveLocations() except requests.exceptions.ConnectTimeout as e: msg = f"[API] Anisette Server not running: {str(e)}" self.ctx.errMsg = msg self.ctx.log.error(msg) data = {"status": "fail", "msg": msg} return data def _getTagData(self, id): self.log.debug(f"[API] Cmds' getTagData parameter is id={id}") if id in self.ctx.airtags.keys(): tag = self.ctx.airtags[id] dct = tag.toDict() dct['status'] = 'ok' else: dct = {'status': 'fail', 'msg': 'tag not found', 'id': id} return dct def _editTag(self, id, name, privKey, advKey, imgId): self.log.debug(f"[API] Cmds' editTag parameter are id={id}, name={name}, private Key={privKey}, " f"advertisementKey={advKey}") if id in self.ctx.airtags.keys(): tag = self.ctx.airtags[id] tag.name = name tag.privateKey = privKey tag.advertisementKey = advKey tag.imgId = imgId if tag.needsSave: tag.save() dct = {'status': 'ok', 'dataChanged': str(tag.needsSave)} else: dct = {'status': 'fail', 'msg': 'tag not found', 'id': id} return dct def _addTag(self, id, name, privKey, advKey, imgId): self.log.debug(f"[API] Cmds' addTag parameter are id={id}, name={name}, private Key={privKey}, " f"advertisementKey={advKey}")
tag = AirTag(self.ctx)
0
2023-12-16 12:39:52+00:00
8k
Samuel-Effiong/Django-Dynamic-Table
django_dynamic_table/tests.py
[ { "identifier": "DynamicTable", "path": "django_dynamic_table/models.py", "snippet": "class DynamicTable(models.Model):\r\n\r\n table_name = models.CharField(_('Table Name'), max_length=255, unique=True)\r\n table_description = models.TextField(_('Table Description'), blank=True)\r\n date_created = models.DateTimeField(_('Date Created'), default=timezone.now)\r\n\r\n table_columns = models.ManyToManyField('TableColumn', blank=True)\r\n table_rows = models.ManyToManyField('TableRow', blank=True)\r\n\r\n class Meta:\r\n ordering = ('-date_created', )\r\n\r\n def __str__(self) -> str:\r\n return f\"{self.table_name}\"\r\n\r\n def __total_table_rows(self) -> int:\r\n field = self.table_columns.first()\r\n\r\n if field and isinstance(field, TableColumn):\r\n return self.table_columns.all().count()\r\n else:\r\n # the table is empty\r\n return 0\r\n\r\n def __total_table_columns(self) -> int:\r\n return self.table_columns.all().count()\r\n\r\n def table_info(self) -> dict[str, int]:\r\n description = {\r\n 'rows': self.__total_table_rows(),\r\n 'columns': self.__total_table_columns()\r\n }\r\n return description\r\n\r\n def is_empty(self) -> bool:\r\n table_info = self.table_info()\r\n\r\n rows = table_info['rows']\r\n columns = table_info['columns']\r\n\r\n return True if columns == 0 or rows == 0 else False\r\n\r\n def is_column(self, column_name: str) -> bool:\r\n if not isinstance(column_name, str):\r\n raise ValueError(\"column name must be a str\")\r\n\r\n try:\r\n column = self.table_columns.get(column_name=column_name)\r\n return True\r\n except TableColumn.DoesNotExist:\r\n return False\r\n\r\n def get_supported_data_types(self) -> list[str]:\r\n return [data_type[0] for data_type in __SUPPORTED_DATA_TYPE_CHOICES__]\r\n\r\n def data_type_is_supported(self, data_type: str | list) -> bool | list[bool]:\r\n supported_data_types = self.get_supported_data_types()\r\n\r\n if isinstance(data_type, str):\r\n return data_type.lower().strip() in supported_data_types\r\n elif isinstance(data_type, (list, tuple, set)):\r\n return [_type.lower().strip() in supported_data_types for _type in data_type]\r\n else:\r\n raise ValueError('arg must be either a str or a sequence')\r\n\r\n def add_column(self, column_name: str, data_type: str):\r\n if isinstance(column_name, str) and isinstance(data_type, str):\r\n if not self.data_type_is_supported(data_type):\r\n raise UnSupportedDataType()\r\n if self.is_column(column_name):\r\n raise DuplicateColumnInTable()\r\n\r\n table_column = TableColumn(\r\n table=self,\r\n column_name=column_name,\r\n column_data_type=data_type\r\n )\r\n table_column.save()\r\n\r\n self.table_columns.add(table_column)\r\n\r\n return table_column\r\n else:\r\n raise DynamicTableError(\"argument must be str, use self.bulk_add_columns to add multiple columns\")\r\n\r\n def bulk_add_columns(self, column_names: Sequence[str], data_types: Sequence[str]):\r\n allowed_argument_type = (list, tuple, set)\r\n if isinstance(column_names, allowed_argument_type) and isinstance(data_types, allowed_argument_type):\r\n if len(column_names) != len(data_types):\r\n raise DynamicTableError(f\"len({column_names}) = {len(column_names)} != len({data_types}) = {len(data_types)}\")\r\n else:\r\n # check if list of data_types contains any unsupported data type\r\n supported_data_type = self.data_type_is_supported(data_types)\r\n if False in supported_data_type:\r\n raise UnSupportedDataType(f\"{data_types} data type that are supported are: {supported_data_type}\")\r\n else:\r\n # check if the provided 
column names contain duplicates, raise an error if it does\r\n unique_column_names = set(column_names)\r\n if len(column_names) != len(unique_column_names):\r\n raise DuplicateColumnInTable()\r\n\r\n is_column = [self.is_column(column) for column in column_names]\r\n\r\n if True in is_column:\r\n raise DuplicateColumnInTable()\r\n\r\n columns = [\r\n TableColumn.objects.create(\r\n table=self,\r\n column_name=column_name,\r\n column_data_type=data_type\r\n )\r\n\r\n for column_name, data_type in zip(column_names, data_types, strict=True)\r\n # the above further exception should not be activated, but adding it there,\r\n # if just in case, for some unknown reason it escape the other safeguard.\r\n ]\r\n self.table_columns.add(*columns)\r\n return columns\r\n else:\r\n raise DynamicTableError(\"argument must be a sequence. use self.add_column to add a single column\")\r\n\r\n def add_row(self, value: dict):\r\n if not isinstance(value, dict):\r\n raise ValueError(f\"{value} is not a list or a dict\")\r\n\r\n if self.__total_table_columns() == 0:\r\n raise TableHaveNoColumn()\r\n\r\n row = []\r\n table_row = TableRow.objects.create(table=self)\r\n\r\n for table_column in self.table_columns.all():\r\n cell_value = value.get(table_column.column_name, \"\")\r\n\r\n cell = CellValue.objects.create(\r\n value=cell_value, table=self,\r\n table_column=table_column,\r\n table_row=table_row\r\n )\r\n row.append(cell)\r\n\r\n # add cell to column\r\n table_column.column_cells.add(cell)\r\n\r\n # add cell to row\r\n table_row.row_cells.add(*row)\r\n\r\n # add row to table\r\n self.table_rows.add(table_row)\r\n return table_row\r\n\r\n def bulk_add_rows(self, values: Sequence[dict]) -> list:\r\n if not isinstance(values, (list, tuple, set)):\r\n raise ValueError('values must be a sequence of dict')\r\n\r\n rows = []\r\n for row in values:\r\n if not isinstance(row, dict):\r\n raise ValueError('values must be a sequence of dict')\r\n if self.__total_table_columns() == 0:\r\n raise TableHaveNoColumn()\r\n\r\n rows.append(self.add_row(row))\r\n return rows\r\n\r\n def delete_column(self, column_name):\r\n # using get instead of filter if for some reason the unique parameter\r\n # was disabled in the table column definition, this will doubly ensure\r\n # that the field are unique else it will always raise an error if it\r\n # encounter duplicates column names\r\n\r\n if not isinstance(column_name, str):\r\n raise ValueError('column_name must be a str')\r\n try:\r\n column = self.table_columns.get(column_name=column_name)\r\n except TableColumn.MultipleObjectsReturned:\r\n raise DuplicateColumnInTable()\r\n except TableColumn.DoesNotExist:\r\n raise ColumnNotInTable()\r\n else:\r\n # remove column from the table\r\n self.table_columns.remove(column)\r\n\r\n # delete the removed column and all the cells associated with it\r\n column.delete()\r\n return column\r\n\r\n def delete_row(self, row_index=None):\r\n \"\"\"if row_index is None remove the last row\"\"\"\r\n\r\n if not isinstance(row_index, (int, type(None))):\r\n raise TypeError(\"Row index value must be an integer\")\r\n\r\n try:\r\n if row_index is None:\r\n row = self.table_rows.last()\r\n else:\r\n row = self.table_rows.get(pk=row_index)\r\n except TableRow.DoesNotExist:\r\n raise RowNotInTable()\r\n else:\r\n # remove row from the table\r\n self.table_rows.remove(row)\r\n\r\n # delete the removed row and all the cells associated with it\r\n row.delete()\r\n return row\r\n\r\n def get_cell(self, column_name, row_index):\r\n if isinstance(row_index, 
str):\r\n row_index = int(row_index)\r\n if not self.is_column(column_name):\r\n raise ColumnNotInTable()\r\n try:\r\n cell = CellValue.objects.get(\r\n table=self,\r\n table_column__column_name=column_name,\r\n table_row_id=row_index\r\n )\r\n return cell\r\n except CellValue.DoesNotExist:\r\n raise CellDoesNotExist\r\n\r\n def get_column_cells(self, column_name):\r\n if not self.is_column(column_name):\r\n raise ColumnNotInTable()\r\n\r\n column = TableColumn.objects.get(table=self, column_name=column_name)\r\n column_cells = column.column_cells.all()\r\n\r\n return list(column_cells)\r\n\r\n def get_row_cells(self, row_index):\r\n if isinstance(row_index, str):\r\n row_index = int(row_index)\r\n\r\n try:\r\n row = TableRow.objects.get(table=self, id=row_index)\r\n row_cells = row.row_cells.all()\r\n except TableRow.DoesNotExist:\r\n raise RowNotInTable()\r\n\r\n return list(row_cells)\r" }, { "identifier": "TableColumn", "path": "django_dynamic_table/models.py", "snippet": "class TableColumn(models.Model):\r\n table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)\r\n\r\n column_name = models.CharField(max_length=255, unique=True)\r\n column_data_type = models.CharField(max_length=15, choices=__SUPPORTED_DATA_TYPE_CHOICES__)\r\n\r\n column_cells = models.ManyToManyField('CellValue', blank=True)\r\n\r\n def __str__(self):\r\n return f\"{self.column_name}: {self.column_data_type} -- {self.table}\"\r\n\r\n def _get_column_values(self):\r\n return self.column_cells.all()\r" }, { "identifier": "TableRow", "path": "django_dynamic_table/models.py", "snippet": "class TableRow(models.Model):\r\n table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)\r\n row_cells = models.ManyToManyField('CellValue', blank=True)\r\n\r\n def __str__(self):\r\n return f\"{self.table} Table: Row no. 
{self.id}\"\r\n\r\n def to_dict(self):\r\n values = {\r\n item.column.column_name: item.value\r\n for item in self.row_cells.all()\r\n }\r\n return values\r" }, { "identifier": "CellValue", "path": "django_dynamic_table/models.py", "snippet": "class CellValue(models.Model):\r\n \"\"\"Synonymous with the cell in a spreadsheet, it contains the value of the\r\n table along with relevant information about it position in the table\"\"\"\r\n value = models.TextField(blank=True)\r\n\r\n table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)\r\n table_column = models.ForeignKey(TableColumn, on_delete=models.CASCADE)\r\n table_row = models.ForeignKey(TableRow, blank=True, on_delete=models.CASCADE)\r\n\r\n def __str__(self):\r\n return self.value\r\n \r\n def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\r\n self.full_clean()\r\n super(CellValue, self).save()\r\n\r\n def clean(self):\r\n super(CellValue, self).clean()\r\n self.__validate_data_type__(self.value, self.table_column.column_data_type)\r\n\r\n def __validate_data_type__(self, value, data_type):\r\n \"\"\"\r\n Ensures that the values is saved in the database in the format that\r\n can be easily be converted to the desired data type\r\n \"\"\"\r\n if data_type == 'char' or data_type == 'textfield':\r\n self.value = str(value)\r\n elif data_type == 'int':\r\n if not isinstance(value, int):\r\n try:\r\n if value:\r\n self.value = int(float(value))\r\n else:\r\n self.value = \"\"\r\n except ValueError:\r\n raise CantParseValueToDataType(f\"{value} to {data_type}\")\r\n elif data_type == 'float':\r\n if not isinstance(value, float):\r\n try:\r\n if value:\r\n self.value = float(value)\r\n else:\r\n self.value = \"\"\r\n except ValueError:\r\n raise CantParseValueToDataType(f\"{value} to {data_type}\")\r\n elif data_type == 'datetime':\r\n if value:\r\n # is it a str or a datetime object\r\n if isinstance(value, str):\r\n try:\r\n value = self.value.strip().lower()\r\n value = datetime.fromisoformat(value)\r\n self.value = value.isoformat()\r\n except ValueError:\r\n self.value = \"\"\r\n else:\r\n try:\r\n self.value = value.isoformat()\r\n except Exception:\r\n self.value = ''\r\n else:\r\n self.value = \"\"\r\n elif data_type == 'bool':\r\n if value:\r\n if not isinstance(value, bool):\r\n value = str(value).strip().title()\r\n if value == 'True' or value == 'False':\r\n self.value = eval(value)\r\n else:\r\n raise CantParseValueToDataType(f\"{value} to {data_type}\")\r\n else:\r\n self.value = \"\"\r\n\r\n def get_value(self):\r\n \"\"\"Get the value base on the data type\r\n\r\n If the data type is of file, it will retrieve the file from where\r\n it was uploaded, else format the value to the data type.\r\n\r\n The value should not be accessed directly.\r\n \"\"\"\r\n\r\n data_type = self.table_column.column_data_type\r\n\r\n if data_type == 'char' or data_type == 'textfield':\r\n return self.value\r\n elif data_type == 'int':\r\n try:\r\n return int(float(self.value))\r\n except ValueError:\r\n return self.value\r\n elif data_type == 'float':\r\n try:\r\n return float(self.value)\r\n except ValueError:\r\n return self.value\r\n elif data_type == 'bool':\r\n try:\r\n # FIXME: Put more restrictions on this\r\n return eval(self.value)\r\n except Exception:\r\n return self.value\r\n elif data_type == 'datetime':\r\n try:\r\n return datetime.fromisoformat(self.value)\r\n except ValueError:\r\n return self.value\r" }, { "identifier": "TableHaveNoRow", "path": "django_dynamic_table/errors.py", 
"snippet": "class TableHaveNoRow(DynamicTableError):\r\n pass\r" }, { "identifier": "TableHaveNoColumn", "path": "django_dynamic_table/errors.py", "snippet": "class TableHaveNoColumn(DynamicTableError):\r\n pass\r" }, { "identifier": "ColumnNotInTable", "path": "django_dynamic_table/errors.py", "snippet": "class ColumnNotInTable(DynamicTableError):\r\n pass\r" }, { "identifier": "RowNotInTable", "path": "django_dynamic_table/errors.py", "snippet": "class RowNotInTable(DynamicTableError):\r\n pass\r" }, { "identifier": "DuplicateColumnInTable", "path": "django_dynamic_table/errors.py", "snippet": "class DuplicateColumnInTable(DynamicTableError):\r\n pass\r" }, { "identifier": "DynamicTableError", "path": "django_dynamic_table/errors.py", "snippet": "class DynamicTableError(Exception):\r\n pass\r" }, { "identifier": "UnSupportedDataType", "path": "django_dynamic_table/errors.py", "snippet": "class UnSupportedDataType(TableColumnError):\r\n pass\r" }, { "identifier": "CantParseValueToDataType", "path": "django_dynamic_table/errors.py", "snippet": "class CantParseValueToDataType(CellValueError):\r\n pass\r" }, { "identifier": "CellDoesNotExist", "path": "django_dynamic_table/errors.py", "snippet": "class CellDoesNotExist(CellValueError):\r\n pass" } ]
import datetime
from typing import List
from django.test import TestCase
from django.utils import timezone
from .models import DynamicTable, TableColumn, TableRow, CellValue
from .errors import (
    TableHaveNoRow, TableHaveNoColumn, ColumnNotInTable, RowNotInTable,
    DuplicateColumnInTable, DynamicTableError, UnSupportedDataType,
    CantParseValueToDataType, CellDoesNotExist
)
4,108
# Create your tests here. class DynamicTableTest(TestCase): def setUp(self) -> None: self.name = 'Employee Records' self.description = "Contains company employee personal information" self.date_created = timezone.now().date() self.column_name = 'First Name' self.data_type = 'char' self.supported_data_type = ['int', 'char', 'textfield', 'float', 'bool', 'date'] self.table = DynamicTable( table_name=self.name, table_description=self.description ) self.table.save() def test_table_creation_with_no_columns_and_rows(self): self.assertEqual(self.name, str(self.table)) self.assertEqual(self.description, self.table.table_description) self.assertEqual(self.date_created, self.table.date_created.date()) default_value = { 'rows': 0, 'columns': 0 } self.assertDictEqual(default_value, self.table.table_info()) # Delete columns test self.assertRaises(ColumnNotInTable, self.table.delete_column, column_name='Name') # Delete rows test self.assertRaises(RowNotInTable, self.table.delete_row, row_index=1) self.assertRaises(TypeError, self.table.delete_row, row_index='1') self.assertTrue(self.table.is_empty()) # ensures that rows can't be added to an empty table self.assertRaises(TableHaveNoColumn, self.table.add_row, value={}) self.assertRaises(ValueError, self.table.add_row, value='love') self.assertRaises(ValueError, self.table.add_row, value=[1, 2, 3]) self.assertRaises(ValueError, self.table.add_row, value=(1, 2, 3)) self.assertRaises(TableHaveNoColumn, self.table.bulk_add_rows, values=[{}, {}]) self.assertRaises(ValueError, self.table.bulk_add_rows, values={}) self.assertRaises(ValueError, self.table.bulk_add_rows, values='love') self.assertRaises(ValueError, self.table.bulk_add_rows, values=(1, 2)) self.assertRaises(ValueError, self.table.bulk_add_rows, values=[1, '2']) def test_supported_data_types(self): self.assertListEqual(sorted(self.supported_data_type), sorted(self.table.get_supported_data_types())) self.assertTrue(self.table.data_type_is_supported(' CHAR')) self.assertTrue(self.table.data_type_is_supported('DaTe ')) self.assertTrue(self.table.data_type_is_supported(' bool ')) self.assertFalse(self.table.data_type_is_supported('File')) self.assertFalse(self.table.data_type_is_supported('timE')) self.assertIsInstance(self.table.data_type_is_supported(['file', 'char']), list) self.assertListEqual([True, False, True, False], self.table.data_type_is_supported(['cHar', 'file', 'DATE', 'time'])) self.assertListEqual([True, True], self.table.data_type_is_supported(['cHar', 'DATE',])) self.assertListEqual([False, False], self.table.data_type_is_supported(['File', 'time'])) def test_adding_column_with_incorrect_parameters(self):
# Create your tests here. class DynamicTableTest(TestCase): def setUp(self) -> None: self.name = 'Employee Records' self.description = "Contains company employee personal information" self.date_created = timezone.now().date() self.column_name = 'First Name' self.data_type = 'char' self.supported_data_type = ['int', 'char', 'textfield', 'float', 'bool', 'date'] self.table = DynamicTable( table_name=self.name, table_description=self.description ) self.table.save() def test_table_creation_with_no_columns_and_rows(self): self.assertEqual(self.name, str(self.table)) self.assertEqual(self.description, self.table.table_description) self.assertEqual(self.date_created, self.table.date_created.date()) default_value = { 'rows': 0, 'columns': 0 } self.assertDictEqual(default_value, self.table.table_info()) # Delete columns test self.assertRaises(ColumnNotInTable, self.table.delete_column, column_name='Name') # Delete rows test self.assertRaises(RowNotInTable, self.table.delete_row, row_index=1) self.assertRaises(TypeError, self.table.delete_row, row_index='1') self.assertTrue(self.table.is_empty()) # ensures that rows can't be added to an empty table self.assertRaises(TableHaveNoColumn, self.table.add_row, value={}) self.assertRaises(ValueError, self.table.add_row, value='love') self.assertRaises(ValueError, self.table.add_row, value=[1, 2, 3]) self.assertRaises(ValueError, self.table.add_row, value=(1, 2, 3)) self.assertRaises(TableHaveNoColumn, self.table.bulk_add_rows, values=[{}, {}]) self.assertRaises(ValueError, self.table.bulk_add_rows, values={}) self.assertRaises(ValueError, self.table.bulk_add_rows, values='love') self.assertRaises(ValueError, self.table.bulk_add_rows, values=(1, 2)) self.assertRaises(ValueError, self.table.bulk_add_rows, values=[1, '2']) def test_supported_data_types(self): self.assertListEqual(sorted(self.supported_data_type), sorted(self.table.get_supported_data_types())) self.assertTrue(self.table.data_type_is_supported(' CHAR')) self.assertTrue(self.table.data_type_is_supported('DaTe ')) self.assertTrue(self.table.data_type_is_supported(' bool ')) self.assertFalse(self.table.data_type_is_supported('File')) self.assertFalse(self.table.data_type_is_supported('timE')) self.assertIsInstance(self.table.data_type_is_supported(['file', 'char']), list) self.assertListEqual([True, False, True, False], self.table.data_type_is_supported(['cHar', 'file', 'DATE', 'time'])) self.assertListEqual([True, True], self.table.data_type_is_supported(['cHar', 'DATE',])) self.assertListEqual([False, False], self.table.data_type_is_supported(['File', 'time'])) def test_adding_column_with_incorrect_parameters(self):
self.assertRaises(DynamicTableError, self.table.add_column, ['first name'], ['char'])
9
2023-12-19 15:50:38+00:00
8k
mohame54/Speech-Transcriber-App
whisper/whisper.py
[ { "identifier": "Inference", "path": "whisper/decoding.py", "snippet": "class Inference:\n \"\"\"\n Class for handling sequence generation inference.\n\n Attributes:\n encoder: ONNX runtime inference session for the encoder.\n decoder: ONNX runtime inference session for the decoder.\n _mode: Language mode (\"English\" or \"Arabic\").\n \"\"\"\n def __init__(\n self,\n encoder_path: str,\n decoder_path: str,\n mode: Optional[str] = \"English\"\n ):\n \"\"\"\n Initializes the Inference instance.\n\n Parameters:\n encoder_path: Path to the encoder model.\n decoder_path: Path to the decoder model.\n mode: Language mode (\"English\" or \"Arabic\").\n \"\"\"\n options = onnxruntime.SessionOptions()\n providers = [\"CPUExecutionProvider\"]\n options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL\n self.encoder = onnxruntime.InferenceSession(\n encoder_path, sess_options=options, providers=providers)\n self.decoder = onnxruntime.InferenceSession(\n decoder_path, sess_options=options, providers=providers)\n self._mode = mode\n self.reset()\n \n def reset(self):\n self.cross_k_cache = None\n self.cross_v_cache = None\n\n def encode(self, feats):\n _, cross_k_cache, cross_v_cache = self.encoder.run(None, {\"mel\":feats}) \n self.cross_k_cache = cross_k_cache\n self.cross_v_cache = cross_v_cache\n\n def get_inits(self):\n lang_id = ENGLISH_ID if self._mode == \"English\" else ARABIC_ID\n k_caches = np.zeros((6, 1, 8, 1, 64)).astype(np.float32)\n v_caches = np.zeros((6, 1, 8, 1, 64)).astype(np.float32)\n tokens = [50258, lang_id, 50359, 50363]\n hyp: Hypothesis = Hypothesis(\n tokens,\n k_caches,\n v_caches,\n )\n return hyp \n\n def set_mode(self, mode: str):\n self._mode = mode \n\n def __call__(\n self,\n hyp: Hypothesis,\n initial: Optional[bool] = False,\n ) -> Tuple[np.ndarray]:\n \"\"\"\n Generates logits for the given hypothesis using the encoder and decoder.\n\n Parameters:\n hyp: The hypothesis.\n initial: Whether it's the initial generation or not.\n\n Returns:\n np.ndarray: Logits for the hypothesis.\n np.ndarray: keys caches for inference.\n np.ndarray: values caches for inference.\n \"\"\"\n if initial:\n tokens = np.array(hyp.tokens)\n else:\n tokens = np.array([hyp.tokens[-1]]) \n tokens = np.expand_dims(tokens, axis=0).astype(np.int32)\n ort_inputs = {\n \"tokens\":tokens,\n \"self_k_caches\":hyp.k_caches,\n \"self_v_caches\":hyp.v_caches,\n \"cross_k_caches\":self.cross_k_cache,\n \"cross_v_caches\":self.cross_v_cache,\n } \n outs = self.decoder.run(None, ort_inputs) \n # update the internal variables\n # update the k an v states \n k_caches = outs[1] \n v_caches = outs[2]\n return (outs[0][:, -1: ,:]).squeeze(), k_caches, v_caches " }, { "identifier": "GreedyDecoding", "path": "whisper/decoding.py", "snippet": "class GreedyDecoding(Decoding):\n \"\"\"\n Greedy decoding strategy for sequence generation.\n\n Attributes:\n inference: The inference instance.\n eos_id: End-of-sequence token ID.\n temperature: Temperature parameter for softmax.\n top_p: Top-p sampling parameter.\n \"\"\"\n\n def __init__(\n self,\n inference,\n eos_id,\n temperature=0.9,\n top_p=0.95\n ):\n \"\"\"\n Initializes the GreedyDecoding instance.\n\n Parameters:\n inference: The inference instance.\n eos_id: End-of-sequence token ID.\n temperature (float): Temperature parameter for softmax.\n top_p (float): Top-p sampling parameter.\n \"\"\"\n super().__init__(inference)\n self.eos_id = eos_id\n self.temperature = temperature\n self.top_p = top_p\n\n def update(self, logits: 
np.ndarray, hyp: Hypothesis):\n \"\"\"\n Updates a hypothesis based on logits using the greedy decoding strategy.\n\n Parameters:\n logits: The logits from the model.\n hyp: The hypothesis.\n\n Returns:\n Hypothesis: Updated hypothesis.\n \"\"\"\n logits = logits.reshape(-1,)\n if self.temperature == 0 or self.temperature == 1.0:\n next_token = logits.argmax(axis=-1)\n else:\n probs = softmax(logits / self.temperature)\n next_token = sample_top_p(probs, self.top_p)\n logprobs = log_softmax(logits)[next_token]\n hyp.logprob += logprobs\n if next_token == self.eos_id:\n hyp.is_done = True\n hyp.tokens.append(next_token)\n return hyp\n\n def __call__(\n self,\n audio_feats:np.ndarray,\n max_len:int=50,\n return_multiple:bool=False\n ) -> List[Hypothesis]:\n \"\"\"\n Performs greedy decoding on audio features.\n\n Parameters:\n audio_feats (numpy array): Audio features.\n max_len (int): Maximum length of the generated sequence.\n return_multiple (bool): Whether to return multiple hypotheses or the best one.\n\n Returns:\n Hypothesis: Generated hypothesis.\n \"\"\"\n self.reset()\n self.inference.encode(audio_feats)\n hyp: Hypothesis = self.inference.get_inits()\n for i in range(1, max_len):\n is_initial = i == 1\n # Retrive the current logits and k_caches and v_caches\n logits, k_cahces, v_caches = self.inference(hyp, initial=is_initial)\n # Update the Hypothesis k_cahces, v_caches \n hyp.k_caches = k_cahces\n hyp.v_caches = v_caches\n hyp = self.update(logits, hyp)\n if hyp.is_done:\n break\n # Release keys and values caches. \n hyp.k_caches = None\n hyp.v_caches = None \n return hyp" }, { "identifier": "BeamSearchDecoding", "path": "whisper/decoding.py", "snippet": "class BeamSearchDecoding(Decoding):\n \"\"\"\n Beam search decoding strategy for sequence generation.\n\n Attributes:\n inference: The inference instance.\n eos_id: End-of-sequence token ID.\n beam_size: Size of the beam.\n length_penalty: Length penalty factor.\n \"\"\"\n\n def __init__(\n self,\n inference,\n eos_id: int,\n beam_size: int = 3,\n length_penalty: float = 1,\n top_p=0.95,\n temperature=1.0,\n ):\n \"\"\"\n Initializes the BeamSearchDecoding instance.\n\n Parameters:\n inference: The inference instance.\n eos_id (int): End-of-sequence token ID.\n beam_size (int): Size of the beam.\n length_penalty (float): Length penalty factor.\n \"\"\"\n super().__init__(inference)\n self.eos_id = eos_id\n self.beam_size = beam_size\n self.length_penalty = MaximumLikelihoodRanker(length_penalty)\n self.temperature = temperature\n self.top_p = top_p\n\n def update(\n self,\n hyps: List[Hypothesis],\n initial: bool = False,\n ):\n \"\"\"\n Updates hypotheses based on logits using the beam search strategy.\n\n Parameters:\n hyps: List of hypotheses.\n initial: Whether it's the initial hyp or not.\n\n Returns:\n List[Hypothesis]: Updated hypotheses.\n \"\"\"\n new_beam = []\n\n for hyp in hyps:\n if hyp.is_done:\n # If the hypothesis is already completed, keep it in the beam\n new_beam.append(hyp)\n continue\n\n # Get logits for the current hypothesis\n logits, k_caches, v_caches = self.inference(hyp, initial=initial)\n # Apply greedy decode or top p sampling to get the top beam_width candidates\n if self.temperature > 0.0 and self.temperature != 1.0:\n probs = softmax(logits / self.temperature)\n top_indices = sample_top_p(probs, self.top_p, size=self.beam_size)\n else:\n top_indices = np.argsort(logits)[::-1][:self.beam_size] \n # Apply log softmax normalize then calculate \n logits = logits - logits.max(axis=-1) \n sum_logits = 
np.log(np.sum(np.exp(logits)))\n for idx in top_indices:\n # Create a new hypothesis by extending the current one\n new_tokens = hyp.tokens + [idx]\n #Calculate the log probability\n new_logprob = hyp.logprob + (logits[idx] - sum_logits)\n new_is_done = (idx == self.eos_id)\n # Add the new hypothesis to the beam\n new_beam.append(\n Hypothesis(\n tokens=new_tokens,\n k_caches=k_caches,\n v_caches=v_caches,\n logprob=new_logprob,\n is_done=new_is_done\n )\n )\n\n # Sort the beam based on log probabilities\n new_beam = sorted(new_beam, key=lambda h: h.logprob, reverse=True)\n return new_beam[:self.beam_size]\n\n def __call__(\n self,\n audio_feats: np.ndarray,\n max_len: int = 50,\n return_multiple: bool=False\n ) -> List[Hypothesis]:\n \"\"\"\n Performs beam search decoding on audio features.\n\n Parameters:\n audio_feats (numpy array): Audio features.\n max_len (int): Maximum length of the generated sequence.\n return_multiple (bool): Whether to return multiple hypotheses or the best one.\n\n Returns:\n Hypothesis or List[Hypothesis]: Generated hypothesis or hypotheses.\n \"\"\"\n self.reset()\n self.inference.encode(audio_feats)\n beam: List[Hypothesis] = [self.inference.get_inits()]\n for i in range(1, max_len):\n is_initial = i == 1\n beam = self.update(\n beam,\n initial=is_initial\n )\n if any(h.is_done for h in beam):\n break\n beam = self.finalize(beam)\n if not return_multiple:\n best_idx = self.length_penalty.rank(beam)\n beam = beam[best_idx]\n return beam\n \n def finalize(self, hyps: List[Hypothesis]):\n \"\"\"\n Finalizes the decoding process by appending end-of-sequence tokens to hypotheses.\n\n Parameters:\n hyps: List of hypotheses.\n\n Returns:\n List[Hypothesis]: Finalized hypotheses.\n \"\"\"\n for i in range(len(hyps)):\n hyps[i].k_caches = None\n hyps[i].v_caches = None\n if hyps[i].tokens[-1] != self.eos_id:\n hyps[i].tokens.append(self.eos_id)\n return hyps" }, { "identifier": "Hypothesis", "path": "whisper/decoding.py", "snippet": "class Hypothesis:\n \"\"\"\n Represents a hypothesis in sequence generation.\n\n Attributes:\n tokens (List[int]): List of tokens in the hypothesis.\n k_caches (np.ndarray): key caches for inference.\n v_caches (np.ndarray): value caches for inference.\n logprob (float): Log probability of the hypothesis.\n is_done (bool): Indicates whether the hypothesis is complete.\n \"\"\"\n tokens: List[int]\n k_caches: Optional[np.ndarray] = None\n v_caches: Optional[np.ndarray] = None\n logprob: float = 0.0\n is_done: bool = False" } ]
from typing import Literal, Union, Tuple, Optional, List
from transformers import WhisperFeatureExtractor, WhisperTokenizer
from dataclasses import dataclass
from .decoding import Inference, GreedyDecoding, BeamSearchDecoding, Hypothesis
import soxr
import soundfile as sf
import numpy as np
import wget
import os
3,745
# LOCAL @dataclass class WhisperConfig: """ Configuration class for the WhisperInference module. Attributes: - encoder_path: Path to the encoder model. - decoder_path: Path to the decoder model. - model_id: Model identifier, default is "openai/whisper-base" this is the only one supported for now. - transcribption_mode: Language mode, default is "English". - decoding: Decoding mode, default is "greedy". - beam_size: Beam size for beam search decoding, default is 5. - eos_id: End-of-sequence token ID, default is 50257. - temperature: Temperature for decoding, default is 1.0. - top_p: Top-p sampling parameter, default is 0.98. - length_penalty: Length penalty for beam search decoding, default is 2.0. """ encoder_path: str decoder_path: str model_id: str = "openai/whisper-base" transcribption_mode: Literal["English", "Arabic"] = "English" decoding: Literal["greedy", "beam"] = "greedy" beam_size: int = 5 eos_id: int = 50257 temperature: float = 1.0 top_p: float = 0.98 length_penalty: float = 2.0 class WhisperInference: """ Inference module for transcribing audio using the Whisper model. Attributes: - processor: WhisperFeatureExtractor for extracting features from audio. - tokenizer: WhisperTokenizer for tokenizing transcriptions. - decoding: Decoding strategy based on the selected mode. """ def __init__( self, config: WhisperConfig ): """ Initializes the WhisperInference module. Args: - config: WhisperConfig object containing model configuration. """ # Initialize feature extractor and tokenizer self.processor = WhisperFeatureExtractor.from_pretrained(config.model_id) self.tokenizer = WhisperTokenizer.from_pretrained( config.model_id, language=config.transcribption_mode, task="transcribe", ) self.config = config self.inference = Inference( self.config.encoder_path, self.config.decoder_path, self.config.transcribption_mode, ) self.set_decoding() def set_decoding(self, decoding: Optional[str]= None): # Initialize inference and decoding strategy based on the selected mode decoding = decoding if decoding is not None else self.config.decoding if decoding == "greedy": self.decoding = GreedyDecoding( self.inference, self.config.eos_id, self.config.temperature, self.config.top_p ) else: self.decoding = BeamSearchDecoding( self.inference, self.config.eos_id, self.config.beam_size, self.config.length_penalty, self.config.top_p, self.config.temperature, ) def _extract_feats(self, audio)-> np.ndarray: """ Extracts features from the input audio using the feature extractor. Args: - audio: Input audio as a numpy array. Returns: - feats: Extracted log mel spectrogram. """ feats = self.processor(audio, sampling_rate=16_000)['input_features'] return feats def __call__( self, audio: Union[np.ndarray, str], max_len: int = 50, return_multiple: bool = False, return_hyps: bool = False, **generation_kwargs,
# LOCAL @dataclass class WhisperConfig: """ Configuration class for the WhisperInference module. Attributes: - encoder_path: Path to the encoder model. - decoder_path: Path to the decoder model. - model_id: Model identifier, default is "openai/whisper-base" this is the only one supported for now. - transcribption_mode: Language mode, default is "English". - decoding: Decoding mode, default is "greedy". - beam_size: Beam size for beam search decoding, default is 5. - eos_id: End-of-sequence token ID, default is 50257. - temperature: Temperature for decoding, default is 1.0. - top_p: Top-p sampling parameter, default is 0.98. - length_penalty: Length penalty for beam search decoding, default is 2.0. """ encoder_path: str decoder_path: str model_id: str = "openai/whisper-base" transcribption_mode: Literal["English", "Arabic"] = "English" decoding: Literal["greedy", "beam"] = "greedy" beam_size: int = 5 eos_id: int = 50257 temperature: float = 1.0 top_p: float = 0.98 length_penalty: float = 2.0 class WhisperInference: """ Inference module for transcribing audio using the Whisper model. Attributes: - processor: WhisperFeatureExtractor for extracting features from audio. - tokenizer: WhisperTokenizer for tokenizing transcriptions. - decoding: Decoding strategy based on the selected mode. """ def __init__( self, config: WhisperConfig ): """ Initializes the WhisperInference module. Args: - config: WhisperConfig object containing model configuration. """ # Initialize feature extractor and tokenizer self.processor = WhisperFeatureExtractor.from_pretrained(config.model_id) self.tokenizer = WhisperTokenizer.from_pretrained( config.model_id, language=config.transcribption_mode, task="transcribe", ) self.config = config self.inference = Inference( self.config.encoder_path, self.config.decoder_path, self.config.transcribption_mode, ) self.set_decoding() def set_decoding(self, decoding: Optional[str]= None): # Initialize inference and decoding strategy based on the selected mode decoding = decoding if decoding is not None else self.config.decoding if decoding == "greedy": self.decoding = GreedyDecoding( self.inference, self.config.eos_id, self.config.temperature, self.config.top_p ) else: self.decoding = BeamSearchDecoding( self.inference, self.config.eos_id, self.config.beam_size, self.config.length_penalty, self.config.top_p, self.config.temperature, ) def _extract_feats(self, audio)-> np.ndarray: """ Extracts features from the input audio using the feature extractor. Args: - audio: Input audio as a numpy array. Returns: - feats: Extracted log mel spectrogram. """ feats = self.processor(audio, sampling_rate=16_000)['input_features'] return feats def __call__( self, audio: Union[np.ndarray, str], max_len: int = 50, return_multiple: bool = False, return_hyps: bool = False, **generation_kwargs,
)-> Union[Hypothesis, List[Hypothesis]]:
3
2023-12-16 13:35:51+00:00
8k
zhcui/polar_preview
polar/lang_firsov/lang_firsov.py
[ { "identifier": "grad", "path": "polar/lang_firsov/grad.py", "snippet": "def get_grad_lf(mylf, params=None, rdm1=None, mo_coeff=None, mo_occ=None,\n scf_max_cycle=50, fci=False, beta=np.inf):\ndef get_grad_lf_full(mylf, params=None, rdm1=None, mo_coeff=None, mo_occ=None):\ndef get_grad_glf(mylf, params=None, rdm1=None, mo_coeff=None, mo_occ=None,\n scf_max_cycle=50, fci=False, beta=np.inf):\ndef get_grad_glf_2(mylf, params=None, rdm1=None, mo_coeff=None, mo_occ=None,\n scf_max_cycle=50, fci=False, beta=np.inf):\ndef get_grad_gglf(mylf, params=None, rdm1=None, mo_coeff=None, mo_occ=None,\n scf_max_cycle=50, fci=False, beta=np.inf):\n G = h_ep - einsum('x, xp -> xp', w_p, lams)\n H2 = h2 * fac\n G = h_ep * fac1" }, { "identifier": "thermal_average", "path": "polar/lang_firsov/thermal_average.py", "snippet": "def get_str(nph, compact=True):\ndef count(string, num_bra=0):\ndef get_counts(nph, ph_str=None):\ndef comm(A, B):\ndef bch_h1_exact(h1, lams, order, H1_ref=None):\ndef bch_h1(h1, lams, order, H1_ref=None):\ndef trace_A1(lams):\ndef bch_h1_exp_ref(h1, lams):\ndef bch_h1_exp(h1, lams):\n def A_func(h):\ndef comm_h2(h2, B):\ndef bch_h2_exact(h2, lams, order, H2_ref=None):\ndef bch_h2(h2, lams, order, H2_ref=None):\ndef trace_A2(lams):\ndef bch_h2_exp_ref(h2, lams):\ndef bch_h2_exp(h2, lams):\n def A_func(h):\n H1 = np.array(h1, copy=True)\n H1 = np.array(h1, copy=True)\n H1 = np.array(h1, copy=True)\n I = np.eye(nao) * 0.5\n H1 = np.dot(op, h1.ravel()).reshape(nao, nao)\n H1 = _expm_multiply_simple(A_op, h1.ravel(), traceA=tr)\n H1 = H1.reshape(nao, nao)\n H2 = np.array(h2, copy=True)\n H2 = np.array(h2, copy=True)\n H2 = np.array(h2, copy=True)\n I = np.eye(nao)\n H2 = np.dot(op, h2.ravel()).reshape(nao, nao, nao, nao)\n H2 = _expm_multiply_simple(A_op, h2.ravel(), traceA=tr)\n H2 = H2.reshape(nao, nao, nao, nao)\n H1 = h1 * factor\n H1_2 = bch_h1(h1, lams, order=6, H1_ref=H1)\n H1_2 = bch_h1(h1, lams, order=10)\n H2 = h2 * factor\n H2_2 = bch_h2(h2, lams, order=10, H2_ref=H2)" }, { "identifier": "fc_factor", "path": "polar/fci/fci.py", "snippet": "def fc_factor(n, m, l):\n \"\"\"\n Get the Franck-Condon factors, <n|exp(-l(b-b+))|m>\n https://physics.stackexchange.com/questions/553225/representation-of-the-displacement-operator-in-number-basis\n \"\"\"\n lsq = l * l\n res = np.exp(lsq * (-0.5))\n if n >= m:\n res *= l ** (n-m)\n res *= np.sqrt(fac(m) / fac(n))\n res *= lg(lsq, m, n-m)\n else:\n res *= l ** (m-n)\n res *= (np.sqrt(fac(n) / fac(m)) * ((-1)**(m-n)))\n res *= lg(lsq, n, m-n)\n return res" } ]
from functools import partial
from scipy import linalg as la
from scipy import optimize as opt
from pyscf import gto, scf, ao2mo, lib
from pyscf.scf import hf
from pyscf.lib import logger
from polar.lang_firsov import grad
from polar.lang_firsov import thermal_average as ta
from polar.fci.fci import fc_factor
from pyscf.pbc.scf.addons import smearing_
from pyscf import fci, ao2mo
from polar.lang_firsov import mp as lfmp
from polar.lang_firsov import mp_glf
import numpy as np
3,637
lams = np.zeros(nmode, dtype=params.dtype) else: raise ValueError("unknown lf type %s"%(self)) else: if uniform: l, z = params zs = np.zeros(nmode, dtype=params.dtype) zs[:] = z if isinstance(self, GGLangFirsov): lams = np.zeros((nmode, nao, nao), dtype=params.dtype) lams[range(nmode), range(nao), range(nao)] = l elif isinstance(self, GLangFirsov): lams = np.zeros((nmode, nao), dtype=params.dtype) lams[range(nmode), range(nao)] = l elif isinstance(self, LangFirsov): lams = np.zeros(nmode, dtype=params.dtype) lams[:] = l else: raise ValueError("unknown lf type %s"%(self)) else: zs = np.array(params[-nmode:], copy=True) lams = np.array(params[:-nmode].reshape(nmode, -1), copy=True) if isinstance(self, GGLangFirsov): lams = lib.unpack_tril(lams) if lams.shape != (nmode, nao, nao): raise ValueError("lams shape %s does not match %s" %(str(lams.shape), (nmode, nao, nao))) elif isinstance(self, GLangFirsov): pass elif isinstance(self, LangFirsov): lams = lams.reshape(nmode) else: raise ValueError("unknown lf type %s"%(self)) return lams, zs def pack_params(self, lams, zs): if self.lams_only: if self.uniform: params = np.array((lams[0],)) else: params = np.hstack((lams.ravel(),)) elif self.zs_only: if self.uniform: params = np.array((zs[0],)) else: params = np.hstack((zs.ravel(),)) else: if self.uniform: params = np.array((lams[0], zs[0])) else: params = np.hstack((lams.ravel(), zs.ravel())) return params def unpack_params_full(self, params, uniform=None): nocc = self.nelec_a nvir = self.nao - nocc kappa = params[:nvir*nocc] lams, zs = self.unpack_params(params[nvir*nocc:]) return kappa, lams, zs def pack_params_full(self, kappa, lams, zs): return np.hstack((kappa.ravel(), self.pack_params(lams, zs).ravel())) @property def nlams(self): if self.zs_only: nlams = 0 elif self.uniform: nlams = 1 else: nlams = self.nmode return nlams @property def nzs(self): if self.lams_only: nzs = 0 elif self.uniform: nzs = 1 else: nzs = self.nmode return nzs @property def nparam(self): nparam = self.nlams + self.nzs return nparam @property def nkappa(self): nocc = self.nelec_a nvir = self.nao - nocc nparam = nvir * nocc return nparam @property def nparam_full(self): nparam = int(self.nkappa) nparam += self.nparam return nparam def get_lams_zs(self, opt=True): if opt: return self.unpack_params(self.params_opt) else: return self.unpack_params(self.params) get_lf_ham = get_lf_ham solve_lf_ham = solve_lf_ham solve_lf_ham_full = solve_lf_ham_full get_grad = grad.get_grad_lf get_grad_full = grad.get_grad_lf_full kernel = kernel @staticmethod
#!/usr/bin/env python """ Variational Lang-Firsov. Authors: Zhi-Hao Cui <[email protected]> """ einsum = partial(np.einsum, optimize=True) # **************************************************************************** # Variational Lang-Firsov # **************************************************************************** def get_lf_ham(mylf, params=None, h0=None, h1=None, h2=None, h_ep=None, w_p=None): """ Get h0, h1, h_ep, h2 of variational Lang-Firsov Hamiltonian. Args: mylf: LF object. lams: (nmode,) zs: (nmode,) h0: energy constant h1: (nao, nao) h2: (nao, nao, nao, nao), TODO, compact h_ep: constant w_p: (nmode,) or constant. Returns: H0: constant H1: (nao, nao) H_ep: (nmode, nao, nao) H2: (nao, nao, nao, nao) """ if params is None: params = mylf.params if h0 is None: h0 = mylf.h0 if h1 is None: h1 = mylf.h1 if h2 is None: h2 = mylf.h2 if h_ep is None: h_ep = mylf.h_ep if w_p is None: w_p = mylf.w_p lams, zs = mylf.unpack_params(params) nmode = mylf.nmode nao = mylf.nao g = h_ep fac = np.exp(-0.5 * lams**2) h1_diag = w_p*lams**2 - (2.0*g)*lams + (2.0*g)*zs - 2.0*w_p*zs*lams + h1[range(nao), range(nao)] H1 = einsum("ij, i, j -> ij", h1, fac, fac) H1[range(nao), range(nao)] = h1_diag H_ep = np.zeros((nmode, nao, nao)) H_ep[range(nmode), range(nao), range(nao)] = g - w_p * lams H0 = h0 + (w_p * zs**2).sum() if h2 is not None: fac2 = np.exp(-2.0 * lams**2) H2_diag = (-4.0*g) * lams + (2.0 * w_p) * lams**2 + h2[range(nao), range(nao), range(nao), range(nao)] H2 = np.empty((nao, nao, nao, nao)) for i in range(nao): for j in range(nao): for k in range(nao): for l in range(nao): if i == j and i == l and i == k: # i = j = k = l H2[i, j, k, l] = h2[i, j, k, l] elif i == j and i == k: # i = j = k != l H2[i, j, k, l] = h2[i, j, k, l] * fac[l] * fac[k] elif i == j and i == l: # i = j = l != k H2[i, j, k, l] = h2[i, j, k, l] * fac[l] * fac[k] elif i == l and i == k: # i = l = k != j H2[i, j, k, l] = h2[i, j, k, l] * fac[i] * fac[j] elif j == l and j == k: # i != j = k = l H2[i, j, k, l] = h2[i, j, k, l] * fac[i] * fac[j] elif i == j: if k == l: H2[i, j, k, l] = h2[i, j, k, l] else: H2[i, j, k, l] = h2[i, j, k, l] * fac[k] * fac[l] elif i == k: if j == l: H2[i, j, k, l] = h2[i, j, k, l] * fac2[i] * fac2[j] else: H2[i, j, k, l] = h2[i, j, k, l] * fac2[i] * fac[j] * fac[l] elif i == l: if j == k: H2[i, j, k, l] = h2[i, j, k, l] else: H2[i, j, k, l] = h2[i, j, k, l] * fac[j] * fac[k] elif j == k: if i == l: H2[i, j, k, l] = h2[i, j, k, l] else: H2[i, j, k, l] = h2[i, j, k, l] * fac[i] * fac[l] elif j == l: if i == k: H2[i, j, k, l] = h2[i, j, k, l] * fac2[i] * fac2[j] else: H2[i, j, k, l] = h2[i, j, k, l] * fac[i] * fac[k] * fac2[j] elif k == l: if i == j: H2[i, j, k, l] = h2[i, j, k, l] else: H2[i, j, k, l] = h2[i, j, k, l] * fac[i] * fac[j] else: H2[i, j, k, l] = h2[i, j, k, l] * fac[i] * fac[j] * fac[k] * fac[l] H2[range(nao), range(nao), range(nao), range(nao)] = H2_diag else: H2 = h2 return H0, H1, H2, H_ep, w_p def solve_lf_ham(mylf, params=None, nelec=None, mp2=False, mp3=False, mp4=False, nph=9, verbose=False, scf_newton=False, beta=np.inf, dm0=None, scf_max_cycle=50, fci=False): H0, H1, H2, H_ep, w_p = mylf.get_lf_ham(params=params) ovlp = mylf.get_ovlp() nao = mylf.nao h1 = mylf.get_h1() h_ep_bare = mylf.get_h_ep() if nelec is None: nelec = mylf.nelec if params is None: params = mylf.params lams, zs = mylf.unpack_params(params) if H2 is not None: mf = hf.RHF(mylf.mol) mf.energy_nuc = lambda *args: H0 mf.get_hcore = lambda *args: H1 mf.get_ovlp = lambda *args: ovlp mf._eri = H2 mf.direct_scf = 
False mf.max_cycle = scf_max_cycle mf.conv_tol = mylf.conv_tol * 0.1 if scf_newton: mf = mf.newton() if beta < np.inf: mf = smearing_(mf, sigma=1.0/beta, method='fermi') e_tot = mf.kernel(dm0=dm0) rdm1 = mf.make_rdm1() mylf._scf = mf mylf.mo_energy = mf.mo_energy mylf.mo_coeff = mf.mo_coeff mylf.mo_occ = mf.mo_occ mylf.e_hf = float(e_tot) conv = mf.converged if fci: cisolver = fci.direct_nosym.FCI() cisolver.max_cycle = 100 cisolver.conv_tol = 1e-8 C = mf.mo_coeff h1_mo = C.conj().T @ mf.get_hcore() @ C h2_mo = ao2mo.kernel(mf._eri, C) e_tot, fcivec = cisolver.kernel(h1_mo, h2_mo, C.shape[-1], nelec, ecore=mf.energy_nuc()) rdm1 = cisolver.make_rdm1(fcivec, C.shape[-1], (mylf.nelec_a, mylf.nelec_b)) rdm1 = C @ rdm1 @ C.conj().T else: ew, ev = la.eigh(H1, ovlp) mo_occ = np.zeros(nao) if nelec == 1: nocc = nelec mo_occ[:nocc] = 1.0 else: nocc = nelec_a mo_occ[:nocc] = 2.0 e_tot = np.sum(ew * mo_occ) + H0 nao, nmo = ev.shape rdm1 = np.dot(ev * mo_occ, ev.conj().T) mylf.mo_energy = ew mylf.mo_coeff = ev mylf.mo_occ = mo_occ mylf.e_hf = float(e_tot) conv = True if mp2 or mp3 or mp4: nocc = (np.asarray(mo_occ > 0.5)).sum() mo_energy = ew mo_coeff = ev if lams.ndim == 1: lf = 'lf' elif lams.ndim == 2: lf = 'glf' else: raise ValueError logger.info(mylf, "PT number of phonon: %d", nph) logger.info(mylf, "e_hf %15.8f", e_tot) logger.info(mylf, "LF type %s", lf) if mp4: e_mp1, e_mp2, e_mp3, e_mp4 = lfmp.get_e_mp4(mylf.mol, h1, H_ep, w_p, lams, zs, mo_coeff, mo_occ, mo_energy, nph, lf=lf, h_ep_bare=h_ep_bare) e_tot += e_mp1 e_tot += e_mp2 e_tot += e_mp3 e_tot += e_mp4 mylf.e_mp1 = e_mp1 mylf.e_mp2 = e_mp2 mylf.e_mp3 = e_mp3 mylf.e_mp4 = e_mp4 logger.info(mylf, "e_mp1 %15.8f", e_mp1) logger.info(mylf, "e_mp2 %15.8f", e_mp2) logger.info(mylf, "e_mp3 %15.8f", e_mp3) logger.info(mylf, "e_mp4 %15.8f", e_mp4) elif mp3: e_mp1, e_mp2, e_mp3 = lfmp.get_e_mp3(mylf.mol, h1, H_ep, w_p, lams, zs, mo_coeff, mo_occ, mo_energy, nph, lf=lf, h_ep_bare=h_ep_bare) e_tot += e_mp1 e_tot += e_mp2 e_tot += e_mp3 mylf.e_mp1 = e_mp1 mylf.e_mp2 = e_mp2 mylf.e_mp3 = e_mp3 logger.info(mylf, "e_mp1 %15.8f", e_mp1) logger.info(mylf, "e_mp2 %15.8f", e_mp2) logger.info(mylf, "e_mp3 %15.8f", e_mp3) elif mp2 == 'slow': e_mp1, e_mp2 = lfmp.get_e_mp2_slow(mylf.mol, h1, H_ep, w_p, lams, zs, mo_coeff, mo_occ, mo_energy, nph, lf=lf, h_ep_bare=h_ep_bare) e_tot += e_mp1 e_tot += e_mp2 mylf.e_mp1 = e_mp1 mylf.e_mp2 = e_mp2 logger.info(mylf, "e_mp1 %15.8f", e_mp1) logger.info(mylf, "e_mp2 %15.8f", e_mp2) elif mp2: e_mp2 = lfmp.get_e_mp2(h1, H_ep, w_p, lams, zs, mo_coeff, mo_occ, mo_energy, nph) e_tot += e_mp2 mylf.e_mp2 = e_mp2 logger.info(mylf, "e_mp2 %15.8f", e_mp2) if verbose: logger.info(mylf, "e_tot %15.8f", e_tot) logger.info(mylf, "lams\n%s", lams) logger.info(mylf, "zs\n%s", zs) logger.info(mylf, "rdm1\n%s", rdm1) mylf.e_tot = e_tot return e_tot, rdm1 def solve_lf_ham_full(mylf, params=None, nelec=None, mp2=False, mp3=False, mp4=False, nph=9, verbose=False, scf_newton=False, beta=np.inf, dm0=None, scf_max_cycle=50, mo_coeff=None, mo_occ=None, canonicalization=True): if params is None: params = mylf.params_full kappa, lams, zs = mylf.unpack_params_full(params) params_p = mylf.pack_params(lams, zs) H0, H1, H2, H_ep, w_p = mylf.get_lf_ham(params=params_p) ovlp = mylf.get_ovlp() nao = mylf.nao h1 = mylf.get_h1() if nelec is None: nelec = mylf.nelec if H2 is not None: mf = hf.RHF(mylf.mol) mf.energy_nuc = lambda *args: H0 mf.get_hcore = lambda *args: H1 mf.get_ovlp = lambda *args: ovlp # ZHC FIXME NOTE the transformed H2 may not have the 
4-fold symmetry, # it is only 2-fold. pqrs = rspq #mf._eri = ao2mo.restore(4, H2, nao) mf._eri = H2 mf.direct_scf = False mf.max_cycle = scf_max_cycle mf.conv_tol = mylf.conv_tol * 0.1 dr = hf.unpack_uniq_var(kappa, mo_occ) mo_coeff = np.dot(mo_coeff, la.expm(dr)) rdm1 = mf.make_rdm1(mo_coeff, mo_occ) e_tot = mf.energy_elec(dm=rdm1)[0] + mf.energy_nuc() fock = mf.get_fock(dm=rdm1) if canonicalization: print("-" * 79) mo_energy, mo_coeff = mf.canonicalize(mo_coeff, mo_occ, fock) homo = lumo = None mo_e_occ = mo_energy[mo_occ >= 1.0] mo_e_vir = mo_energy[mo_occ < 1.0] if len(mo_e_occ) > 0: homo = mo_e_occ.max() if len(mo_e_vir) > 0: lumo = mo_e_vir.min() if homo is not None: print ('HOMO = %15.8g'%(homo)) if lumo is not None: print ('LUMO = %15.8g'%(lumo)) if homo is not None: print ("gap = %15.8g"%(lumo - homo)) if (lumo is not None) and (homo is not None) and (homo > lumo): print ('WARN: HOMO %s > LUMO %s was found in the canonicalized orbitals.'%(homo, lumo)) print ("mo_energy:\n%s"%mo_energy) grad = mf.get_grad(mo_coeff, mo_occ, fock) grad_norm = la.norm(grad) print("-" * 79) print ("|g| = %15.8g" % grad_norm) print("-" * 79) else: mo_energy = einsum("pm, pq, qm -> m", mo_coeff.conj(), fock, mo_coeff) mylf._scf = mf mylf.e_hf = float(e_tot) conv = mf.converged mylf.mo_coeff = mf.mo_coeff = mo_coeff mylf.mo_occ = mf.mo_occ = mo_occ mylf.mo_energy = mf.mo_energy = mo_energy if mp2: logger.info(mylf, "LF-MP2 start, nph = %d", nph) ovlp_g = la.block_diag(ovlp, ovlp) hcore_g = la.block_diag(H1, H1) mylf._scf = mf = mf.to_ghf() mf.get_ovlp = lambda *args: ovlp_g mf.get_hcore = lambda *args: hcore_g e_mp2 = mp_glf.get_e_mp2(mylf, lams=lams, zs=zs, nph=nph) e_tot += e_mp2 mylf.e_mp2 = e_mp2 logger.info(mylf, "e_mp2 %15.8f", mylf.e_mp2) return e_tot, rdm1 def kernel(mylf, params=None, nelec=None, method='BFGS', conv_tol=None, max_cycle=None, gtol=None, mp2=False, mp3=False, mp4=False, nph=9, ntrial=None, use_num_grad=True, full_opt=False, mo_coeff=None, mo_occ=None, fci=False, scf_max_cycle=50, beta=np.inf): mylf.dump_flags() if params is None: if full_opt: params = mylf.params_full else: params = mylf.params if nelec is None: nelec = mylf.nelec if conv_tol is None: conv_tol = mylf.conv_tol if gtol is None: gtol = np.sqrt(conv_tol) if max_cycle is None: max_cycle = mylf.max_cycle if ntrial is None: ntrial = mylf.ntrial if use_num_grad: jac = None else: if full_opt: def jac(params): return mylf.get_grad_full(params, mo_coeff=mo_coeff, mo_occ=mo_occ) else: def jac(params): return mylf.get_grad(params, scf_max_cycle=scf_max_cycle, fci=fci, beta=beta) if full_opt: def cost_func(params): return mylf.solve_lf_ham_full(params, nelec, mo_coeff=mo_coeff, mo_occ=mo_occ, canonicalization=False)[0] else: def cost_func(params): return mylf.solve_lf_ham(params, nelec, scf_max_cycle=scf_max_cycle, fci=fci, beta=beta)[0] params_opt = None e_tot = 1e+9 for i in range(ntrial): print ("trial %5d / %5d"%(i, ntrial), flush=True) if i == 1: params = np.zeros_like(params) elif i >= 2: params = (np.random.random(params.shape) - 0.5) * (np.max(mylf.get_h_ep()) / np.max(mylf.get_w_p())) if i % 2== 0: params *= 0.1 res = opt.minimize(cost_func, params, jac=jac, method=method, tol=conv_tol, options={"disp": True, "maxiter": max_cycle, "gtol": gtol}) if res.fun < e_tot: params_opt = res.x e_tot = res.fun mylf.params_full_opt = params_opt mylf.params_opt = np.array(params_opt[-mylf.nparam:], copy=True) mylf.e_tot = e_tot if full_opt: mylf.e_tot, rdm1 = mylf.solve_lf_ham_full(params_opt, nelec, mp2=mp2, mp3=mp3, mp4=mp4, 
nph=nph, verbose=True, mo_coeff=mo_coeff, mo_occ=mo_occ) kappa, lams, zs = mylf.unpack_params_full(mylf.params_full_opt) else: mylf.e_tot, rdm1 = mylf.solve_lf_ham(params_opt, nelec, mp2=mp2, mp3=mp3, mp4=mp4, nph=nph, verbose=True, fci=fci, scf_max_cycle=scf_max_cycle, beta=beta) lams, zs = mylf.unpack_params(mylf.params_opt) kappa = None logger.info(mylf, "e_tot %15.8f", mylf.e_tot) logger.info(mylf, "kappa\n%s", kappa) logger.info(mylf, "lams\n%s", lams) logger.info(mylf, "zs\n%s", zs) logger.info(mylf, "rdm1\n%s", rdm1) return mylf.e_tot class LangFirsov(object): conv_tol = 1e-8 conv_tol_grad = None max_cycle = 1000 ntrial = 5 def __init__(self, h0, h1, h2, h_ep, w_p, nelec, spin=0, params=None, uniform=False, lams_only=False, zs_only=False, verbose=4, mol=None): self.mol = gto.Mole(verbose=verbose) self.mol.build(dump_input=False) self.verbose = verbose self.max_memory = self.mol.max_memory self.stdout = self.mol.stdout self.h0 = h0 self.h1 = h1 self.h2 = h2 self.h_ep = h_ep self.w_p = w_p self.nelec = nelec self.mol.nelectron = nelec self.mol.tot_electrons = lambda *args: self.nelec self.mol.incore_anyway = True if self.nelec == 1: self.spin = self.mol.spin = 1 else: self.spin = self.mol.spin = spin self.nelec_a = (self.nelec + self.spin) // 2 self.nelec_b = (self.nelec - self.spin) // 2 assert self.nelec_a + self.nelec_b == self.nelec self.nmode = len(self.w_p) self.nao = self.h1.shape[-1] self.ovlp = np.eye(self.nao) self.uniform = uniform self.lams_only = lams_only self.zs_only = zs_only self.lf_type = 'lf' if params is None: self.params = self.get_init_params() else: self.params = params assert len(self.params) == self.nparam self.params_full = np.zeros(self.nparam_full) self.params_full[-self.nparam:] = self.params # results: self.chkfile = None self.params_opt = None self.params_full_opt = None self.e_tot = None self.e_hf = None self.e_mp1 = None self.e_mp2 = None self.e_mp3 = None self.e_mp4 = None self.mo_energy = None self.mo_coeff = None self.mo_occ = None def dump_flags(self, verbose=None): log = logger.new_logger(self, verbose) if log.verbose < logger.INFO: return self log.info('\n') log.info('******** %s ********', self.__class__) method = [self.__class__.__name__] log.info('method = %s', '-'.join(method)) log.info("uniform = %s", self.uniform) log.info("nao = %10d", self.nao) log.info("nelec = %10s", self.nelec) log.info("nmode = %10d", self.nmode) log.info("nparam = %10d", self.nparam) log.info('conv_tol = %g', self.conv_tol) log.info('conv_tol_grad = %s', self.conv_tol_grad) log.info('max_cycles = %d', self.max_cycle) log.info("ntrial: %d", self.ntrial) if isinstance(self.h_ep, np.ndarray) and (self.nao > 16 or self.nmode > 16): log.info("h_ep:\n%s min %15.6f max %15.6f", str(self.h_ep.shape), np.min(self.h_ep), np.max(self.h_ep)) else: log.info("h_ep:\n%s", self.h_ep) if self.nmode > 16: log.info("w_p:\n%s min %15.6f max %15.6f", str(self.w_p.shape), np.min(self.w_p), np.max(self.w_p)) else: log.info("w_p:\n%s", self.w_p) lams, zs = self.unpack_params(self.params) log.info("lams shape: %s", str(lams.shape)) log.info("lams:\n%s", lams) log.info("zs shape: %s", str(zs.shape)) log.info("zs:\n%s", zs) if self.chkfile: log.info('chkfile to save SCF result = %s', self.chkfile) log.info('max_memory %d MB (current use %d MB)', self.max_memory, lib.current_memory()[0]) return self def get_h0(self): return self.h0 def energy_nuc(self): return self.h0 def get_ovlp(self): return self.ovlp def get_h1(self): return self.h1 def get_hcore(self): return self.h1 def get_h2(self): 
return self.h2 def get_h_ep(self): return self.h_ep def get_w_p(self): return self.w_p def get_dm0(self, key='minao'): """ get initial rdm1. """ if self.mol.natm == 0: h1e = self.get_h1() s1e = self.get_ovlp() mo_energy, mo_coeff = la.eigh(h1e, s1e) mo_occ = np.zeros_like(mo_energy) mo_occ[:self.nelec_a] = 2.0 dm0 = np.dot(mo_coeff * mo_occ, mo_coeff.conj().T) else: if key == 'minao': dm0 = hf.init_guess_by_minao(self.mol) elif key == 'atom': dm0 = hf.init_guess_by_atom(self.mol) else: raise ValueError return dm0 def get_init_params(self, scale=0.1): h_ep = self.h_ep w_p = self.w_p if self.zs_only: lams = np.array([]) elif self.uniform: lams = np.zeros(self.nmode) lams[:] = (np.random.random() - 0.5) * (np.max(h_ep) / np.max(w_p) * scale) else: lams = (np.random.random(self.nlams) - 0.5) * (np.max(h_ep) / np.max(w_p) * scale) if self.lams_only: zs = np.array([]) elif self.zs_only: zs = np.random.random(self.nzs) else: dm0 = self.get_dm0() zs = np.einsum("p, pp -> p", lams, dm0) params = np.append(lams, zs) if self.uniform: if self.lams_only or self.zs_only: params = params[[-1]] else: params = params[[0, self.nlams]] return params def unpack_params(self, params, uniform=None, lams_only=None, zs_only=None): if lams_only is None: lams_only = self.lams_only if zs_only is None: zs_only = self.zs_only if uniform is None: uniform = self.uniform nmode = self.nmode nao = self.nao if lams_only: zs = np.array([], dtype=params.dtype) if uniform: l = params if isinstance(self, GGLangFirsov): lams = np.zeros((nmode, nao, nao), dtype=params.dtype) lams[range(nmode), range(nao), range(nao)] = l elif isinstance(self, GLangFirsov): lams = np.zeros((nmode, nao), dtype=params.dtype) lams[range(nmode), range(nao)] = l elif isinstance(self, LangFirsov): lams = np.zeros(nmode, dtype=params.dtype) lams[:] = l else: raise ValueError("unknown lf type %s"%(self)) else: lams = np.array(params.reshape(nmode, -1), copy=True) if isinstance(self, GGLangFirsov): lams = lib.unpack_tril(lams) if lams.shape != (nmode, nao, nao): raise ValueError("lams shape %s does not match %s" %(str(lams.shape), (nmode, nao, nao))) elif isinstance(self, GLangFirsov): pass elif isinstance(self, LangFirsov): lams = lams.reshape(nmode) else: raise ValueError("unknown lf type %s"%(self)) elif zs_only: if uniform: z = params zs = np.zeros(nmode, dtype=params.dtype) zs[:] = z else: zs = np.array(params[-nmode:], copy=True) if isinstance(self, GGLangFirsov): lams = np.zeros((nmode, nao, nao), dtype=params.dtype) elif isinstance(self, GLangFirsov): lams = np.zeros((nmode, nao), dtype=params.dtype) elif isinstance(self, LangFirsov): lams = np.zeros(nmode, dtype=params.dtype) else: raise ValueError("unknown lf type %s"%(self)) else: if uniform: l, z = params zs = np.zeros(nmode, dtype=params.dtype) zs[:] = z if isinstance(self, GGLangFirsov): lams = np.zeros((nmode, nao, nao), dtype=params.dtype) lams[range(nmode), range(nao), range(nao)] = l elif isinstance(self, GLangFirsov): lams = np.zeros((nmode, nao), dtype=params.dtype) lams[range(nmode), range(nao)] = l elif isinstance(self, LangFirsov): lams = np.zeros(nmode, dtype=params.dtype) lams[:] = l else: raise ValueError("unknown lf type %s"%(self)) else: zs = np.array(params[-nmode:], copy=True) lams = np.array(params[:-nmode].reshape(nmode, -1), copy=True) if isinstance(self, GGLangFirsov): lams = lib.unpack_tril(lams) if lams.shape != (nmode, nao, nao): raise ValueError("lams shape %s does not match %s" %(str(lams.shape), (nmode, nao, nao))) elif isinstance(self, GLangFirsov): pass elif 
isinstance(self, LangFirsov): lams = lams.reshape(nmode) else: raise ValueError("unknown lf type %s"%(self)) return lams, zs def pack_params(self, lams, zs): if self.lams_only: if self.uniform: params = np.array((lams[0],)) else: params = np.hstack((lams.ravel(),)) elif self.zs_only: if self.uniform: params = np.array((zs[0],)) else: params = np.hstack((zs.ravel(),)) else: if self.uniform: params = np.array((lams[0], zs[0])) else: params = np.hstack((lams.ravel(), zs.ravel())) return params def unpack_params_full(self, params, uniform=None): nocc = self.nelec_a nvir = self.nao - nocc kappa = params[:nvir*nocc] lams, zs = self.unpack_params(params[nvir*nocc:]) return kappa, lams, zs def pack_params_full(self, kappa, lams, zs): return np.hstack((kappa.ravel(), self.pack_params(lams, zs).ravel())) @property def nlams(self): if self.zs_only: nlams = 0 elif self.uniform: nlams = 1 else: nlams = self.nmode return nlams @property def nzs(self): if self.lams_only: nzs = 0 elif self.uniform: nzs = 1 else: nzs = self.nmode return nzs @property def nparam(self): nparam = self.nlams + self.nzs return nparam @property def nkappa(self): nocc = self.nelec_a nvir = self.nao - nocc nparam = nvir * nocc return nparam @property def nparam_full(self): nparam = int(self.nkappa) nparam += self.nparam return nparam def get_lams_zs(self, opt=True): if opt: return self.unpack_params(self.params_opt) else: return self.unpack_params(self.params) get_lf_ham = get_lf_ham solve_lf_ham = solve_lf_ham solve_lf_ham_full = solve_lf_ham_full get_grad = grad.get_grad_lf get_grad_full = grad.get_grad_lf_full kernel = kernel @staticmethod
def fc_factor(n, l, m):
2
2023-12-18 07:39:51+00:00
8k
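The `get_lf_ham` code in the entry above dresses the one-body integrals with Gaussian factors exp(-lambda_i^2/2) and replaces the diagonal with a shifted on-site energy. Below is a minimal, self-contained sketch of just that step; it assumes scalar `w_p` and `g` and invented example values, and is not the repository's API.

```python
import numpy as np

# Sketch of the one-body dressing used in get_lf_ham above.
# All values here are invented for illustration; w_p and g are taken as scalars.
nao = 4
rng = np.random.default_rng(0)
h1 = rng.normal(size=(nao, nao))
h1 = 0.5 * (h1 + h1.T)                  # symmetric bare one-body term
lams = rng.normal(scale=0.1, size=nao)  # variational displacements (nmode == nao here)
zs = rng.normal(scale=0.1, size=nao)
w_p, g = 0.5, 0.2                       # phonon frequency and e-ph coupling

fac = np.exp(-0.5 * lams**2)            # polaron dressing factor per orbital
H1 = np.einsum("ij,i,j->ij", h1, fac, fac)
# the diagonal is replaced by the shifted on-site energy rather than the dressed value
H1[np.arange(nao), np.arange(nao)] = (
    w_p * lams**2 - 2.0 * g * lams + 2.0 * g * zs - 2.0 * w_p * zs * lams + np.diag(h1)
)
print(np.round(H1, 4))
```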
YaoFANGUK/video-subtitle-remover
backend/scenedetect/backends/opencv.py
[ { "identifier": "FrameTimecode", "path": "backend/scenedetect/frame_timecode.py", "snippet": "class FrameTimecode:\n \"\"\"Object for frame-based timecodes, using the video framerate to compute back and\n forth between frame number and seconds/timecode.\n\n A timecode is valid only if it complies with one of the following three types/formats:\n\n 1. Timecode as `str` in the form 'HH:MM:SS[.nnn]' (`'01:23:45'` or `'01:23:45.678'`)\n 2. Number of seconds as `float`, or `str` in form 'Ss' or 'S.SSSs' (`'2s'` or `'2.3456s'`)\n 3. Exact number of frames as `int`, or `str` in form NNNNN (`123` or `'123'`)\n \"\"\"\n\n def __init__(self,\n timecode: Union[int, float, str, 'FrameTimecode'] = None,\n fps: Union[int, float, str, 'FrameTimecode'] = None):\n \"\"\"\n Arguments:\n timecode: A frame number (int), number of seconds (float), or timecode (str in\n the form `'HH:MM:SS'` or `'HH:MM:SS.nnn'`).\n fps: The framerate or FrameTimecode to use as a time base for all arithmetic.\n Raises:\n TypeError: Thrown if either `timecode` or `fps` are unsupported types.\n ValueError: Thrown when specifying a negative timecode or framerate.\n \"\"\"\n # The following two properties are what is used to keep track of time\n # in a frame-specific manner. Note that once the framerate is set,\n # the value should never be modified (only read if required).\n # TODO(v1.0): Make these actual @properties.\n self.framerate = None\n self.frame_num = None\n\n # Copy constructor. Only the timecode argument is used in this case.\n if isinstance(timecode, FrameTimecode):\n self.framerate = timecode.framerate\n self.frame_num = timecode.frame_num\n if fps is not None:\n raise TypeError('Framerate cannot be overwritten when copying a FrameTimecode.')\n else:\n # Ensure other arguments are consistent with API.\n if fps is None:\n raise TypeError('Framerate (fps) is a required argument.')\n if isinstance(fps, FrameTimecode):\n fps = fps.framerate\n\n # Process the given framerate, if it was not already set.\n if not isinstance(fps, (int, float)):\n raise TypeError('Framerate must be of type int/float.')\n if (isinstance(fps, int) and not fps > 0) or (isinstance(fps, float)\n and not fps >= MAX_FPS_DELTA):\n raise ValueError('Framerate must be positive and greater than zero.')\n self.framerate = float(fps)\n\n # Process the timecode value, storing it as an exact number of frames.\n if isinstance(timecode, str):\n self.frame_num = self._parse_timecode_string(timecode)\n else:\n self.frame_num = self._parse_timecode_number(timecode)\n\n # TODO(v1.0): Add a `frame` property to replace the existing one and deprecate this getter.\n def get_frames(self) -> int:\n \"\"\"Get the current time/position in number of frames. This is the\n equivalent of accessing the self.frame_num property (which, along\n with the specified framerate, forms the base for all of the other\n time measurement calculations, e.g. the :meth:`get_seconds` method).\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. 
``FrameTimecode(10, 10.0) <= 10``).\n\n Returns:\n int: The current time in frames (the current frame number).\n \"\"\"\n return self.frame_num\n\n # TODO(v1.0): Add a `framerate` property to replace the existing one and deprecate this getter.\n def get_framerate(self) -> float:\n \"\"\"Get Framerate: Returns the framerate used by the FrameTimecode object.\n\n Returns:\n float: Framerate of the current FrameTimecode object, in frames per second.\n \"\"\"\n return self.framerate\n\n def equal_framerate(self, fps) -> bool:\n \"\"\"Equal Framerate: Determines if the passed framerate is equal to that of this object.\n\n Arguments:\n fps: Framerate to compare against within the precision constant defined in this module\n (see :data:`MAX_FPS_DELTA`).\n Returns:\n bool: True if passed fps matches the FrameTimecode object's framerate, False otherwise.\n\n \"\"\"\n return math.fabs(self.framerate - fps) < MAX_FPS_DELTA\n\n # TODO(v1.0): Add a `seconds` property to replace this and deprecate the existing one.\n def get_seconds(self) -> float:\n \"\"\"Get the frame's position in number of seconds.\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. ``FrameTimecode(10, 10.0) <= 1.0``).\n\n Returns:\n float: The current time/position in seconds.\n \"\"\"\n return float(self.frame_num) / self.framerate\n\n # TODO(v1.0): Add a `timecode` property to replace this and deprecate the existing one.\n def get_timecode(self, precision: int = 3, use_rounding: bool = True) -> str:\n \"\"\"Get a formatted timecode string of the form HH:MM:SS[.nnn].\n\n Args:\n precision: The number of decimal places to include in the output ``[.nnn]``.\n use_rounding: Rounds the output to the desired precision. If False, the value\n will be truncated to the specified precision.\n\n Returns:\n str: The current time in the form ``\"HH:MM:SS[.nnn]\"``.\n \"\"\"\n # Compute hours and minutes based off of seconds, and update seconds.\n secs = self.get_seconds()\n base = 60.0 * 60.0\n hrs = int(secs / base)\n secs -= (hrs * base)\n base = 60.0\n mins = int(secs / base)\n secs -= (mins * base)\n # Convert seconds into string based on required precision.\n if precision > 0:\n if use_rounding:\n secs = round(secs, precision)\n msec = format(secs, '.%df' % precision)[-precision:]\n secs = '%02d.%s' % (int(secs), msec)\n else:\n secs = '%02d' % int(round(secs, 0)) if use_rounding else '%02d' % int(secs)\n # Return hours, minutes, and seconds as a formatted timecode string.\n return '%02d:%02d:%s' % (hrs, mins, secs)\n\n # TODO(v1.0): Add a `previous` property to replace the existing one and deprecate this getter.\n def previous_frame(self) -> 'FrameTimecode':\n \"\"\"Return a new FrameTimecode for the previous frame (or 0 if on frame 0).\"\"\"\n new_timecode = FrameTimecode(self)\n new_timecode.frame_num = max(0, new_timecode.frame_num - 1)\n return new_timecode\n\n def _seconds_to_frames(self, seconds: float) -> int:\n \"\"\"Convert the passed value seconds to the nearest number of frames using\n the current FrameTimecode object's FPS (self.framerate).\n\n Returns:\n Integer number of frames the passed number of seconds represents using\n the current FrameTimecode's framerate property.\n \"\"\"\n return round(seconds * self.framerate)\n\n def _parse_timecode_number(self, timecode: Union[int, float]) -> int:\n \"\"\" Parse a timecode number, storing it as the exact number of frames.\n Can be passed as frame number (int), seconds (float)\n\n Raises:\n TypeError, ValueError\n 
\"\"\"\n # Process the timecode value, storing it as an exact number of frames.\n # Exact number of frames N\n if isinstance(timecode, int):\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive and greater than zero.')\n return timecode\n # Number of seconds S\n elif isinstance(timecode, float):\n if timecode < 0.0:\n raise ValueError('Timecode value must be positive and greater than zero.')\n return self._seconds_to_frames(timecode)\n # FrameTimecode\n elif isinstance(timecode, FrameTimecode):\n return timecode.frame_num\n elif timecode is None:\n raise TypeError('Timecode/frame number must be specified!')\n else:\n raise TypeError('Timecode format/type unrecognized.')\n\n def _parse_timecode_string(self, timecode_string: str) -> int:\n \"\"\"Parses a string based on the three possible forms (in timecode format,\n as an integer number of frames, or floating-point seconds, ending with 's').\n\n Requires that the `framerate` property is set before calling this method.\n Assuming a framerate of 30.0 FPS, the strings '00:05:00.000', '00:05:00',\n '9000', '300s', and '300.0s' are all possible valid values, all representing\n a period of time equal to 5 minutes, 300 seconds, or 9000 frames (at 30 FPS).\n\n Raises:\n TypeError, ValueError\n \"\"\"\n if self.framerate is None:\n raise TypeError('self.framerate must be set before calling _parse_timecode_string.')\n # Number of seconds S\n if timecode_string.endswith('s'):\n secs = timecode_string[:-1]\n if not secs.replace('.', '').isdigit():\n raise ValueError('All characters in timecode seconds string must be digits.')\n secs = float(secs)\n if secs < 0.0:\n raise ValueError('Timecode seconds value must be positive.')\n return self._seconds_to_frames(secs)\n # Exact number of frames N\n elif timecode_string.isdigit():\n timecode = int(timecode_string)\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive.')\n return timecode\n # Standard timecode in string format 'HH:MM:SS[.nnn]'\n else:\n tc_val = timecode_string.split(':')\n if not (len(tc_val) == 3 and tc_val[0].isdigit() and tc_val[1].isdigit()\n and tc_val[2].replace('.', '').isdigit()):\n raise ValueError('Unrecognized or improperly formatted timecode string.')\n hrs, mins = int(tc_val[0]), int(tc_val[1])\n secs = float(tc_val[2]) if '.' 
in tc_val[2] else int(tc_val[2])\n if not (hrs >= 0 and mins >= 0 and secs >= 0 and mins < 60 and secs < 60):\n raise ValueError('Invalid timecode range (values outside allowed range).')\n secs += (((hrs * 60.0) + mins) * 60.0)\n return self._seconds_to_frames(secs)\n\n def __iadd__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num += other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num += other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for addition.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num += self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num += self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing addition with FrameTimecode.')\n if self.frame_num < 0: # Required to allow adding negative seconds/frames.\n self.frame_num = 0\n return self\n\n def __add__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return += other\n return to_return\n\n def __isub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num -= other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num -= other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for subtraction.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num -= self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num -= self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing subtraction with FrameTimecode: %s' %\n type(other))\n if self.frame_num < 0:\n self.frame_num = 0\n return self\n\n def __sub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return -= other\n return to_return\n\n def __eq__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n return self.frame_num == other\n elif isinstance(other, float):\n return self.get_seconds() == other\n elif isinstance(other, str):\n return self.frame_num == self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num == other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n elif other is None:\n return False\n else:\n raise TypeError('Unsupported type for performing == with FrameTimecode: %s' %\n type(other))\n\n def __ne__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n return not self == other\n\n def __lt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num < other\n elif isinstance(other, float):\n return self.get_seconds() < other\n elif isinstance(other, str):\n return self.frame_num < self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num < other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing < with FrameTimecode: %s' %\n 
type(other))\n\n def __le__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num <= other\n elif isinstance(other, float):\n return self.get_seconds() <= other\n elif isinstance(other, str):\n return self.frame_num <= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num <= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing <= with FrameTimecode: %s' %\n type(other))\n\n def __gt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num > other\n elif isinstance(other, float):\n return self.get_seconds() > other\n elif isinstance(other, str):\n return self.frame_num > self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num > other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing > with FrameTimecode: %s' %\n type(other))\n\n def __ge__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num >= other\n elif isinstance(other, float):\n return self.get_seconds() >= other\n elif isinstance(other, str):\n return self.frame_num >= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num >= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing >= with FrameTimecode: %s' %\n type(other))\n\n # TODO(v1.0): __int__ and __float__ should be removed. Mark as deprecated, and indicate\n # need to use relevant property instead.\n\n def __int__(self) -> int:\n return self.frame_num\n\n def __float__(self) -> float:\n return self.get_seconds()\n\n def __str__(self) -> str:\n return self.get_timecode()\n\n def __repr__(self) -> str:\n return '%s [frame=%d, fps=%.3f]' % (self.get_timecode(), self.frame_num, self.framerate)\n\n def __hash__(self) -> int:\n return self.frame_num" }, { "identifier": "MAX_FPS_DELTA", "path": "backend/scenedetect/frame_timecode.py", "snippet": "MAX_FPS_DELTA: float = 1.0 / 100000" }, { "identifier": "get_file_name", "path": "backend/scenedetect/platform.py", "snippet": "def get_file_name(file_path: AnyStr, include_extension=True) -> AnyStr:\n \"\"\"Return the file name that `file_path` refers to, optionally removing the extension.\n\n If `include_extension` is False, the result will always be a str.\n\n E.g. /tmp/foo.bar -> foo\"\"\"\n file_name = os.path.basename(file_path)\n if not include_extension:\n file_name = str(file_name)\n last_dot_pos = file_name.rfind('.')\n if last_dot_pos >= 0:\n file_name = file_name[:last_dot_pos]\n return file_name" }, { "identifier": "VideoStream", "path": "backend/scenedetect/video_stream.py", "snippet": "class VideoStream(ABC):\n \"\"\" Interface which all video backends must implement. 
\"\"\"\n\n #\n # Default Implementations\n #\n\n @property\n def base_timecode(self) -> FrameTimecode:\n \"\"\"FrameTimecode object to use as a time base.\"\"\"\n return FrameTimecode(timecode=0, fps=self.frame_rate)\n\n #\n # Abstract Static Methods\n #\n\n @staticmethod\n @abstractmethod\n def BACKEND_NAME() -> str:\n \"\"\"Unique name used to identify this backend. Should be a static property in derived\n classes (`BACKEND_NAME = 'backend_identifier'`).\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Properties\n #\n\n @property\n @abstractmethod\n def path(self) -> Union[bytes, str]:\n \"\"\"Video or device path.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def name(self) -> Union[bytes, str]:\n \"\"\"Name of the video, without extension, or device.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def is_seekable(self) -> bool:\n \"\"\"True if seek() is allowed, False otherwise.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_rate(self) -> float:\n \"\"\"Frame rate in frames/sec.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def duration(self) -> Optional[FrameTimecode]:\n \"\"\"Duration of the stream as a FrameTimecode, or None if non terminating.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_size(self) -> Tuple[int, int]:\n \"\"\"Size of each video frame in pixels as a tuple of (width, height).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def aspect_ratio(self) -> float:\n \"\"\"Pixel aspect ratio as a float (1.0 represents square pixels).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position(self) -> FrameTimecode:\n \"\"\"Current position within stream as FrameTimecode.\n\n This can be interpreted as presentation time stamp, thus frame 1 corresponds\n to the presentation time 0. Returns 0 even if `frame_number` is 1.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position_ms(self) -> float:\n \"\"\"Current position within stream as a float of the presentation time in\n milliseconds. The first frame has a PTS of 0.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_number(self) -> int:\n \"\"\"Current position within stream as the frame number.\n\n Will return 0 until the first frame is `read`.\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Methods\n #\n\n @abstractmethod\n def read(self, decode: bool = True, advance: bool = True) -> Union[ndarray, bool]:\n \"\"\"Read and decode the next frame as a numpy.ndarray. Returns False when video ends.\n\n Arguments:\n decode: Decode and return the frame.\n advance: Seek to the next frame. If False, will return the current (last) frame.\n\n Returns:\n If decode = True, the decoded frame (numpy.ndarray), or False (bool) if end of video.\n If decode = False, a bool indicating if advancing to the the next frame succeeded.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def reset(self) -> None:\n \"\"\" Close and re-open the VideoStream (equivalent to seeking back to beginning). \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def seek(self, target: Union[FrameTimecode, float, int]) -> None:\n \"\"\"Seek to the given timecode. If given as a frame number, represents the current seek\n pointer (e.g. 
if seeking to 0, the next frame decoded will be the first frame of the video).\n\n For 1-based indices (first frame is frame #1), the target frame number needs to be converted\n to 0-based by subtracting one. For example, if we want to seek to the first frame, we call\n seek(0) followed by read(). If we want to seek to the 5th frame, we call seek(4) followed\n by read(), at which point frame_number will be 5.\n\n May not be supported on all backend types or inputs (e.g. cameras).\n\n Arguments:\n target: Target position in video stream to seek to.\n If float, interpreted as time in seconds.\n If int, interpreted as frame number.\n Raises:\n SeekError: An error occurs while seeking, or seeking is not supported.\n ValueError: `target` is not a valid value (i.e. it is negative).\n \"\"\"\n raise NotImplementedError" }, { "identifier": "SeekError", "path": "backend/scenedetect/video_stream.py", "snippet": "class SeekError(Exception):\n \"\"\"Either an unrecoverable error happened while attempting to seek, or the underlying\n stream is not seekable (additional information will be provided when possible).\n\n The stream is guaranteed to be left in a valid state, but the position may be reset.\"\"\"" }, { "identifier": "VideoOpenFailure", "path": "backend/scenedetect/video_stream.py", "snippet": "class VideoOpenFailure(Exception):\n \"\"\"Raised by a backend if opening a video fails.\"\"\"\n\n # pylint: disable=useless-super-delegation\n def __init__(self, message: str = \"Unknown backend error.\"):\n \"\"\"\n Arguments:\n message: Additional context the backend can provide for the open failure.\n \"\"\"\n super().__init__(message)\n\n # pylint: enable=useless-super-delegation" }, { "identifier": "FrameRateUnavailable", "path": "backend/scenedetect/video_stream.py", "snippet": "class FrameRateUnavailable(VideoOpenFailure):\n \"\"\"Exception instance to provide consistent error messaging across backends when the video frame\n rate is unavailable or cannot be calculated. Subclass of VideoOpenFailure.\"\"\"\n\n def __init__(self):\n super().__init__('Unable to obtain video framerate! Specify `framerate` manually, or'\n ' re-encode/re-mux the video and try again.')" } ]
from logging import getLogger
from typing import AnyStr, Tuple, Union, Optional
from numpy import ndarray
from backend.scenedetect.frame_timecode import FrameTimecode, MAX_FPS_DELTA
from backend.scenedetect.platform import get_file_name
from backend.scenedetect.video_stream import VideoStream, SeekError, VideoOpenFailure, FrameRateUnavailable
import math
import os.path
import cv2
6,526
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """:class:`VideoStreamCv2` is backed by the OpenCV `VideoCapture` object. This is the default backend. Works with video files, image sequences, and network streams/URLs. For wrapping input devices or pipes, there is also :class:`VideoCaptureAdapter` which can be constructed from an existing `cv2.VideoCapture`. This allows performing scene detection on inputs which do not support seeking. """ logger = getLogger('pyscenedetect') IMAGE_SEQUENCE_IDENTIFIER = '%' NON_VIDEO_FILE_INPUT_IDENTIFIERS = ( IMAGE_SEQUENCE_IDENTIFIER, # image sequence '://', # URL/network stream ' ! ', # gstreamer pipe ) def _get_aspect_ratio(cap: cv2.VideoCapture, epsilon: float = 0.0001) -> float: """Display/pixel aspect ratio of the VideoCapture as a float (1.0 represents square pixels).""" # Versions of OpenCV < 3.4.1 do not support this, so we fall back to 1.0. if not 'CAP_PROP_SAR_NUM' in dir(cv2): return 1.0 num: float = cap.get(cv2.CAP_PROP_SAR_NUM) den: float = cap.get(cv2.CAP_PROP_SAR_DEN) # If numerator or denominator are close to zero, so we fall back to 1.0. if abs(num) < epsilon or abs(den) < epsilon: return 1.0 return num / den
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """:class:`VideoStreamCv2` is backed by the OpenCV `VideoCapture` object. This is the default backend. Works with video files, image sequences, and network streams/URLs. For wrapping input devices or pipes, there is also :class:`VideoCaptureAdapter` which can be constructed from an existing `cv2.VideoCapture`. This allows performing scene detection on inputs which do not support seeking. """ logger = getLogger('pyscenedetect') IMAGE_SEQUENCE_IDENTIFIER = '%' NON_VIDEO_FILE_INPUT_IDENTIFIERS = ( IMAGE_SEQUENCE_IDENTIFIER, # image sequence '://', # URL/network stream ' ! ', # gstreamer pipe ) def _get_aspect_ratio(cap: cv2.VideoCapture, epsilon: float = 0.0001) -> float: """Display/pixel aspect ratio of the VideoCapture as a float (1.0 represents square pixels).""" # Versions of OpenCV < 3.4.1 do not support this, so we fall back to 1.0. if not 'CAP_PROP_SAR_NUM' in dir(cv2): return 1.0 num: float = cap.get(cv2.CAP_PROP_SAR_NUM) den: float = cap.get(cv2.CAP_PROP_SAR_DEN) # If numerator or denominator are close to zero, so we fall back to 1.0. if abs(num) < epsilon or abs(den) < epsilon: return 1.0 return num / den
class VideoStreamCv2(VideoStream):
3
2023-10-25 02:50:01+00:00
8k
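The `FrameTimecode` snippet quoted in this entry's context documents the constructor, the getters, and the arithmetic/comparison operators, so a short usage sketch follows. It assumes the package imports as `backend.scenedetect`, matching the import_statement field above; the values are illustrative.

```python
from backend.scenedetect.frame_timecode import FrameTimecode

tc = FrameTimecode(timecode='00:01:30.500', fps=29.97)  # 'HH:MM:SS[.nnn]' form
print(tc.get_frames())     # exact frame number at 29.97 fps
print(tc.get_seconds())    # roughly 90.5 (quantized to the nearest frame)
print(tc.get_timecode())   # formatted back as 'HH:MM:SS.nnn'

later = tc + 10            # an int is interpreted as a number of frames
print(later > tc)          # True; both sides share the same framerate
```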
Genesis-Embodied-AI/RoboGen
gpt_4/prompts/utils.py
[ { "identifier": "decompose_and_generate_reward_or_primitive", "path": "gpt_4/prompts/prompt_manipulation_reward_primitive.py", "snippet": "def decompose_and_generate_reward_or_primitive(task_name, task_description, initial_config, articulation_tree, semantics, \n involved_links, involved_joints, object_id, yaml_config_path, save_path, \n temperature=0.4, model='gpt-4'):\n query_task = \"\"\"\nTask name: {}\nDescription: {}\nInitial config:\n```yaml\n{}\n```\n\n{}\n\n{}\n\nLinks:\n{}\nJoints:\n{}\n\"\"\".format(task_name, task_description, initial_config, articulation_tree, semantics, involved_links, involved_joints)\n \n filled_user_contents = copy.deepcopy(user_contents)\n filled_user_contents[-1] = filled_user_contents[-1] + query_task\n\n system = \"You are a helpful assistant.\"\n reward_response = query(system, filled_user_contents, assistant_contents, save_path=save_path, debug=False, \n temperature=temperature, model=model)\n res = reward_response.split(\"\\n\")\n\n substeps = []\n substep_types = []\n reward_or_primitives = []\n action_spaces = []\n\n num_lines = len(res)\n for l_idx, line in enumerate(res):\n line = line.lower()\n if line.startswith(\"substep\"):\n substep_name = line.split(\":\")[1]\n substeps.append(substep_name)\n\n py_start_idx, py_end_idx = l_idx, l_idx\n for l_idx_2 in range(l_idx + 1, num_lines):\n ### this is a reward\n if res[l_idx_2].lower().startswith(\"```reward\"):\n substep_types.append(\"reward\")\n py_start_idx = l_idx_2 + 1\n for l_idx_3 in range(l_idx_2 + 1, num_lines):\n if \"```\" in res[l_idx_3]:\n py_end_idx = l_idx_3\n break\n \n if res[l_idx_2].lower().startswith(\"```primitive\"):\n substep_types.append(\"primitive\")\n action_spaces.append(\"None\")\n py_start_idx = l_idx_2 + 1\n for l_idx_3 in range(l_idx_2 + 1, num_lines):\n if \"```\" in res[l_idx_3]:\n py_end_idx = l_idx_3\n break\n break\n\n if res[l_idx_2].lower().startswith(\"```action space\"):\n action_space = res[l_idx_2 + 1]\n action_spaces.append(action_space)\n break\n\n reward_or_primitive_lines = res[py_start_idx:py_end_idx]\n reward_or_primitive_lines = [line.lstrip() for line in reward_or_primitive_lines]\n if substep_types[-1] == 'reward':\n reward_or_primitive_lines[0] = \" \" + reward_or_primitive_lines[0]\n for idx in range(1, len(reward_or_primitive_lines)):\n reward_or_primitive_lines[idx] = \" \" + reward_or_primitive_lines[idx]\n else:\n for idx in range(0, len(reward_or_primitive_lines)):\n reward_or_primitive_lines[idx] = \" \" + reward_or_primitive_lines[idx]\n reward_or_primitive = \"\\n\".join(reward_or_primitive_lines) + \"\\n\"\n\n reward_or_primitives.append(reward_or_primitive)\n\n task_name = task_name.replace(\" \", \"_\")\n parent_folder = os.path.dirname(os.path.dirname(save_path))\n task_save_path = os.path.join(parent_folder, \"task_{}\".format(task_name))\n if not os.path.exists(task_save_path):\n os.makedirs(task_save_path)\n\n print(\"substep: \", substeps)\n print(\"substep types: \", substep_types)\n print(\"reward or primitives: \", reward_or_primitives)\n print(\"action spaces: \", action_spaces)\n\n with open(os.path.join(task_save_path, \"substeps.txt\"), \"w\") as f:\n f.write(\"\\n\".join(substeps))\n with open(os.path.join(task_save_path, \"substep_types.txt\"), \"w\") as f:\n f.write(\"\\n\".join(substep_types))\n with open(os.path.join(task_save_path, \"action_spaces.txt\"), \"w\") as f:\n f.write(\"\\n\".join(action_spaces))\n with open(os.path.join(task_save_path, \"config_path.txt\"), \"w\") as f:\n f.write(yaml_config_path)\n\n 
for idx, (substep, type, reward_or_primitive) in enumerate(zip(substeps, substep_types, reward_or_primitives)):\n substep = substep.lstrip().replace(\" \", \"_\")\n substep = substep.replace(\"'\", \"\")\n file_name = os.path.join(task_save_path, f\"{substep}.py\")\n\n if type == 'reward':\n header = reward_file_header1.format(substep)\n end = reward_file_end.format(substep, substep)\n file_content = header + reward_file_header2 + reward_or_primitive + end\n with open(file_name, \"w\") as f:\n f.write(file_content)\n elif type == 'primitive':\n header = primitive_file_header1.format(substep)\n end = primitive_file_end.format(substep, substep)\n file_content = header + primitive_file_header2 + reward_or_primitive + end\n with open(file_name, \"w\") as f:\n f.write(file_content)\n\n return task_save_path" }, { "identifier": "query_joint_angle", "path": "gpt_4/prompts/prompt_set_joint_angle.py", "snippet": "def query_joint_angle(task_name, task_description, articulation_tree, semantics, links, joints, substeps, save_path=None, \n temperature=0.1, model='gpt-4'):\n input = \"\"\"\nTask Name: {}\nDescription: {}\n\n{}\n\n{}\n\nLinks:\n{}\n\nJoints:\n{}\n\nsubsteps:\n{}\n\"\"\".format(task_name, task_description, articulation_tree, semantics, links, joints, \"\".join(substeps))\n \n new_user_contents = copy.deepcopy(user_contents)\n new_user_contents[0] = new_user_contents[0] + input\n\n if save_path is None:\n save_path = 'data/debug/{}_joint_angle.json'.format(input_task_name.replace(\" \", \"_\"))\n\n system = \"You are a helpful assistant.\"\n response = query(system, new_user_contents, assistant_contents, save_path=save_path, temperature=temperature, model=model)\n\n # TODO: parse the response to get the joint angles\n response = response.split(\"\\n\")\n\n joint_values = {}\n for l_idx, line in enumerate(response):\n if line.lower().startswith(\"```joint values\"):\n for l_idx_2 in range(l_idx+1, len(response)):\n if response[l_idx_2].lower().startswith(\"```\"):\n break\n if response[l_idx_2].lower().strip() == \"none\":\n continue\n joint_name, joint_value = response[l_idx_2].split(\":\")\n joint_values[joint_name.strip().lstrip()] = joint_value.strip().lstrip()\n\n return joint_values" }, { "identifier": "query_spatial_relationship", "path": "gpt_4/prompts/prompt_spatial_relationship.py", "snippet": "def query_spatial_relationship(task_name, task_description, involved_objects, articulation_tree, semantics, links, joints, substeps, save_path=None, \n temperature=0.1, model='gpt-4'):\n input = \"\"\"\nTask Name: {}\nDescription: {}\nObjects involved: {}\n\n{}\n\n{}\n\nLinks:\n{}\n\nJoints:\n{}\n\nsubsteps:\n{}\n\"\"\".format(task_name, task_description, involved_objects, articulation_tree, semantics, links, joints, \"\".join(substeps))\n \n new_user_contents = copy.deepcopy(user_contents)\n new_user_contents[0] = new_user_contents[0] + input\n\n if save_path is None:\n save_path = 'data/debug/{}_joint_angle.json'.format(input_task_name.replace(\" \", \"_\"))\n\n system = \"You are a helpful assistant.\"\n response = query(system, new_user_contents, assistant_contents, save_path=save_path, temperature=temperature, model=model)\n\n # TODO: parse the response to get the joint angles\n response = response.split(\"\\n\")\n\n spatial_relationships = []\n for l_idx, line in enumerate(response):\n if line.lower().startswith(\"```spatial relationship\"):\n for l_idx_2 in range(l_idx+1, len(response)):\n if response[l_idx_2].lower().startswith(\"```\"):\n break\n if 
response[l_idx_2].lower().strip() == \"none\":\n continue\n spatial_relationships.append(response[l_idx_2].strip().lstrip().lower())\n\n return spatial_relationships" }, { "identifier": "query", "path": "gpt_4/query.py", "snippet": "def query(system, user_contents, assistant_contents, model='gpt-4', save_path=None, temperature=1, debug=False):\n \n for user_content, assistant_content in zip(user_contents, assistant_contents):\n user_content = user_content.split(\"\\n\")\n assistant_content = assistant_content.split(\"\\n\")\n \n for u in user_content:\n print(u)\n print(\"=====================================\")\n for a in assistant_content:\n print(a)\n print(\"=====================================\")\n\n for u in user_contents[-1].split(\"\\n\"):\n print(u)\n\n if debug:\n import pdb; pdb.set_trace()\n return None\n\n print(\"=====================================\")\n\n start = time.time()\n \n num_assistant_mes = len(assistant_contents)\n messages = []\n\n messages.append({\"role\": \"system\", \"content\": \"{}\".format(system)})\n for idx in range(num_assistant_mes):\n messages.append({\"role\": \"user\", \"content\": user_contents[idx]})\n messages.append({\"role\": \"assistant\", \"content\": assistant_contents[idx]})\n messages.append({\"role\": \"user\", \"content\": user_contents[-1]})\n\n openai.api_key = os.environ[\"OPENAI_API_KEY\"]\n response = openai.ChatCompletion.create(\n model=model,\n messages=messages,\n temperature=temperature\n )\n\n result = ''\n for choice in response.choices: \n result += choice.message.content \n\n end = time.time()\n used_time = end - start\n\n print(result)\n if save_path is not None:\n with open(save_path, \"w\") as f:\n json.dump({\"used_time\": used_time, \"res\": result, \"system\": system, \"user\": user_contents, \"assistant\": assistant_contents}, f, indent=4)\n\n return result" }, { "identifier": "adjust_size_v2", "path": "gpt_4/adjust_size.py", "snippet": "def adjust_size_v2(task_description, yaml_string, save_path, temperature=0.2, model='gpt-4'):\n # extract object names and sizes\n object_names = []\n object_sizes = []\n object_types = []\n\n config = yaml.safe_load(yaml_string)\n for obj in config:\n if \"name\" in obj:\n object_names.append(obj['name'].lower())\n object_types.append(obj['type'])\n if obj['type'] == 'mesh' or obj['type'] == 'urdf' or obj['type'] == 'sphere':\n object_sizes.append(obj['size'])\n if obj['type'] in ['cylinder', 'cube', 'box']:\n if isinstance(obj['size'], list):\n object_sizes.append([str(x) for x in obj[\"size\"]])\n else:\n object_sizes.append([str(x) for x in parse_center(obj['size'])])\n \n new_user_contents = \"```\\n\"\n better_task_description = re.sub(r'\\d', '', task_description)\n better_task_description = better_task_description.replace(\"_\", \" \")\n better_task_description = better_task_description.lstrip()\n better_task_description = better_task_description.strip()\n new_user_contents += \"Task: {}\\n\".format(better_task_description)\n for name, type, size in zip(object_names, object_types, object_sizes):\n if type in ['mesh', 'urdf', 'sphere']:\n new_user_contents += \"{}, {}, {}\\n\".format(name, type, size)\n else:\n new_content = \"{}, {}, \".format(name, type)\n size_string = \", \".join(size)\n new_content = new_content + size_string + \"\\n\"\n new_user_contents += new_content\n new_user_contents += \"```\"\n input_user = copy.deepcopy(scale_user_contents_v2)\n input_user.append(new_user_contents)\n\n system = \"You are a helpful assistant.\"\n response = query(system, 
input_user, scale_assistant_contents_v2, save_path=save_path, debug=False, temperature=temperature, model=model)\n\n response = response.split('\\n')\n\n corrected_names = []\n corrected_sizes = []\n for idx, line in enumerate(response):\n if \"```yaml\" in line:\n for idx2 in range(idx+1, len(response)):\n line2 = response[idx2]\n if \"```\" in line2:\n break\n line2 = line2.split(\", \")\n corrected_names.append(line2[0].lower())\n sizes = line2[2:]\n if len(sizes) > 1:\n corrected_sizes.append([float(x) for x in sizes])\n else:\n corrected_sizes.append(float(sizes[0]))\n \n # replace the size in yaml\n for obj in config:\n if 'type' in obj:\n if obj['type'] == 'mesh' or obj['type'] == 'urdf':\n obj['size'] = corrected_sizes[corrected_names.index(obj['name'].lower())]\n\n return config" } ]
import copy
import os
import yaml
from gpt_4.prompts.prompt_manipulation_reward_primitive import decompose_and_generate_reward_or_primitive
from gpt_4.prompts.prompt_set_joint_angle import query_joint_angle
from gpt_4.prompts.prompt_spatial_relationship import query_spatial_relationship
from gpt_4.query import query
from gpt_4.adjust_size import adjust_size_v2
6,421
joints = [] task_response = task_response.split("\n") for l_idx, line in enumerate(task_response): if line.lower().startswith("task name:"): task_name = line.split(":")[1].strip() task_name = task_name.replace("/", " or ").replace(".", "").replace("'", "").replace('"', "") task_names.append(task_name) task_description = task_response[l_idx+1].split(":")[1].strip() task_description = task_description.replace("/", " or ").replace(".", "").replace("'", "").replace('"', "").replace(")", ".").replace("(", ".") task_descriptions.append(task_description) additional_objects.append(task_response[l_idx+2].split(":")[1].strip()) involved_links = "" for link_idx in range(l_idx+4, len(task_response)): if task_response[link_idx].lower().startswith("joints:"): break else: # involved_links.append(task_response[link_idx].split(":")[0][2:]) involved_links += (task_response[link_idx][2:]) links.append(involved_links) involved_joints = "" for joint_idx in range(link_idx+1, len(task_response)): if not task_response[joint_idx].lower().startswith("- "): break else: # involved_joints.append(task_response[joint_idx].split(":")[0][2:]) involved_joints += (task_response[joint_idx][2:]) joints.append(involved_joints) return task_names, task_descriptions, additional_objects, links, joints def build_task_given_text(object_category, task_name, task_description, additional_object, involved_links, involved_joints, articulation_tree_filled, semantics_filled, object_path, save_folder, temperature_dict, model_dict=None): if model_dict is None: model_dict = { "task_generation": "gpt-4", "reward": "gpt-4", "yaml": "gpt-4", "size": "gpt-4", "joint": "gpt-4", "spatial_relationship": "gpt-4" } task_yaml_config_prompt_filled = copy.deepcopy(task_yaml_config_prompt) if additional_object.lower() == "none": task_object = object_category else: task_object = "{}, {}".format(object_category, additional_object) task_yaml_config_prompt_filled = task_yaml_config_prompt_filled.format(task_name, task_description, task_object) task_yaml_config_prompt_filled += articulation_tree_filled + semantics_filled system = "You are a helpful assistant." save_path = os.path.join(save_folder, "gpt_response/task_yaml_config_{}.json".format(task_name)) print("=" * 50) print("=" * 20, "generating task yaml config", "=" * 20) print("=" * 50) task_yaml_response = query(system, [task_yaml_config_prompt_filled], [], save_path=save_path, debug=False, temperature=temperature_dict["yaml"], model=model_dict["yaml"]) # NOTE: parse the yaml file and generate the task in the simulator. description = f"{task_name}_{task_description}".replace(" ", "_").replace(".", "").replace(",", "") task_yaml_response = task_yaml_response.split("\n") size_save_path = os.path.join(save_folder, "gpt_response/size_{}.json".format(task_name)) parsed_yaml, save_name = parse_response_to_get_yaml(task_yaml_response, description, save_path=size_save_path, temperature=temperature_dict["size"], model=model_dict["size"]) # NOTE: post-process such that articulated object is urdf. # NOTE: post-process to include the reward asset path for reward generation. 
for obj in parsed_yaml: if "name" in obj and obj['name'] == object_category: obj['type'] = 'urdf' obj['reward_asset_path'] = object_path # config_path = "gpt_4/data/parsed_configs_semantic_articulated/{}-{}".format(object_category, time_string) config_path = save_folder with open(os.path.join(config_path, save_name), 'w') as f: yaml.dump(parsed_yaml, f, indent=4) input_to_reward_config = copy.deepcopy(parsed_yaml) for obj in input_to_reward_config: if "reward_asset_path" in obj: input_to_reward_config.remove(obj) initial_config = yaml.safe_dump(parsed_yaml) ### decompose and generate reward yaml_file_path = os.path.join(config_path, save_name) reward_save_path = os.path.join(save_folder, "gpt_response/reward_{}.json".format(task_name)) print("=" * 50) print("=" * 20, "generating reward", "=" * 20) print("=" * 50) solution_path = decompose_and_generate_reward_or_primitive(task_name, task_description, initial_config, articulation_tree_filled, semantics_filled, involved_links, involved_joints, object_path, yaml_file_path, save_path=reward_save_path, temperature=temperature_dict["reward"], model=model_dict["reward"]) ### generate joint angle save_path = os.path.join(save_folder, "gpt_response/joint_angle_{}.json".format(task_name)) substep_file_path = os.path.join(solution_path, "substeps.txt") with open(substep_file_path, 'r') as f: substeps = f.readlines() print("=" * 50) print("=" * 20, "generating initial joint angle", "=" * 20) print("=" * 50) joint_angle_values = query_joint_angle(task_name, task_description, articulation_tree_filled, semantics_filled, involved_links, involved_joints, substeps, save_path=save_path, temperature=temperature_dict['joint'], model=model_dict["joint"]) joint_angle_values["set_joint_angle_object_name"] = object_category involved_objects = [] config = yaml.safe_load(initial_config) for obj in config: if "name" in obj: involved_objects.append(obj["name"]) involved_objects = ", ".join(involved_objects) save_path = os.path.join(save_folder, "gpt_response/spatial_relationships_{}.json".format(task_name)) print("=" * 50) print("=" * 20, "generating initial spatial relationship", "=" * 20) print("=" * 50)
task_yaml_config_prompt = """ I need you to describe the initial scene configuration for a given task in the following format, using a yaml file. This yaml file will help build the task in a simulator. The task is for a mobile Franka panda robotic arm to learn a manipulation skill in the simulator. The Franka panda arm is mounted on a floor, at location (1, 1, 0). It can move freely on the floor. The z axis is the gravity axis. The format is as follows: ```yaml - use_table: whether the task requires using a table. This should be decided based on common sense. If a table is used, its location will be fixed at (0, 0, 0). The height of the table will be 0.6m. Usually, if the objects invovled in the task are usually placed on a table (not directly on the ground), then the task requires using a table. # for each object involved in the task, we need to specify the following fields for it. - type: mesh name: name of the object, so it can be referred to in the simulator size: describe the scale of the object mesh using 1 number in meters. The scale should match real everyday objects. E.g., an apple is of scale 0.08m. You can think of the scale to be the longest dimension of the object. lang: this should be a language description of the mesh. The language should be a concise description of the obejct, such that the language description can be used to search an existing database of objects to find the object. path: this can be a string showing the path to the mesh of the object. on_table: whether the object needs to be placed on the table (if there is a table needed for the task). This should be based on common sense and the requirement of the task. E.g., a microwave is usually placed on the table. center: the location of the object center. If there isn't a table needed for the task or the object does not need to be on the table, this center should be expressed in the world coordinate system. If there is a table in the task and the object needs to be placed on the table, this center should be expressed in terms of the table coordinate, where (0, 0, 0) is the lower corner of the table, and (1, 1, 1) is the higher corner of the table. In either case, you should try to specify a location such that there is no collision between objects. movable: if the object is movable or not in the simulator due to robot actions. This option should be falsed for most tasks; it should be true only if the task specifically requires the robot to move the object. This value can also be missing, which means the object is not movable. ``` An example input includes the task names, task descriptions, and objects involved in the task. I will also provide with you the articulation tree and semantics of the articulated object. This can be useful for knowing what parts are already in the articulated object, and thus you do not need to repeat those parts as separate objects in the yaml file. Your task includes two parts: 1. Output the yaml configuration of the task. 2. Sometimes, the task description / objects involved will refer to generic/placeholder objects, e.g., to place an "item" into the drawer, and to heat "food" in the microwave. In the generated yaml config, you should change these placeholder objects to be concrete objects in the lang field, e.g., change "item" to be a toy or a pencil, and "food" to be a hamburger, a bowl of soup, etc. Example input: Task Name: Insert Bread Slice Description: The robotic arm will insert a bread slice into the toaster. Objects involved: Toaster, bread slice. 
Only the objects specified here should be included in the yaml file. ```Toaster articulation tree links: base link_0 link_1 link_2 link_3 link_4 link_5 joints: joint_name: joint_0 joint_type: continuous parent_link: link_5 child_link: link_0 joint_name: joint_1 joint_type: prismatic parent_link: link_5 child_link: link_1 joint_name: joint_2 joint_type: prismatic parent_link: link_5 child_link: link_2 joint_name: joint_3 joint_type: prismatic parent_link: link_5 child_link: link_3 joint_name: joint_4 joint_type: prismatic parent_link: link_5 child_link: link_4 joint_name: joint_5 joint_type: fixed parent_link: base child_link: link_5 ``` ```Toaster semantics link_0 hinge knob link_1 slider slider link_2 slider button link_3 slider button link_4 slider button link_5 free toaster_body ``` An example output: ```yaml - use_table: True ### Toaster and bread are usually put on a table. - type: mesh name: "Toaster" on_table: True # Toasters are usually put on a table. center: (0.1, 0.1, 0) # Remember that when an object is placed on the table, the center is expressed in the table coordinate, where (0, 0, 0) is the lower corner and (1, 1, 1) is the higher corner of the table. Here we put the toaster near the lower corner of the table. size: 0.35 # the size of a toaster is roughly 0.35m lang: "a common toaster" path: "toaster.urdf" - type: mesh name: "bread slice" on_table: True # Bread is usually placed on the table as well. center: (0.8, 0.7, 0) # Remember that when an object is placed on the table, the center is expressed in the table coordinate, where (0, 0, 0) is the lower corner and (1, 1, 1) is the higher corner of the table. Here we put the bread slice near the higher corner of the table. size: 0.1 # common size of a bread slice lang: "a slice of bread" Path: "bread_slice.obj" ``` Another example input: Task Name: Removing Lid From Pot Description: The robotic arm will remove the lid from the pot. Objects involved: KitchenPot. Only the objects specified here should be included in the yaml file. ```KitchenPot articulation tree links: base link_0 link_1 joints: joint_name: joint_0 joint_type: prismatic parent_link: link_1 child_link: link_0 joint_name: joint_1 joint_type: fixed parent_link: base child_link: link_1 ``` ```KitchenPot semantics link_0 slider lid link_1 free pot_body ``` Output: ```yaml - use_table: True # A kitchen pot is usually placed on the table. - type: mesh name: "KitchenPot" on_table: True # kitchen pots are usually placed on a table. center: (0.3, 0.6, 0) # Remember that when an object is placed on the table, the center is expressed in the table coordinate, where (0, 0, 0) is the lower corner and (1, 1, 1) is the higher corner of the table. Here we put the kitchen pot just at a random location on the table. size: 0.28 # the size of a common kitchen pot is roughly 0.28m lang: "a common kitchen pot" path: "kitchen_pot.urdf" ``` Note in this example, the kitchen pot already has a lid from the semantics file. Therefore, you do not need to include a separate lid in the yaml file. One more example input: Task Name: Push the chair. Description: The robotic arm will push and move the chair to a target location. Objects involved: A chair. Only the objects here should be included in the yaml file. 
```Chair articulation tree links: base link_0 link_1 joints: joint_name: joint_0 joint_type: revolute parent_link: link_1 child_link: link_0 joint_name: joint_1 joint_type: fixed parent_link: base child_link: link_1 ``` ```Chair semantics link_0 hinge seat link_1 free leg ``` Output: ```yaml - use_table: False # A chair is usually just on the ground - type: mesh name: "Chair" on_table: False # An oven is usually just placed on the floor. center: (1.0, 0, 0) # Remember that when not on a table, the center is expressed in the world coordinate. Since the robot is at (1, 1, 0) and the table is at (0, 0, 0), we place the oven at (1.8, 2, 0) to avoid collision with the table and the robot. size: 1.2 # the size of an oven is roughly 0.9m lang: "a standard chair" path: "chair.urdf" movable: True # here the task requires the robot to push the chair, so the chair has to be moveable. ``` Note in the above example we set the chair to be moveable so the robot can push it for executing the task. Another example: Task Name: Put an item into the box drawer Description: The robot will open the drawer of the box, and put an item into it. Objects involved: A box with drawer, an item to be placed in the drawer. ```Box articulation tree links: base link_0 link_1 link_2 joints: joint_name: joint_0 joint_type: revolute parent_link: link_2 child_link: link_0 joint_name: joint_1 joint_type: prismatic parent_link: link_2 child_link: link_1 joint_name: joint_2 joint_type: fixed parent_link: base child_link: link_2 ``` ```Box semantics link_0 hinge rotation_lid link_1 slider drawer link_2 free box_body ``` Output: ```yaml - use_table: true - center: (0.5, 0.5, 0) lang: "a wooden box" name: "Box" on_table: true path: "box.urdf" size: 0.3 type: urdf - path: "item.obj" center: (0.2, 0.4, 0) lang: "A toy" # Note here, we changed the generic/placeholder "item" object to be a more concrete object: a toy. name: "Item" on_table: true size: 0.05 type: mesh ``` One more example: Task Name: Fetch item from refrigerator Description: The robot will open the refrigerator door, and fetch an item from the refrigerator. Objects involved: A refrigerator, an item to be fetched from the refrigerator. ```Refirgerator articulation tree links: base link_0 link_1 link_2 joints: joint_name: joint_0 joint_type: fixed parent_link: base child_link: link_0 joint_name: joint_1 joint_type: revolute parent_link: link_0 child_link: link_1 joint_name: joint_2 joint_type: revolute parent_link: link_0 child_link: link_2 ``` ```Refrigerator semantics link_0 heavy refrigerator_body link_1 hinge door link_2 hinge door ``` Output: ```yaml - use_table: true # the fetched item should be placed on the table, after it's moved out of the refrigerator. - center: (1.0, 0.2, 0) # Remember that when not on a table, the center is expressed in the world coordinate. Since the robot is at (1, 1, 0) and the table is at (0, 0, 0), we place the oven at (1.8, 2, 0) to avoid collision with the table and the robot. lang: a common two-door refrigerator name: Refrigerator on_table: false # the refrigerator is usually placed on the floor. path: refrigerator.urdf reward_asset_path: '10612' size: 1.8 type: urdf - center: (1.0, 0.2, 0.5) # the soda can is initially placed inside the refrigerator. lang: a can of soda name: Item on_table: false # the item is initially placed inside the refrigerator path: soda_can.obj size: 0.2 type: mesh ``` Rules: - You do not need to include the robot in the yaml file. - The yaml file should only include the objects listed in "Objects involved". 
- Sometimes, the task description / objects involved will refer to generic/placeholder objects, e.g., to place an "item" into the drawer, and to heat "food" in the microwave. In the generated yaml config, you should change these placeholder objects to be concrete objects in the lang field, e.g., change "item" to be a toy or a pencil, and "food" to be a hamburger, a bowl of soup, etc. Can you do this for the following task: Task Name: {} Description: {} Objects involved: {} """ def parse_response_to_get_yaml(response, task_description, save_path, temperature=0.2, model='gpt-4'): yaml_string = [] for l_idx, line in enumerate(response): if "```yaml" in line: for l_idx_2 in range(l_idx + 1, len(response)): if response[l_idx_2].lstrip().startswith("```"): break yaml_string.append(response[l_idx_2]) yaml_string = '\n'.join(yaml_string) description = f"{task_description}".replace(" ", "_").replace(".", "").replace(",", "").replace("(", "").replace(")", "") save_name = description + '.yaml' print("=" * 30) print("querying GPT to adjust the size of the objects") print("=" * 30) parsed_size_yaml = adjust_size_v2(description, yaml_string, save_path, temperature, model=model) return parsed_size_yaml, save_name def parse_task_response(task_response): task_names = [] task_descriptions = [] additional_objects = [] links = [] joints = [] task_response = task_response.split("\n") for l_idx, line in enumerate(task_response): if line.lower().startswith("task name:"): task_name = line.split(":")[1].strip() task_name = task_name.replace("/", " or ").replace(".", "").replace("'", "").replace('"', "") task_names.append(task_name) task_description = task_response[l_idx+1].split(":")[1].strip() task_description = task_description.replace("/", " or ").replace(".", "").replace("'", "").replace('"', "").replace(")", ".").replace("(", ".") task_descriptions.append(task_description) additional_objects.append(task_response[l_idx+2].split(":")[1].strip()) involved_links = "" for link_idx in range(l_idx+4, len(task_response)): if task_response[link_idx].lower().startswith("joints:"): break else: # involved_links.append(task_response[link_idx].split(":")[0][2:]) involved_links += (task_response[link_idx][2:]) links.append(involved_links) involved_joints = "" for joint_idx in range(link_idx+1, len(task_response)): if not task_response[joint_idx].lower().startswith("- "): break else: # involved_joints.append(task_response[joint_idx].split(":")[0][2:]) involved_joints += (task_response[joint_idx][2:]) joints.append(involved_joints) return task_names, task_descriptions, additional_objects, links, joints def build_task_given_text(object_category, task_name, task_description, additional_object, involved_links, involved_joints, articulation_tree_filled, semantics_filled, object_path, save_folder, temperature_dict, model_dict=None): if model_dict is None: model_dict = { "task_generation": "gpt-4", "reward": "gpt-4", "yaml": "gpt-4", "size": "gpt-4", "joint": "gpt-4", "spatial_relationship": "gpt-4" } task_yaml_config_prompt_filled = copy.deepcopy(task_yaml_config_prompt) if additional_object.lower() == "none": task_object = object_category else: task_object = "{}, {}".format(object_category, additional_object) task_yaml_config_prompt_filled = task_yaml_config_prompt_filled.format(task_name, task_description, task_object) task_yaml_config_prompt_filled += articulation_tree_filled + semantics_filled system = "You are a helpful assistant." 
save_path = os.path.join(save_folder, "gpt_response/task_yaml_config_{}.json".format(task_name)) print("=" * 50) print("=" * 20, "generating task yaml config", "=" * 20) print("=" * 50) task_yaml_response = query(system, [task_yaml_config_prompt_filled], [], save_path=save_path, debug=False, temperature=temperature_dict["yaml"], model=model_dict["yaml"]) # NOTE: parse the yaml file and generate the task in the simulator. description = f"{task_name}_{task_description}".replace(" ", "_").replace(".", "").replace(",", "") task_yaml_response = task_yaml_response.split("\n") size_save_path = os.path.join(save_folder, "gpt_response/size_{}.json".format(task_name)) parsed_yaml, save_name = parse_response_to_get_yaml(task_yaml_response, description, save_path=size_save_path, temperature=temperature_dict["size"], model=model_dict["size"]) # NOTE: post-process such that articulated object is urdf. # NOTE: post-process to include the reward asset path for reward generation. for obj in parsed_yaml: if "name" in obj and obj['name'] == object_category: obj['type'] = 'urdf' obj['reward_asset_path'] = object_path # config_path = "gpt_4/data/parsed_configs_semantic_articulated/{}-{}".format(object_category, time_string) config_path = save_folder with open(os.path.join(config_path, save_name), 'w') as f: yaml.dump(parsed_yaml, f, indent=4) input_to_reward_config = copy.deepcopy(parsed_yaml) for obj in input_to_reward_config: if "reward_asset_path" in obj: input_to_reward_config.remove(obj) initial_config = yaml.safe_dump(parsed_yaml) ### decompose and generate reward yaml_file_path = os.path.join(config_path, save_name) reward_save_path = os.path.join(save_folder, "gpt_response/reward_{}.json".format(task_name)) print("=" * 50) print("=" * 20, "generating reward", "=" * 20) print("=" * 50) solution_path = decompose_and_generate_reward_or_primitive(task_name, task_description, initial_config, articulation_tree_filled, semantics_filled, involved_links, involved_joints, object_path, yaml_file_path, save_path=reward_save_path, temperature=temperature_dict["reward"], model=model_dict["reward"]) ### generate joint angle save_path = os.path.join(save_folder, "gpt_response/joint_angle_{}.json".format(task_name)) substep_file_path = os.path.join(solution_path, "substeps.txt") with open(substep_file_path, 'r') as f: substeps = f.readlines() print("=" * 50) print("=" * 20, "generating initial joint angle", "=" * 20) print("=" * 50) joint_angle_values = query_joint_angle(task_name, task_description, articulation_tree_filled, semantics_filled, involved_links, involved_joints, substeps, save_path=save_path, temperature=temperature_dict['joint'], model=model_dict["joint"]) joint_angle_values["set_joint_angle_object_name"] = object_category involved_objects = [] config = yaml.safe_load(initial_config) for obj in config: if "name" in obj: involved_objects.append(obj["name"]) involved_objects = ", ".join(involved_objects) save_path = os.path.join(save_folder, "gpt_response/spatial_relationships_{}.json".format(task_name)) print("=" * 50) print("=" * 20, "generating initial spatial relationship", "=" * 20) print("=" * 50)
spatial_relationships = query_spatial_relationship(task_name, task_description, involved_objects, articulation_tree_filled, semantics_filled,
2
2023-10-31 19:44:09+00:00
8k
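In `build_task_given_text` above, the loop that builds `input_to_reward_config` calls `list.remove()` on the very list it is iterating, which can silently skip entries when two matching objects are adjacent. A minimal sketch of safer filtering patterns follows; the example dicts and variable shapes are illustrative, not taken from the repo, and whether the intent is to drop the whole entry or only the `reward_asset_path` key is left open:

```python
import copy

# Illustrative stand-in for the list of object dicts built by the yaml parsing step.
parsed_yaml = [
    {"name": "Box", "type": "urdf", "reward_asset_path": "/data/box"},
    {"name": "Item", "type": "mesh"},
]

# Option 1: rebuild the list, dropping every entry that carries the reward-only key.
input_to_reward_config = [
    obj for obj in copy.deepcopy(parsed_yaml) if "reward_asset_path" not in obj
]

# Option 2: keep every object but strip the key from each dict.
stripped = [
    {k: v for k, v in obj.items() if k != "reward_asset_path"}
    for obj in parsed_yaml
]

print(input_to_reward_config)  # [{'name': 'Item', 'type': 'mesh'}]
print(stripped)                # both objects, neither with 'reward_asset_path'
```

Either form leaves the original `parsed_yaml` untouched and avoids mutating a list mid-iteration.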
junhoyeo/BetterOCR
betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/brainocr.py
[ { "identifier": "get_detector", "path": "betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/detection.py", "snippet": "def get_detector(det_model_ckpt_fp: str, device: str = \"cpu\"):\n net = CRAFT()\n\n net.load_state_dict(\n copy_state_dict(torch.load(det_model_ckpt_fp, map_location=device))\n )\n if device == \"cuda\":\n net = torch.nn.DataParallel(net).to(device)\n cudnn.benchmark = False\n\n net.eval()\n return net" }, { "identifier": "get_textbox", "path": "betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/detection.py", "snippet": "def get_textbox(detector, image: np.ndarray, opt2val: dict):\n bboxes, polys = test_net(image, detector, opt2val)\n result = []\n for i, box in enumerate(polys):\n poly = np.array(box).astype(np.int32).reshape((-1))\n result.append(poly)\n\n return result" }, { "identifier": "get_recognizer", "path": "betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/recognition.py", "snippet": "def get_recognizer(opt2val: dict):\n \"\"\"\n :return:\n recognizer: recognition net\n converter: CTCLabelConverter\n \"\"\"\n # converter\n vocab = opt2val[\"vocab\"]\n converter = CTCLabelConverter(vocab)\n\n # recognizer\n recognizer = Model(opt2val)\n\n # state_dict\n rec_model_ckpt_fp = opt2val[\"rec_model_ckpt_fp\"]\n device = opt2val[\"device\"]\n state_dict = torch.load(rec_model_ckpt_fp, map_location=device)\n\n if device == \"cuda\":\n recognizer = torch.nn.DataParallel(recognizer).to(device)\n else:\n # TODO temporary: multigpu 학습한 뒤 ckpt loading 문제\n from collections import OrderedDict\n\n def _sync_tensor_name(state_dict):\n state_dict_ = OrderedDict()\n for name, val in state_dict.items():\n name = name.replace(\"module.\", \"\")\n state_dict_[name] = val\n return state_dict_\n\n state_dict = _sync_tensor_name(state_dict)\n\n recognizer.load_state_dict(state_dict)\n\n return recognizer, converter" }, { "identifier": "get_text", "path": "betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/recognition.py", "snippet": "def get_text(image_list, recognizer, converter, opt2val: dict):\n imgW = opt2val[\"imgW\"]\n imgH = opt2val[\"imgH\"]\n adjust_contrast = opt2val[\"adjust_contrast\"]\n batch_size = opt2val[\"batch_size\"]\n n_workers = opt2val[\"n_workers\"]\n contrast_ths = opt2val[\"contrast_ths\"]\n\n # TODO: figure out what is this for\n # batch_max_length = int(imgW / 10)\n\n coord = [item[0] for item in image_list]\n img_list = [item[1] for item in image_list]\n AlignCollate_normal = AlignCollate(imgH, imgW, adjust_contrast)\n test_data = ListDataset(img_list)\n test_loader = torch.utils.data.DataLoader(\n test_data,\n batch_size=batch_size,\n shuffle=False,\n num_workers=n_workers,\n collate_fn=AlignCollate_normal,\n pin_memory=True,\n )\n\n # predict first round\n result1 = recognizer_predict(recognizer, converter, test_loader, opt2val)\n\n # predict second round\n low_confident_idx = [\n i for i, item in enumerate(result1) if (item[1] < contrast_ths)\n ]\n if len(low_confident_idx) > 0:\n img_list2 = [img_list[i] for i in low_confident_idx]\n AlignCollate_contrast = AlignCollate(imgH, imgW, adjust_contrast)\n test_data = ListDataset(img_list2)\n test_loader = torch.utils.data.DataLoader(\n test_data,\n batch_size=batch_size,\n shuffle=False,\n num_workers=n_workers,\n collate_fn=AlignCollate_contrast,\n pin_memory=True,\n )\n result2 = recognizer_predict(recognizer, converter, test_loader, opt2val)\n\n result = []\n for i, zipped in enumerate(zip(coord, result1)):\n box, pred1 = zipped\n if i in low_confident_idx:\n pred2 = 
result2[low_confident_idx.index(i)]\n if pred1[1] > pred2[1]:\n result.append((box, pred1[0], pred1[1]))\n else:\n result.append((box, pred2[0], pred2[1]))\n else:\n result.append((box, pred1[0], pred1[1]))\n\n return result" }, { "identifier": "diff", "path": "betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/utils.py", "snippet": "def diff(input_list):\n return max(input_list) - min(input_list)" }, { "identifier": "get_image_list", "path": "betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/utils.py", "snippet": "def get_image_list(\n horizontal_list: list, free_list: list, img: np.ndarray, model_height: int = 64\n):\n image_list = []\n maximum_y, maximum_x = img.shape\n\n max_ratio_hori, max_ratio_free = 1, 1\n for box in free_list:\n rect = np.array(box, dtype=\"float32\")\n transformed_img = four_point_transform(img, rect)\n ratio = transformed_img.shape[1] / transformed_img.shape[0]\n crop_img = cv2.resize(\n transformed_img,\n (int(model_height * ratio), model_height),\n interpolation=Image.LANCZOS,\n )\n # box : [[x1,y1],[x2,y2],[x3,y3],[x4,y4]]\n image_list.append((box, crop_img))\n max_ratio_free = max(ratio, max_ratio_free)\n\n max_ratio_free = math.ceil(max_ratio_free)\n\n for box in horizontal_list:\n x_min = max(0, box[0])\n x_max = min(box[1], maximum_x)\n y_min = max(0, box[2])\n y_max = min(box[3], maximum_y)\n crop_img = img[y_min:y_max, x_min:x_max]\n width = x_max - x_min\n height = y_max - y_min\n ratio = width / height\n crop_img = cv2.resize(\n crop_img,\n (int(model_height * ratio), model_height),\n interpolation=Image.LANCZOS,\n )\n image_list.append(\n (\n [\n [x_min, y_min],\n [x_max, y_min],\n [x_max, y_max],\n [x_min, y_max],\n ],\n crop_img,\n )\n )\n max_ratio_hori = max(ratio, max_ratio_hori)\n\n max_ratio_hori = math.ceil(max_ratio_hori)\n max_ratio = max(max_ratio_hori, max_ratio_free)\n max_width = math.ceil(max_ratio) * model_height\n\n image_list = sorted(\n image_list, key=lambda item: item[0][0][1]\n ) # sort by vertical position\n return image_list, max_width" }, { "identifier": "get_paragraph", "path": "betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/utils.py", "snippet": "def get_paragraph(raw_result, x_ths: int = 1, y_ths: float = 0.5, mode: str = \"ltr\"):\n # create basic attributes\n box_group = []\n for box in raw_result:\n all_x = [int(coord[0]) for coord in box[0]]\n all_y = [int(coord[1]) for coord in box[0]]\n min_x = min(all_x)\n max_x = max(all_x)\n min_y = min(all_y)\n max_y = max(all_y)\n height = max_y - min_y\n box_group.append(\n [box[1], min_x, max_x, min_y, max_y, height, 0.5 * (min_y + max_y), 0]\n ) # last element indicates group\n # cluster boxes into paragraph\n current_group = 1\n while len([box for box in box_group if box[7] == 0]) > 0:\n # group0 = non-group\n box_group0 = [box for box in box_group if box[7] == 0]\n # new group\n if len([box for box in box_group if box[7] == current_group]) == 0:\n # assign first box to form new group\n box_group0[0][7] = current_group\n # try to add group\n else:\n current_box_group = [box for box in box_group if box[7] == current_group]\n mean_height = np.mean([box[5] for box in current_box_group])\n # yapf: disable\n min_gx = min([box[1] for box in current_box_group]) - x_ths * mean_height\n max_gx = max([box[2] for box in current_box_group]) + x_ths * mean_height\n min_gy = min([box[3] for box in current_box_group]) - y_ths * mean_height\n max_gy = max([box[4] for box in current_box_group]) + y_ths * mean_height\n add_box = False\n for box in box_group0:\n 
same_horizontal_level = (min_gx <= box[1] <= max_gx) or (min_gx <= box[2] <= max_gx)\n same_vertical_level = (min_gy <= box[3] <= max_gy) or (min_gy <= box[4] <= max_gy)\n if same_horizontal_level and same_vertical_level:\n box[7] = current_group\n add_box = True\n break\n # cannot add more box, go to next group\n if not add_box:\n current_group += 1\n # yapf: enable\n # arrage order in paragraph\n result = []\n for i in set(box[7] for box in box_group):\n current_box_group = [box for box in box_group if box[7] == i]\n mean_height = np.mean([box[5] for box in current_box_group])\n min_gx = min([box[1] for box in current_box_group])\n max_gx = max([box[2] for box in current_box_group])\n min_gy = min([box[3] for box in current_box_group])\n max_gy = max([box[4] for box in current_box_group])\n\n text = \"\"\n while len(current_box_group) > 0:\n highest = min([box[6] for box in current_box_group])\n candidates = [\n box for box in current_box_group if box[6] < highest + 0.4 * mean_height\n ]\n # get the far left\n if mode == \"ltr\":\n most_left = min([box[1] for box in candidates])\n for box in candidates:\n if box[1] == most_left:\n best_box = box\n elif mode == \"rtl\":\n most_right = max([box[2] for box in candidates])\n for box in candidates:\n if box[2] == most_right:\n best_box = box\n text += \" \" + best_box[0]\n current_box_group.remove(best_box)\n\n result.append(\n [\n [\n [min_gx, min_gy],\n [max_gx, min_gy],\n [max_gx, max_gy],\n [min_gx, max_gy],\n ],\n text[1:],\n ]\n )\n\n return result" }, { "identifier": "group_text_box", "path": "betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/utils.py", "snippet": "def group_text_box(\n polys,\n slope_ths: float = 0.1,\n ycenter_ths: float = 0.5,\n height_ths: float = 0.5,\n width_ths: float = 1.0,\n add_margin: float = 0.05,\n):\n # poly top-left, top-right, low-right, low-left\n horizontal_list, free_list, combined_list, merged_list = [], [], [], []\n\n for poly in polys:\n slope_up = (poly[3] - poly[1]) / np.maximum(10, (poly[2] - poly[0]))\n slope_down = (poly[5] - poly[7]) / np.maximum(10, (poly[4] - poly[6]))\n if max(abs(slope_up), abs(slope_down)) < slope_ths:\n x_max = max([poly[0], poly[2], poly[4], poly[6]])\n x_min = min([poly[0], poly[2], poly[4], poly[6]])\n y_max = max([poly[1], poly[3], poly[5], poly[7]])\n y_min = min([poly[1], poly[3], poly[5], poly[7]])\n horizontal_list.append(\n [x_min, x_max, y_min, y_max, 0.5 * (y_min + y_max), y_max - y_min]\n )\n else:\n height = np.linalg.norm([poly[6] - poly[0], poly[7] - poly[1]])\n margin = int(1.44 * add_margin * height)\n\n theta13 = abs(\n np.arctan((poly[1] - poly[5]) / np.maximum(10, (poly[0] - poly[4])))\n )\n theta24 = abs(\n np.arctan((poly[3] - poly[7]) / np.maximum(10, (poly[2] - poly[6])))\n )\n # do I need to clip minimum, maximum value here?\n x1 = poly[0] - np.cos(theta13) * margin\n y1 = poly[1] - np.sin(theta13) * margin\n x2 = poly[2] + np.cos(theta24) * margin\n y2 = poly[3] - np.sin(theta24) * margin\n x3 = poly[4] + np.cos(theta13) * margin\n y3 = poly[5] + np.sin(theta13) * margin\n x4 = poly[6] - np.cos(theta24) * margin\n y4 = poly[7] + np.sin(theta24) * margin\n\n free_list.append([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])\n horizontal_list = sorted(horizontal_list, key=lambda item: item[4])\n\n # combine box\n new_box = []\n for poly in horizontal_list:\n if len(new_box) == 0:\n b_height = [poly[5]]\n b_ycenter = [poly[4]]\n new_box.append(poly)\n else:\n # comparable height and comparable y_center level up to ths*height\n if 
(abs(np.mean(b_height) - poly[5]) < height_ths * np.mean(b_height)) and (\n abs(np.mean(b_ycenter) - poly[4]) < ycenter_ths * np.mean(b_height)\n ):\n b_height.append(poly[5])\n b_ycenter.append(poly[4])\n new_box.append(poly)\n else:\n b_height = [poly[5]]\n b_ycenter = [poly[4]]\n combined_list.append(new_box)\n new_box = [poly]\n combined_list.append(new_box)\n\n # merge list use sort again\n for boxes in combined_list:\n if len(boxes) == 1: # one box per line\n box = boxes[0]\n margin = int(add_margin * box[5])\n merged_list.append(\n [box[0] - margin, box[1] + margin, box[2] - margin, box[3] + margin]\n )\n else: # multiple boxes per line\n boxes = sorted(boxes, key=lambda item: item[0])\n\n merged_box, new_box = [], []\n for box in boxes:\n if len(new_box) == 0:\n x_max = box[1]\n new_box.append(box)\n else:\n if abs(box[0] - x_max) < width_ths * (\n box[3] - box[2]\n ): # merge boxes\n x_max = box[1]\n new_box.append(box)\n else:\n x_max = box[1]\n merged_box.append(new_box)\n new_box = [box]\n if len(new_box) > 0:\n merged_box.append(new_box)\n\n for mbox in merged_box:\n if len(mbox) != 1: # adjacent box in same line\n # do I need to add margin here?\n x_min = min(mbox, key=lambda x: x[0])[0]\n x_max = max(mbox, key=lambda x: x[1])[1]\n y_min = min(mbox, key=lambda x: x[2])[2]\n y_max = max(mbox, key=lambda x: x[3])[3]\n\n margin = int(add_margin * (y_max - y_min))\n\n merged_list.append(\n [x_min - margin, x_max + margin, y_min - margin, y_max + margin]\n )\n else: # non adjacent box in same line\n box = mbox[0]\n\n margin = int(add_margin * (box[3] - box[2]))\n merged_list.append(\n [\n box[0] - margin,\n box[1] + margin,\n box[2] - margin,\n box[3] + margin,\n ]\n )\n # may need to check if box is really in image\n return merged_list, free_list" }, { "identifier": "reformat_input", "path": "betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/utils.py", "snippet": "def reformat_input(image):\n \"\"\"\n :param image: image file path or bytes or array\n :return:\n img (array): (original_image_height, original_image_width, 3)\n img_cv_grey (array): (original_image_height, original_image_width, 3)\n \"\"\"\n if type(image) == str:\n if image.startswith(\"http://\") or image.startswith(\"https://\"):\n tmp, _ = urlretrieve(\n image,\n reporthook=printProgressBar(\n prefix=\"Progress:\",\n suffix=\"Complete\",\n length=50,\n ),\n )\n img_cv_grey = cv2.imread(tmp, cv2.IMREAD_GRAYSCALE)\n os.remove(tmp)\n else:\n img_cv_grey = cv2.imread(image, cv2.IMREAD_GRAYSCALE)\n image = os.path.expanduser(image)\n img = load_image(image) # can accept URL\n elif type(image) == bytes:\n nparr = np.frombuffer(image, np.uint8)\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img_cv_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n elif type(image) == np.ndarray:\n if len(image.shape) == 2: # grayscale\n img_cv_grey = image\n img = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)\n elif len(image.shape) == 3 and image.shape[2] == 3: # BGRscale\n img = image\n img_cv_grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n elif len(image.shape) == 3 and image.shape[2] == 4: # RGBAscale\n img = image[:, :, :3]\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n img_cv_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n return img, img_cv_grey" } ]
import ast

import cv2
import numpy as np
from logging import getLogger
from typing import List

from PIL import Image

from .detection import get_detector, get_textbox
from .recognition import get_recognizer, get_text
from .utils import (
    diff,
    get_image_list,
    get_paragraph,
    group_text_box,
    reformat_input,
)
5,737
""" this code is adapted from https://github.com/black7375/korean_ocr_using_pororo Apache License 2.0 @yunwoong7 Apache License 2.0 @black7375 """ """ This code is primarily based on the following: https://github.com/JaidedAI/EasyOCR/blob/8af936ba1b2f3c230968dc1022d0cd3e9ca1efbb/easyocr/easyocr.py Basic usage: >>> from pororo import Pororo >>> ocr = Pororo(task="ocr", lang="ko") >>> ocr("IMAGE_FILE") """ LOGGER = getLogger(__name__) class Reader(object): def __init__( self, lang: str, det_model_ckpt_fp: str, rec_model_ckpt_fp: str, opt_fp: str, device: str, ) -> None: """ TODO @karter: modify this such that you download the pretrained checkpoint files Parameters: lang: language code. e.g, "en" or "ko" det_model_ckpt_fp: Detection model's checkpoint path e.g., 'craft_mlt_25k.pth' rec_model_ckpt_fp: Recognition model's checkpoint path opt_fp: option file path """ # Plug options in the dictionary opt2val = self.parse_options(opt_fp) # e.g., {"imgH": 64, ...} opt2val["vocab"] = self.build_vocab(opt2val["character"]) opt2val["vocab_size"] = len(opt2val["vocab"]) opt2val["device"] = device opt2val["lang"] = lang opt2val["det_model_ckpt_fp"] = det_model_ckpt_fp opt2val["rec_model_ckpt_fp"] = rec_model_ckpt_fp # Get model objects self.detector = get_detector(det_model_ckpt_fp, opt2val["device"]) self.recognizer, self.converter = get_recognizer(opt2val) self.opt2val = opt2val @staticmethod def parse_options(opt_fp: str) -> dict: opt2val = dict() for line in open(opt_fp, "r", encoding="utf8"): line = line.strip() if ": " in line: opt, val = line.split(": ", 1) try: opt2val[opt] = ast.literal_eval(val) except: opt2val[opt] = val return opt2val @staticmethod def build_vocab(character: str) -> List[str]: """Returns vocabulary (=list of characters)""" vocab = ["[blank]"] + list( character ) # dummy '[blank]' token for CTCLoss (index 0) return vocab def detect(self, img: np.ndarray, opt2val: dict): """ :return: horizontal_list (list): e.g., [[613, 1496, 51, 190], [136, 1544, 134, 508]] free_list (list): e.g., [] """ text_box = get_textbox(self.detector, img, opt2val) horizontal_list, free_list = group_text_box( text_box, opt2val["slope_ths"], opt2val["ycenter_ths"], opt2val["height_ths"], opt2val["width_ths"], opt2val["add_margin"], ) min_size = opt2val["min_size"] if min_size: horizontal_list = [ i for i in horizontal_list if max(i[1] - i[0], i[3] - i[2]) > min_size ] free_list = [ i for i in free_list
""" this code is adapted from https://github.com/black7375/korean_ocr_using_pororo Apache License 2.0 @yunwoong7 Apache License 2.0 @black7375 """ """ This code is primarily based on the following: https://github.com/JaidedAI/EasyOCR/blob/8af936ba1b2f3c230968dc1022d0cd3e9ca1efbb/easyocr/easyocr.py Basic usage: >>> from pororo import Pororo >>> ocr = Pororo(task="ocr", lang="ko") >>> ocr("IMAGE_FILE") """ LOGGER = getLogger(__name__) class Reader(object): def __init__( self, lang: str, det_model_ckpt_fp: str, rec_model_ckpt_fp: str, opt_fp: str, device: str, ) -> None: """ TODO @karter: modify this such that you download the pretrained checkpoint files Parameters: lang: language code. e.g, "en" or "ko" det_model_ckpt_fp: Detection model's checkpoint path e.g., 'craft_mlt_25k.pth' rec_model_ckpt_fp: Recognition model's checkpoint path opt_fp: option file path """ # Plug options in the dictionary opt2val = self.parse_options(opt_fp) # e.g., {"imgH": 64, ...} opt2val["vocab"] = self.build_vocab(opt2val["character"]) opt2val["vocab_size"] = len(opt2val["vocab"]) opt2val["device"] = device opt2val["lang"] = lang opt2val["det_model_ckpt_fp"] = det_model_ckpt_fp opt2val["rec_model_ckpt_fp"] = rec_model_ckpt_fp # Get model objects self.detector = get_detector(det_model_ckpt_fp, opt2val["device"]) self.recognizer, self.converter = get_recognizer(opt2val) self.opt2val = opt2val @staticmethod def parse_options(opt_fp: str) -> dict: opt2val = dict() for line in open(opt_fp, "r", encoding="utf8"): line = line.strip() if ": " in line: opt, val = line.split(": ", 1) try: opt2val[opt] = ast.literal_eval(val) except: opt2val[opt] = val return opt2val @staticmethod def build_vocab(character: str) -> List[str]: """Returns vocabulary (=list of characters)""" vocab = ["[blank]"] + list( character ) # dummy '[blank]' token for CTCLoss (index 0) return vocab def detect(self, img: np.ndarray, opt2val: dict): """ :return: horizontal_list (list): e.g., [[613, 1496, 51, 190], [136, 1544, 134, 508]] free_list (list): e.g., [] """ text_box = get_textbox(self.detector, img, opt2val) horizontal_list, free_list = group_text_box( text_box, opt2val["slope_ths"], opt2val["ycenter_ths"], opt2val["height_ths"], opt2val["width_ths"], opt2val["add_margin"], ) min_size = opt2val["min_size"] if min_size: horizontal_list = [ i for i in horizontal_list if max(i[1] - i[0], i[3] - i[2]) > min_size ] free_list = [ i for i in free_list
if max(diff([c[0] for c in i]), diff([c[1] for c in i])) > min_size
4
2023-10-26 11:26:25+00:00
8k
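The BetterOCR record above ends inside `Reader.detect`; the helpers imported at the top of the module (`reformat_input`, `get_image_list`, `get_text`, `get_paragraph`) are what a full read pass would chain together. The sketch below shows that composition under stated assumptions: it only uses the signatures visible in the record's context, the function name `read_image` and the `paragraph` flag are hypothetical, and the repo's actual `recognize`/`readtext` methods (not included in the excerpt) may differ.

```python
# Assumes the module-level imports shown in the record:
#   from .recognition import get_text
#   from .utils import reformat_input, get_image_list, get_paragraph
def read_image(reader, image, paragraph=False):
    # Normalise the input (file path / bytes / ndarray) into RGB + grayscale arrays.
    img, img_cv_grey = reformat_input(image)

    # Text detection: CRAFT boxes grouped into axis-aligned and free-form regions.
    horizontal_list, free_list = reader.detect(img, reader.opt2val)

    # Crop and resize each detected region to the recognizer's input height.
    image_list, max_width = get_image_list(
        horizontal_list, free_list, img_cv_grey,
        model_height=reader.opt2val["imgH"],
    )

    # CTC recognition over the crops: returns (box, text, confidence) triples.
    result = get_text(image_list, reader.recognizer, reader.converter, reader.opt2val)

    # Optionally merge line-level boxes into paragraph-level results.
    if paragraph:
        result = get_paragraph(result, x_ths=1, y_ths=0.5, mode="ltr")
    return result
```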
KoeAI/LLVC
minimal_rvc/pipeline.py
[ { "identifier": "SynthesizerTrnMs256NSFSid", "path": "minimal_rvc/models.py", "snippet": "class SynthesizerTrnMs256NSFSid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n emb_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n self.emb_channels = emb_channels\n self.sr = sr\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder(\n inter_channels,\n hidden_channels,\n filter_channels,\n emb_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\n \"gin_channels:\",\n gin_channels,\n \"self.spk_embed_dim:\",\n self.spk_embed_dim,\n \"emb_channels:\",\n emb_channels,\n )\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "RMVPE", "path": "minimal_rvc/rmvpe.py", "snippet": 
"class RMVPE:\n def __init__(self, model_path, is_half, device=None):\n self.resample_kernel = {}\n self.resample_kernel = {}\n self.is_half = is_half\n if device is None:\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.device = device\n self.mel_extractor = MelSpectrogram(\n is_half, 128, 16000, 1024, 160, None, 30, 8000\n ).to(device)\n if \"privateuseone\" in str(device):\n import onnxruntime as ort\n\n ort_session = ort.InferenceSession(\n \"rmvpe.onnx\", providers=[\"DmlExecutionProvider\"]\n )\n self.model = ort_session\n else:\n model = E2E(4, 1, (2, 2))\n ckpt = torch.load(model_path, map_location=\"cpu\")\n model.load_state_dict(ckpt)\n model.eval()\n if is_half == True:\n model = model.half()\n self.model = model\n self.model = self.model.to(device)\n cents_mapping = 20 * np.arange(360) + 1997.3794084376191\n self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368\n\n def mel2hidden(self, mel):\n with torch.no_grad():\n n_frames = mel.shape[-1]\n mel = F.pad(\n mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode=\"reflect\"\n )\n if \"privateuseone\" in str(self.device):\n onnx_input_name = self.model.get_inputs()[0].name\n onnx_outputs_names = self.model.get_outputs()[0].name\n hidden = self.model.run(\n [onnx_outputs_names],\n input_feed={onnx_input_name: mel.cpu().numpy()},\n )[0]\n else:\n hidden = self.model(mel)\n return hidden[:, :n_frames]\n\n def decode(self, hidden, thred=0.03):\n cents_pred = self.to_local_average_cents(hidden, thred=thred)\n f0 = 10 * (2 ** (cents_pred / 1200))\n f0[f0 == 10] = 0\n # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred])\n return f0\n\n def infer_from_audio(self, audio, thred=0.03):\n # torch.cuda.synchronize()\n t0 = ttime()\n mel = self.mel_extractor(\n torch.from_numpy(audio).float().to(self.device).unsqueeze(0), center=True\n )\n # print(123123123,mel.device.type)\n # torch.cuda.synchronize()\n t1 = ttime()\n hidden = self.mel2hidden(mel)\n # torch.cuda.synchronize()\n t2 = ttime()\n # print(234234,hidden.device.type)\n if \"privateuseone\" not in str(self.device):\n hidden = hidden.squeeze(0).cpu().numpy()\n else:\n hidden = hidden[0]\n if self.is_half == True:\n hidden = hidden.astype(\"float32\")\n\n f0 = self.decode(hidden, thred=thred)\n # torch.cuda.synchronize()\n t3 = ttime()\n # print(\"hmvpe:%s\\t%s\\t%s\\t%s\"%(t1-t0,t2-t1,t3-t2,t3-t0))\n return f0\n\n def to_local_average_cents(self, salience, thred=0.05):\n # t0 = ttime()\n center = np.argmax(salience, axis=1) # 帧长#index\n salience = np.pad(salience, ((0, 0), (4, 4))) # 帧长,368\n # t1 = ttime()\n center += 4\n todo_salience = []\n todo_cents_mapping = []\n starts = center - 4\n ends = center + 5\n for idx in range(salience.shape[0]):\n todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])\n todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])\n # t2 = ttime()\n todo_salience = np.array(todo_salience) # 帧长,9\n todo_cents_mapping = np.array(todo_cents_mapping) # 帧长,9\n product_sum = np.sum(todo_salience * todo_cents_mapping, 1)\n weight_sum = np.sum(todo_salience, 1) # 帧长\n devided = product_sum / weight_sum # 帧长\n # t3 = ttime()\n maxx = np.max(salience, axis=1) # 帧长\n devided[maxx <= thred] = 0\n # t4 = ttime()\n # print(\"decode:%s\\t%s\\t%s\\t%s\" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))\n return devided" } ]
import os
import traceback

import faiss
import numpy as np
import pyworld
import scipy.signal as signal
import torch
import torch.nn.functional as F
import torchcrepe
from typing import *
from fairseq.models.hubert import HubertModel
from torch import Tensor

from .models import SynthesizerTrnMs256NSFSid
from .rmvpe import RMVPE
4,751
source[source < 0.001] = np.nan target = np.interp( np.arange(0, len(source) * p_len, len(source)) / p_len, np.arange(0, len(source)), source ) f0 = np.nan_to_num(target) return f0 # Resized f0 def get_f0_official_crepe_computation( self, x, f0_min, f0_max, model="full", ): # Pick a batch size that doesn't cause memory errors on your gpu batch_size = 512 # Compute pitch using first gpu audio = torch.tensor(np.copy(x))[None].float() f0, pd = torchcrepe.predict( audio, self.sr, self.window, f0_min, f0_max, model, batch_size=batch_size, device=self.device, return_periodicity=True, ) pd = torchcrepe.filter.median(pd, 3) f0 = torchcrepe.filter.mean(f0, 3) f0[pd < 0.1] = 0 f0 = f0[0].cpu().numpy() return f0 def get_f0( self, x: np.ndarray, p_len: int, f0_up_key: int, f0_method: str, f0_relative: bool, inp_f0: np.ndarray = None, ): f0_min = 50 f0_max = 1100 f0_mel_min = 1127 * np.log(1 + f0_min / 700) f0_mel_max = 1127 * np.log(1 + f0_max / 700) if f0_method == "harvest": f0, t = pyworld.harvest( x.astype(np.double), fs=self.sr, f0_ceil=f0_max, f0_floor=f0_min, frame_period=10, ) f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) f0 = signal.medfilt(f0, 3) elif f0_method == "dio": f0, t = pyworld.dio( x.astype(np.double), fs=self.sr, f0_ceil=f0_max, f0_floor=f0_min, frame_period=10, ) f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) f0 = signal.medfilt(f0, 3) elif f0_method == "mangio-crepe": f0 = self.get_f0_crepe_computation( x, f0_min, f0_max, p_len, 160, "full") elif f0_method == "crepe": f0 = self.get_f0_official_crepe_computation( x, f0_min, f0_max, "full") elif f0_method == "rmvpe": f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) if f0_relative: if f0_method == "rmvpe" or f0_method == "rmvpe_onnx": # this is the average f0 of /test_wavs/2086-149214-0000.wav # by calculating f0 relative to this wav, we can ensure # consistent output pitch when converting from different speakers rel_f0 = 126.21 else: raise ValueError("TODO: find rel_f0 for " + f0_method) mean_f0 = np.mean(f0[f0 > 0]) offset = np.round(12 * np.log2(mean_f0 / rel_f0)) # print("offset: " + str(offset)) f0_up_key = f0_up_key - offset f0 *= pow(2, f0_up_key / 12) tf0 = self.sr // self.window # f0 points per second if inp_f0 is not None: delta_t = np.round( (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 ).astype("int16") replace_f0 = np.interp( list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] ) shape = f0[self.x_pad * tf0: self.x_pad * tf0 + len(replace_f0)].shape[0] f0[self.x_pad * tf0: self.x_pad * tf0 + len(replace_f0)] = replace_f0[ :shape ] f0bak = f0.copy() f0_mel = 1127 * np.log(1 + f0 / 700) f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( f0_mel_max - f0_mel_min ) + 1 f0_mel[f0_mel <= 1] = 1 f0_mel[f0_mel > 255] = 255 f0_coarse = np.rint(f0_mel).astype(int) return f0_coarse, f0bak # 1-0 def _convert( self, model: HubertModel, embedding_output_layer: int,
# This module is based on code from ddPn08, liujing04, and teftef6220 # https://github.com/ddPn08/rvc-webui # https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI # https://github.com/teftef6220/Voice_Separation_and_Selection # These modules are licensed under the MIT License. # from faiss.swigfaiss_avx2 import IndexIVFFlat # cause crash on windows' faiss-cpu installed from pip class VocalConvertPipeline(object): def __init__(self, tgt_sr: int, device: Union[str, torch.device], is_half: bool, no_pad: bool = False): if isinstance(device, str): device = torch.device(device) if device.type == "cuda": vram = torch.cuda.get_device_properties( device).total_memory / 1024**3 else: vram = None if vram is not None and vram <= 4: self.x_pad = 1 self.x_query = 5 self.x_center = 30 self.x_max = 32 elif vram is not None and vram <= 5: self.x_pad = 1 self.x_query = 6 self.x_center = 38 self.x_max = 41 else: self.x_pad = 3 self.x_query = 10 self.x_center = 60 self.x_max = 65 if no_pad: self.x_pad = 0 self.sr = 16000 # hubert input sample rate self.window = 160 # hubert input window self.t_pad = self.sr * self.x_pad # padding time for each utterance self.t_pad_tgt = tgt_sr * self.x_pad self.t_pad2 = self.t_pad * 2 self.t_query = self.sr * self.x_query # query time before and after query point self.t_center = self.sr * self.x_center # query cut point position self.t_max = self.sr * self.x_max # max time for no query self.device = device self.is_half = is_half self.model_rmvpe = RMVPE( f"llvc_models/models/f0/rmvpe.pt", is_half=self.is_half, device=self.device, ) def get_optimal_torch_device(self, index: int = 0) -> torch.device: # Get cuda device if torch.cuda.is_available(): # Very fast return torch.device(f"cuda:{index % torch.cuda.device_count()}") elif torch.backends.mps.is_available(): return torch.device("mps") # Insert an else here to grab "xla" devices if available. TO DO later. Requires the torch_xla.core.xla_model library # Else wise return the "cpu" as a torch device, return torch.device("cpu") def get_f0_crepe_computation( self, x, f0_min, f0_max, p_len, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths means more pitch accuracy but longer inference time. hop_length=64, model="full", # Either use crepe-tiny "tiny" or crepe "full". Default is full ): # fixes the F.conv2D exception. We needed to convert double to float. 
x = x.astype(np.float32) x /= np.quantile(np.abs(x), 0.999) torch_device = self.get_optimal_torch_device() audio = torch.from_numpy(x).to(torch_device, copy=True) audio = torch.unsqueeze(audio, dim=0) if audio.ndim == 2 and audio.shape[0] > 1: audio = torch.mean(audio, dim=0, keepdim=True).detach() audio = audio.detach() print("Initiating prediction with a crepe_hop_length of: " + str(hop_length)) pitch: Tensor = torchcrepe.predict( audio, self.sr, hop_length, f0_min, f0_max, model, batch_size=hop_length * 2, device=torch_device, pad=True ) p_len = p_len or x.shape[0] // hop_length # Resize the pitch for final f0 source = np.array(pitch.squeeze(0).cpu().float().numpy()) source[source < 0.001] = np.nan target = np.interp( np.arange(0, len(source) * p_len, len(source)) / p_len, np.arange(0, len(source)), source ) f0 = np.nan_to_num(target) return f0 # Resized f0 def get_f0_official_crepe_computation( self, x, f0_min, f0_max, model="full", ): # Pick a batch size that doesn't cause memory errors on your gpu batch_size = 512 # Compute pitch using first gpu audio = torch.tensor(np.copy(x))[None].float() f0, pd = torchcrepe.predict( audio, self.sr, self.window, f0_min, f0_max, model, batch_size=batch_size, device=self.device, return_periodicity=True, ) pd = torchcrepe.filter.median(pd, 3) f0 = torchcrepe.filter.mean(f0, 3) f0[pd < 0.1] = 0 f0 = f0[0].cpu().numpy() return f0 def get_f0( self, x: np.ndarray, p_len: int, f0_up_key: int, f0_method: str, f0_relative: bool, inp_f0: np.ndarray = None, ): f0_min = 50 f0_max = 1100 f0_mel_min = 1127 * np.log(1 + f0_min / 700) f0_mel_max = 1127 * np.log(1 + f0_max / 700) if f0_method == "harvest": f0, t = pyworld.harvest( x.astype(np.double), fs=self.sr, f0_ceil=f0_max, f0_floor=f0_min, frame_period=10, ) f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) f0 = signal.medfilt(f0, 3) elif f0_method == "dio": f0, t = pyworld.dio( x.astype(np.double), fs=self.sr, f0_ceil=f0_max, f0_floor=f0_min, frame_period=10, ) f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) f0 = signal.medfilt(f0, 3) elif f0_method == "mangio-crepe": f0 = self.get_f0_crepe_computation( x, f0_min, f0_max, p_len, 160, "full") elif f0_method == "crepe": f0 = self.get_f0_official_crepe_computation( x, f0_min, f0_max, "full") elif f0_method == "rmvpe": f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) if f0_relative: if f0_method == "rmvpe" or f0_method == "rmvpe_onnx": # this is the average f0 of /test_wavs/2086-149214-0000.wav # by calculating f0 relative to this wav, we can ensure # consistent output pitch when converting from different speakers rel_f0 = 126.21 else: raise ValueError("TODO: find rel_f0 for " + f0_method) mean_f0 = np.mean(f0[f0 > 0]) offset = np.round(12 * np.log2(mean_f0 / rel_f0)) # print("offset: " + str(offset)) f0_up_key = f0_up_key - offset f0 *= pow(2, f0_up_key / 12) tf0 = self.sr // self.window # f0 points per second if inp_f0 is not None: delta_t = np.round( (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 ).astype("int16") replace_f0 = np.interp( list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] ) shape = f0[self.x_pad * tf0: self.x_pad * tf0 + len(replace_f0)].shape[0] f0[self.x_pad * tf0: self.x_pad * tf0 + len(replace_f0)] = replace_f0[ :shape ] f0bak = f0.copy() f0_mel = 1127 * np.log(1 + f0 / 700) f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( f0_mel_max - f0_mel_min ) + 1 f0_mel[f0_mel <= 1] = 1 f0_mel[f0_mel > 255] = 255 f0_coarse = np.rint(f0_mel).astype(int) return f0_coarse, f0bak # 1-0 def _convert( 
self, model: HubertModel, embedding_output_layer: int,
net_g: SynthesizerTrnMs256NSFSid,
0
2023-10-28 01:58:49+00:00
8k
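The `f0_relative` branch of `get_f0` in the LLVC record above normalises transposition against a fixed reference pitch of 126.21 Hz (the mean f0 of a test wav, per the comment in the code): it measures the whole-semitone distance between the input speaker's mean voiced pitch and that reference, and subtracts it from the requested key shift so the output register stays consistent across speakers. A minimal sketch of that calculation in isolation (the helper name and example values are illustrative):

```python
import numpy as np

REL_F0 = 126.21  # mean f0 of the reference wav, as noted in the pipeline code


def relative_transpose(f0: np.ndarray, f0_up_key: int, rel_f0: float = REL_F0) -> np.ndarray:
    """Shift f0 by f0_up_key semitones, measured relative to a reference pitch."""
    voiced = f0[f0 > 0]
    mean_f0 = voiced.mean()
    # Whole-semitone distance between this speaker's mean pitch and the reference.
    offset = np.round(12 * np.log2(mean_f0 / rel_f0))
    # Subtracting the offset makes the output pitch track the reference register,
    # regardless of the input speaker's natural register.
    return f0 * 2 ** ((f0_up_key - offset) / 12)


# Example: a speaker exactly one octave above the reference gets pulled back down.
f0 = np.array([0.0, 252.42, 252.42, 0.0, 252.42])
print(relative_transpose(f0, f0_up_key=0))  # voiced frames land near 126.21 Hz
```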
aurelio-labs/semantic-router
tests/unit/test_hybrid_layer.py
[ { "identifier": "BaseEncoder", "path": "semantic_router/encoders/base.py", "snippet": "class BaseEncoder(BaseModel):\n name: str\n score_threshold: float\n type: str = Field(default=\"base\")\n\n class Config:\n arbitrary_types_allowed = True\n\n def __call__(self, docs: List[str]) -> List[List[float]]:\n raise NotImplementedError(\"Subclasses must implement this method\")" }, { "identifier": "BM25Encoder", "path": "semantic_router/encoders/bm25.py", "snippet": "class BM25Encoder(BaseEncoder):\n model: Optional[Any] = None\n idx_mapping: Optional[Dict[int, int]] = None\n type: str = \"sparse\"\n\n def __init__(\n self,\n name: str = \"bm25\",\n score_threshold: float = 0.82,\n use_default_params: bool = True,\n ):\n super().__init__(name=name, score_threshold=score_threshold)\n try:\n from pinecone_text.sparse import BM25Encoder as encoder\n except ImportError:\n raise ImportError(\n \"Please install pinecone-text to use BM25Encoder. \"\n \"You can install it with: `pip install 'semantic-router[hybrid]'`\"\n )\n\n self.model = encoder()\n\n if use_default_params:\n logger.info(\"Downloading and initializing default sBM25 model parameters.\")\n self.model = encoder.default()\n self._set_idx_mapping()\n\n def _set_idx_mapping(self):\n params = self.model.get_params()\n doc_freq = params[\"doc_freq\"]\n if isinstance(doc_freq, dict):\n indices = doc_freq[\"indices\"]\n self.idx_mapping = {int(idx): i for i, idx in enumerate(indices)}\n else:\n raise TypeError(\"Expected a dictionary for 'doc_freq'\")\n\n def __call__(self, docs: List[str]) -> List[List[float]]:\n if self.model is None or self.idx_mapping is None:\n raise ValueError(\"Model or index mapping is not initialized.\")\n if len(docs) == 1:\n sparse_dicts = self.model.encode_queries(docs)\n elif len(docs) > 1:\n sparse_dicts = self.model.encode_documents(docs)\n else:\n raise ValueError(\"No documents to encode.\")\n\n embeds = [[0.0] * len(self.idx_mapping)] * len(docs)\n for i, output in enumerate(sparse_dicts):\n indices = output[\"indices\"]\n values = output[\"values\"]\n for idx, val in zip(indices, values):\n if idx in self.idx_mapping:\n position = self.idx_mapping[idx]\n embeds[i][position] = val\n return embeds\n\n def fit(self, docs: List[str]):\n if self.model is None:\n raise ValueError(\"Model is not initialized.\")\n self.model.fit(docs)\n self._set_idx_mapping()" }, { "identifier": "CohereEncoder", "path": "semantic_router/encoders/cohere.py", "snippet": "class CohereEncoder(BaseEncoder):\n client: Optional[cohere.Client] = None\n type: str = \"cohere\"\n\n def __init__(\n self,\n name: Optional[str] = None,\n cohere_api_key: Optional[str] = None,\n score_threshold: float = 0.3,\n ):\n if name is None:\n name = os.getenv(\"COHERE_MODEL_NAME\", \"embed-english-v3.0\")\n super().__init__(name=name, score_threshold=score_threshold)\n cohere_api_key = cohere_api_key or os.getenv(\"COHERE_API_KEY\")\n if cohere_api_key is None:\n raise ValueError(\"Cohere API key cannot be 'None'.\")\n try:\n self.client = cohere.Client(cohere_api_key)\n except Exception as e:\n raise ValueError(\n f\"Cohere API client failed to initialize. Error: {e}\"\n ) from e\n\n def __call__(self, docs: List[str]) -> List[List[float]]:\n if self.client is None:\n raise ValueError(\"Cohere client is not initialized.\")\n try:\n embeds = self.client.embed(docs, input_type=\"search_query\", model=self.name)\n return embeds.embeddings\n except Exception as e:\n raise ValueError(f\"Cohere API call failed. 
Error: {e}\") from e" }, { "identifier": "OpenAIEncoder", "path": "semantic_router/encoders/openai.py", "snippet": "class OpenAIEncoder(BaseEncoder):\n client: Optional[openai.Client]\n type: str = \"openai\"\n\n def __init__(\n self,\n name: Optional[str] = None,\n openai_api_key: Optional[str] = None,\n score_threshold: float = 0.82,\n ):\n if name is None:\n name = os.getenv(\"OPENAI_MODEL_NAME\", \"text-embedding-ada-002\")\n super().__init__(name=name, score_threshold=score_threshold)\n api_key = openai_api_key or os.getenv(\"OPENAI_API_KEY\")\n if api_key is None:\n raise ValueError(\"OpenAI API key cannot be 'None'.\")\n try:\n self.client = openai.Client(api_key=api_key)\n except Exception as e:\n raise ValueError(\n f\"OpenAI API client failed to initialize. Error: {e}\"\n ) from e\n\n def __call__(self, docs: List[str]) -> List[List[float]]:\n if self.client is None:\n raise ValueError(\"OpenAI client is not initialized.\")\n embeds = None\n error_message = \"\"\n\n # Exponential backoff\n for j in range(3):\n try:\n embeds = self.client.embeddings.create(input=docs, model=self.name)\n if embeds.data:\n break\n except OpenAIError as e:\n sleep(2**j)\n error_message = str(e)\n logger.warning(f\"Retrying in {2**j} seconds...\")\n except Exception as e:\n logger.error(f\"OpenAI API call failed. Error: {error_message}\")\n raise ValueError(f\"OpenAI API call failed. Error: {e}\") from e\n\n if (\n not embeds\n or not isinstance(embeds, CreateEmbeddingResponse)\n or not embeds.data\n ):\n raise ValueError(f\"No embeddings returned. Error: {error_message}\")\n\n embeddings = [embeds_obj.embedding for embeds_obj in embeds.data]\n return embeddings" }, { "identifier": "TfidfEncoder", "path": "semantic_router/encoders/tfidf.py", "snippet": "class TfidfEncoder(BaseEncoder):\n idf: ndarray = np.array([])\n word_index: Dict = {}\n\n def __init__(self, name: str = \"tfidf\", score_threshold: float = 0.82):\n # TODO default score_threshold not thoroughly tested, should optimize\n super().__init__(name=name, score_threshold=score_threshold)\n self.word_index = {}\n self.idf = np.array([])\n\n def __call__(self, docs: list[str]) -> list[list[float]]:\n if len(self.word_index) == 0 or self.idf.size == 0:\n raise ValueError(\"Vectorizer is not initialized.\")\n if len(docs) == 0:\n raise ValueError(\"No documents to encode.\")\n\n docs = [self._preprocess(doc) for doc in docs]\n tf = self._compute_tf(docs)\n tfidf = tf * self.idf\n return tfidf.tolist()\n\n def fit(self, routes: list[Route]):\n docs = []\n for route in routes:\n for doc in route.utterances:\n docs.append(self._preprocess(doc))\n self.word_index = self._build_word_index(docs)\n self.idf = self._compute_idf(docs)\n\n def _build_word_index(self, docs: list[str]) -> dict:\n words = set()\n for doc in docs:\n for word in doc.split():\n words.add(word)\n word_index = {word: i for i, word in enumerate(words)}\n return word_index\n\n def _compute_tf(self, docs: list[str]) -> np.ndarray:\n if len(self.word_index) == 0:\n raise ValueError(\"Word index is not initialized.\")\n tf = np.zeros((len(docs), len(self.word_index)))\n for i, doc in enumerate(docs):\n word_counts = Counter(doc.split())\n for word, count in word_counts.items():\n if word in self.word_index:\n tf[i, self.word_index[word]] = count\n # L2 normalization\n tf = tf / norm(tf, axis=1, keepdims=True)\n return tf\n\n def _compute_idf(self, docs: list[str]) -> np.ndarray:\n if len(self.word_index) == 0:\n raise ValueError(\"Word index is not initialized.\")\n idf = 
np.zeros(len(self.word_index))\n for doc in docs:\n words = set(doc.split())\n for word in words:\n if word in self.word_index:\n idf[self.word_index[word]] += 1\n idf = np.log(len(docs) / (idf + 1))\n return idf\n\n def _preprocess(self, doc: str) -> str:\n lowercased_doc = doc.lower()\n no_punctuation_doc = lowercased_doc.translate(\n str.maketrans(\"\", \"\", string.punctuation)\n )\n return no_punctuation_doc" }, { "identifier": "AzureOpenAIEncoder", "path": "semantic_router/encoders/zure.py", "snippet": "class AzureOpenAIEncoder(BaseEncoder):\n client: Optional[openai.AzureOpenAI] = None\n type: str = \"azure\"\n api_key: Optional[str] = None\n deployment_name: Optional[str] = None\n azure_endpoint: Optional[str] = None\n api_version: Optional[str] = None\n model: Optional[str] = None\n\n def __init__(\n self,\n api_key: Optional[str] = None,\n deployment_name: Optional[str] = None,\n azure_endpoint: Optional[str] = None,\n api_version: Optional[str] = None,\n model: Optional[str] = None,\n score_threshold: float = 0.82,\n ):\n name = deployment_name\n if name is None:\n name = os.getenv(\"AZURE_OPENAI_DEPLOYMENT_NAME\", \"text-embedding-ada-002\")\n super().__init__(name=name, score_threshold=score_threshold)\n self.api_key = api_key\n self.deployment_name = deployment_name\n self.azure_endpoint = azure_endpoint\n self.api_version = api_version\n self.model = model\n if self.api_key is None:\n self.api_key = os.getenv(\"AZURE_OPENAI_API_KEY\")\n if self.api_key is None:\n raise ValueError(\"No Azure OpenAI API key provided.\")\n if self.deployment_name is None:\n self.deployment_name = os.getenv(\n \"AZURE_OPENAI_DEPLOYMENT_NAME\", \"text-embedding-ada-002\"\n )\n # deployment_name may still be None, but it is optional in the API\n if self.azure_endpoint is None:\n self.azure_endpoint = os.getenv(\"AZURE_OPENAI_ENDPOINT\")\n if self.azure_endpoint is None:\n raise ValueError(\"No Azure OpenAI endpoint provided.\")\n if self.api_version is None:\n self.api_version = os.getenv(\"AZURE_OPENAI_API_VERSION\")\n if self.api_version is None:\n raise ValueError(\"No Azure OpenAI API version provided.\")\n if self.model is None:\n self.model = os.getenv(\"AZURE_OPENAI_MODEL\")\n if self.model is None:\n raise ValueError(\"No Azure OpenAI model provided.\")\n assert (\n self.api_key is not None\n and self.azure_endpoint is not None\n and self.api_version is not None\n and self.model is not None\n )\n\n try:\n self.client = openai.AzureOpenAI(\n azure_deployment=str(deployment_name) if deployment_name else None,\n api_key=str(api_key),\n azure_endpoint=str(azure_endpoint),\n api_version=str(api_version),\n # _strict_response_validation=True,\n )\n except Exception as e:\n raise ValueError(\n f\"OpenAI API client failed to initialize. Error: {e}\"\n ) from e\n\n def __call__(self, docs: List[str]) -> List[List[float]]:\n if self.client is None:\n raise ValueError(\"OpenAI client is not initialized.\")\n embeds = None\n error_message = \"\"\n\n # Exponential backoff\n for j in range(3):\n try:\n embeds = self.client.embeddings.create(\n input=docs, model=str(self.model)\n )\n if embeds.data:\n break\n except OpenAIError as e:\n # print full traceback\n import traceback\n\n traceback.print_exc()\n sleep(2**j)\n error_message = str(e)\n logger.warning(f\"Retrying in {2**j} seconds...\")\n except Exception as e:\n logger.error(f\"Azure OpenAI API call failed. Error: {error_message}\")\n raise ValueError(f\"Azure OpenAI API call failed. 
Error: {e}\") from e\n\n if (\n not embeds\n or not isinstance(embeds, CreateEmbeddingResponse)\n or not embeds.data\n ):\n raise ValueError(f\"No embeddings returned. Error: {error_message}\")\n\n embeddings = [embeds_obj.embedding for embeds_obj in embeds.data]\n return embeddings" }, { "identifier": "HybridRouteLayer", "path": "semantic_router/hybrid_layer.py", "snippet": "class HybridRouteLayer:\n index = None\n sparse_index = None\n categories = None\n score_threshold: float\n\n def __init__(\n self,\n encoder: BaseEncoder,\n sparse_encoder: Optional[BM25Encoder] = None,\n routes: List[Route] = [],\n alpha: float = 0.3,\n ):\n self.encoder = encoder\n self.score_threshold = self.encoder.score_threshold\n\n if sparse_encoder is None:\n logger.warning(\"No sparse_encoder provided. Using default BM25Encoder.\")\n self.sparse_encoder = BM25Encoder()\n else:\n self.sparse_encoder = sparse_encoder\n\n self.alpha = alpha\n self.routes = routes\n if isinstance(self.sparse_encoder, TfidfEncoder) and hasattr(\n self.sparse_encoder, \"fit\"\n ):\n self.sparse_encoder.fit(routes)\n # if routes list has been passed, we initialize index now\n if routes:\n # initialize index now\n # for route in tqdm(routes):\n # self._add_route(route=route)\n self._add_routes(routes)\n\n def __call__(self, text: str) -> Optional[str]:\n results = self._query(text)\n top_class, top_class_scores = self._semantic_classify(results)\n passed = self._pass_threshold(top_class_scores, self.score_threshold)\n if passed:\n return top_class\n else:\n return None\n\n def add(self, route: Route):\n self._add_route(route=route)\n\n def _add_route(self, route: Route):\n self.routes += [route]\n\n self.update_dense_embeddings_index(route.utterances)\n\n if isinstance(self.sparse_encoder, TfidfEncoder) and hasattr(\n self.sparse_encoder, \"fit\"\n ):\n self.sparse_encoder.fit(self.routes)\n # re-build index\n self.sparse_index = None\n all_utterances = [\n utterance for route in self.routes for utterance in route.utterances\n ]\n self.update_sparse_embeddings_index(all_utterances)\n else:\n self.update_sparse_embeddings_index(route.utterances)\n\n # create route array\n if self.categories is None:\n self.categories = np.array([route.name] * len(route.utterances))\n else:\n str_arr = np.array([route.name] * len(route.utterances))\n self.categories = np.concatenate([self.categories, str_arr])\n self.routes.append(route)\n\n def _add_routes(self, routes: List[Route]):\n # create embeddings for all routes\n logger.info(\"Creating embeddings for all routes...\")\n all_utterances = [\n utterance for route in routes for utterance in route.utterances\n ]\n self.update_dense_embeddings_index(all_utterances)\n self.update_sparse_embeddings_index(all_utterances)\n\n # create route array\n route_names = [route.name for route in routes for _ in route.utterances]\n route_array = np.array(route_names)\n self.categories = (\n np.concatenate([self.categories, route_array])\n if self.categories is not None\n else route_array\n )\n\n def update_dense_embeddings_index(self, utterances: list):\n dense_embeds = np.array(self.encoder(utterances))\n # create utterance array (the dense index)\n self.index = (\n np.concatenate([self.index, dense_embeds])\n if self.index is not None\n else dense_embeds\n )\n\n def update_sparse_embeddings_index(self, utterances: list):\n sparse_embeds = np.array(self.sparse_encoder(utterances))\n # create sparse utterance array\n self.sparse_index = (\n np.concatenate([self.sparse_index, sparse_embeds])\n if 
self.sparse_index is not None\n else sparse_embeds\n )\n\n def _query(self, text: str, top_k: int = 5):\n \"\"\"Given some text, encodes and searches the index vector space to\n retrieve the top_k most similar records.\n \"\"\"\n # create dense query vector\n xq_d = np.array(self.encoder([text]))\n xq_d = np.squeeze(xq_d) # Reduce to 1d array.\n # create sparse query vector\n xq_s = np.array(self.sparse_encoder([text]))\n xq_s = np.squeeze(xq_s)\n # convex scaling\n xq_d, xq_s = self._convex_scaling(xq_d, xq_s)\n\n if self.index is not None and self.sparse_index is not None:\n # calculate dense vec similarity\n index_norm = norm(self.index, axis=1)\n xq_d_norm = norm(xq_d.T)\n sim_d = np.dot(self.index, xq_d.T) / (index_norm * xq_d_norm)\n # calculate sparse vec similarity\n sparse_norm = norm(self.sparse_index, axis=1)\n xq_s_norm = norm(xq_s.T)\n sim_s = np.dot(self.sparse_index, xq_s.T) / (sparse_norm * xq_s_norm)\n total_sim = sim_d + sim_s\n # get indices of top_k records\n top_k = min(top_k, total_sim.shape[0])\n idx = np.argpartition(total_sim, -top_k)[-top_k:]\n scores = total_sim[idx]\n # get the utterance categories (route names)\n routes = self.categories[idx] if self.categories is not None else []\n return [{\"route\": d, \"score\": s.item()} for d, s in zip(routes, scores)]\n else:\n logger.warning(\"No index found. Please add routes to the layer.\")\n return []\n\n def _convex_scaling(self, dense: np.ndarray, sparse: np.ndarray):\n # scale sparse and dense vecs\n dense = np.array(dense) * self.alpha\n sparse = np.array(sparse) * (1 - self.alpha)\n return dense, sparse\n\n def _semantic_classify(self, query_results: List[Dict]) -> Tuple[str, List[float]]:\n scores_by_class: Dict[str, List[float]] = {}\n for result in query_results:\n score = result[\"score\"]\n route = result[\"route\"]\n if route in scores_by_class:\n scores_by_class[route].append(score)\n else:\n scores_by_class[route] = [score]\n\n # Calculate total score for each class\n total_scores = {route: sum(scores) for route, scores in scores_by_class.items()}\n top_class = max(total_scores, key=lambda x: total_scores[x], default=None)\n\n # Return the top class and its associated scores\n if top_class is not None:\n return str(top_class), scores_by_class.get(top_class, [])\n else:\n logger.warning(\"No classification found for semantic classifier.\")\n return \"\", []\n\n def _pass_threshold(self, scores: List[float], threshold: float) -> bool:\n if scores:\n return max(scores) > threshold\n else:\n return False" }, { "identifier": "Route", "path": "semantic_router/route.py", "snippet": "class Route(BaseModel):\n name: str\n utterances: List[str]\n description: Optional[str] = None\n function_schema: Optional[Dict[str, Any]] = None\n llm: Optional[BaseLLM] = None\n\n def __call__(self, query: str) -> RouteChoice:\n if self.function_schema:\n if not self.llm:\n raise ValueError(\n \"LLM is required for dynamic routes. 
Please ensure the `llm` \"\n \"attribute is set.\"\n )\n # if a function schema is provided we generate the inputs\n extracted_inputs = self.llm.extract_function_inputs(\n query=query, function_schema=self.function_schema\n )\n func_call = extracted_inputs\n else:\n # otherwise we just pass None for the call\n func_call = None\n return RouteChoice(name=self.name, function_call=func_call)\n\n def to_dict(self) -> Dict[str, Any]:\n return self.dict()\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]):\n return cls(**data)\n\n @classmethod\n def from_dynamic_route(cls, llm: BaseLLM, entity: Union[BaseModel, Callable]):\n \"\"\"\n Generate a dynamic Route object from a function or Pydantic model using LLM\n \"\"\"\n schema = function_call.get_schema(item=entity)\n dynamic_route = cls._generate_dynamic_route(llm=llm, function_schema=schema)\n dynamic_route.function_schema = schema\n return dynamic_route\n\n @classmethod\n def _parse_route_config(cls, config: str) -> str:\n # Regular expression to match content inside <config></config>\n config_pattern = r\"<config>(.*?)</config>\"\n match = re.search(config_pattern, config, re.DOTALL)\n\n if match:\n config_content = match.group(1).strip() # Get the matched content\n return config_content\n else:\n raise ValueError(\"No <config></config> tags found in the output.\")\n\n @classmethod\n def _generate_dynamic_route(cls, llm: BaseLLM, function_schema: Dict[str, Any]):\n logger.info(\"Generating dynamic route...\")\n\n prompt = f\"\"\"\nYou are tasked to generate a JSON configuration based on the provided\nfunction schema. Please follow the template below, no other tokens allowed:\n\n<config>\n{{\n \"name\": \"<function_name>\",\n \"utterances\": [\n \"<example_utterance_1>\",\n \"<example_utterance_2>\",\n \"<example_utterance_3>\",\n \"<example_utterance_4>\",\n \"<example_utterance_5>\"]\n}}\n</config>\n\nOnly include the \"name\" and \"utterances\" keys in your answer.\nThe \"name\" should match the function name and the \"utterances\"\nshould comprise a list of 5 example phrases that could be used to invoke\nthe function. Use real values instead of placeholders.\n\nInput schema:\n{function_schema}\n\"\"\"\n\n llm_input = [Message(role=\"user\", content=prompt)]\n output = llm(llm_input)\n if not output:\n raise Exception(\"No output generated for dynamic route\")\n\n route_config = cls._parse_route_config(config=output)\n\n logger.info(f\"Generated route config:\\n{route_config}\")\n\n if is_valid(route_config):\n route_config_dict = json.loads(route_config)\n route_config_dict[\"llm\"] = llm\n return Route.from_dict(route_config_dict)\n raise Exception(\"No config generated\")" } ]
import pytest from semantic_router.encoders import ( AzureOpenAIEncoder, BaseEncoder, BM25Encoder, CohereEncoder, OpenAIEncoder, TfidfEncoder, ) from semantic_router.hybrid_layer import HybridRouteLayer from semantic_router.route import Route
6,069
def mock_encoder_call(utterances): # Define a mapping of utterances to return values mock_responses = { "Hello": [0.1, 0.2, 0.3], "Hi": [0.4, 0.5, 0.6], "Goodbye": [0.7, 0.8, 0.9], "Bye": [1.0, 1.1, 1.2], "Au revoir": [1.3, 1.4, 1.5], } return [mock_responses.get(u, [0, 0, 0]) for u in utterances] @pytest.fixture def base_encoder(mocker): mock_base_encoder = BaseEncoder(name="test-encoder", score_threshold=0.5) mocker.patch.object(BaseEncoder, "__call__", return_value=[[0.1, 0.2, 0.3]]) return mock_base_encoder @pytest.fixture def cohere_encoder(mocker): mocker.patch.object(CohereEncoder, "__call__", side_effect=mock_encoder_call) return CohereEncoder(name="test-cohere-encoder", cohere_api_key="test_api_key") @pytest.fixture def openai_encoder(mocker): mocker.patch.object(OpenAIEncoder, "__call__", side_effect=mock_encoder_call) return OpenAIEncoder(name="test-openai-encoder", openai_api_key="test_api_key") @pytest.fixture def azure_encoder(mocker):
def mock_encoder_call(utterances): # Define a mapping of utterances to return values mock_responses = { "Hello": [0.1, 0.2, 0.3], "Hi": [0.4, 0.5, 0.6], "Goodbye": [0.7, 0.8, 0.9], "Bye": [1.0, 1.1, 1.2], "Au revoir": [1.3, 1.4, 1.5], } return [mock_responses.get(u, [0, 0, 0]) for u in utterances] @pytest.fixture def base_encoder(mocker): mock_base_encoder = BaseEncoder(name="test-encoder", score_threshold=0.5) mocker.patch.object(BaseEncoder, "__call__", return_value=[[0.1, 0.2, 0.3]]) return mock_base_encoder @pytest.fixture def cohere_encoder(mocker): mocker.patch.object(CohereEncoder, "__call__", side_effect=mock_encoder_call) return CohereEncoder(name="test-cohere-encoder", cohere_api_key="test_api_key") @pytest.fixture def openai_encoder(mocker): mocker.patch.object(OpenAIEncoder, "__call__", side_effect=mock_encoder_call) return OpenAIEncoder(name="test-openai-encoder", openai_api_key="test_api_key") @pytest.fixture def azure_encoder(mocker):
mocker.patch.object(AzureOpenAIEncoder, "__call__", side_effect=mock_encoder_call)
5
2023-10-30 12:12:45+00:00
8k
baaivision/JudgeLM
judgelm/serve/gradio_web_server_multi.py
[ { "identifier": "SESSION_EXPIRATION_TIME", "path": "judgelm/constants.py", "snippet": "SESSION_EXPIRATION_TIME = 3600" }, { "identifier": "build_side_by_side_ui_anony", "path": "judgelm/serve/gradio_block_arena_anony.py", "snippet": "def build_side_by_side_ui_anony(models):\n notice_markdown = \"\"\"\n# ⚔️ Chatbot Arena ⚔️ \n### Rules\n- Chat with two anonymous models side-by-side and vote for which one is better!\n- You can do multiple rounds of conversations before voting.\n- The names of the models will be revealed after your vote. Conversations with identity keywords (e.g., ChatGPT, Bard, Vicuna) or any votes after the names are revealed will not count towards the leaderboard.\n- Click \"Clear history\" to start a new round.\n- | [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |\n\n### Terms of use\nBy using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. **The service collects user dialogue data and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) license.** The demo works better on desktop devices with a wide screen.\n\n### Battle\nPlease scroll down and start chatting. You can view a leaderboard of participating models in the fourth tab above labeled 'Leaderboard' or by clicking [here](?leaderboard). The models include both closed-source models (e.g., ChatGPT) and open-source models (e.g., Vicuna).\n\"\"\"\n\n states = [gr.State() for _ in range(num_sides)]\n model_selectors = [None] * num_sides\n chatbots = [None] * num_sides\n\n gr.Markdown(notice_markdown, elem_id=\"notice_markdown\")\n\n with gr.Box(elem_id=\"share-region-anony\"):\n with gr.Row():\n for i in range(num_sides):\n with gr.Column():\n model_selectors[i] = gr.Markdown(anony_names[i])\n\n with gr.Row():\n for i in range(num_sides):\n label = \"Model A\" if i == 0 else \"Model B\"\n with gr.Column():\n chatbots[i] = gr.Chatbot(\n label=label, elem_id=f\"chatbot\", visible=False, height=550\n )\n\n with gr.Box() as button_row:\n with gr.Row():\n leftvote_btn = gr.Button(value=\"👈 A is better\", interactive=False)\n rightvote_btn = gr.Button(value=\"👉 B is better\", interactive=False)\n tie_btn = gr.Button(value=\"🤝 Tie\", interactive=False)\n bothbad_btn = gr.Button(value=\"👎 Both are bad\", interactive=False)\n\n with gr.Row():\n with gr.Column(scale=20):\n textbox = gr.Textbox(\n show_label=False,\n placeholder=\"Enter text and press ENTER\",\n visible=False,\n container=False,\n )\n with gr.Column(scale=1, min_width=50):\n send_btn = gr.Button(value=\"Send\", visible=False)\n\n with gr.Row() as button_row2:\n regenerate_btn = gr.Button(value=\"🔄 Regenerate\", interactive=False)\n clear_btn = gr.Button(value=\"🗑️ Clear history\", interactive=False)\n share_btn = gr.Button(value=\"📷 Share\")\n\n with gr.Accordion(\"Parameters\", open=False, visible=True) as parameter_row:\n temperature = gr.Slider(\n minimum=0.0,\n maximum=1.0,\n value=0.7,\n step=0.1,\n interactive=True,\n label=\"Temperature\",\n )\n top_p = gr.Slider(\n minimum=0.0,\n maximum=1.0,\n value=1.0,\n step=0.1,\n interactive=True,\n label=\"Top P\",\n )\n max_output_tokens = gr.Slider(\n minimum=16,\n 
maximum=1024,\n value=512,\n step=64,\n interactive=True,\n label=\"Max output tokens\",\n )\n\n gr.Markdown(learn_more_md)\n\n # Register listeners\n btn_list = [\n leftvote_btn,\n rightvote_btn,\n tie_btn,\n bothbad_btn,\n regenerate_btn,\n clear_btn,\n ]\n leftvote_btn.click(\n leftvote_last_response,\n states + model_selectors,\n model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n rightvote_btn.click(\n rightvote_last_response,\n states + model_selectors,\n model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n tie_btn.click(\n tievote_last_response,\n states + model_selectors,\n model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n bothbad_btn.click(\n bothbad_vote_last_response,\n states + model_selectors,\n model_selectors + [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n regenerate_btn.click(\n regenerate, states, states + chatbots + [textbox] + btn_list\n ).then(\n bot_response_multi,\n states + [temperature, top_p, max_output_tokens],\n states + chatbots + btn_list,\n ).then(\n flash_buttons, [], btn_list\n )\n clear_btn.click(\n clear_history, None, states + chatbots + model_selectors + [textbox] + btn_list\n )\n\n share_js = \"\"\"\nfunction (a, b, c, d) {\n const captureElement = document.querySelector('#share-region-anony');\n html2canvas(captureElement)\n .then(canvas => {\n canvas.style.display = 'none'\n document.body.appendChild(canvas)\n return canvas\n })\n .then(canvas => {\n const image = canvas.toDataURL('image/png')\n const a = document.createElement('a')\n a.setAttribute('download', 'chatbot-arena.png')\n a.setAttribute('href', image)\n a.click()\n canvas.remove()\n });\n return [a, b, c, d];\n}\n\"\"\"\n share_btn.click(share_click, states + model_selectors, [], _js=share_js)\n\n textbox.submit(\n add_text,\n states + model_selectors + [textbox],\n states + chatbots + [textbox] + btn_list,\n ).then(\n bot_response_multi,\n states + [temperature, top_p, max_output_tokens],\n states + chatbots + btn_list,\n ).then(\n flash_buttons, [], btn_list\n )\n\n send_btn.click(\n add_text,\n states + model_selectors + [textbox],\n states + chatbots + [textbox] + btn_list,\n ).then(\n bot_response_multi,\n states + [temperature, top_p, max_output_tokens],\n states + chatbots + btn_list,\n ).then(\n flash_buttons, [], btn_list\n )\n\n return (\n states,\n model_selectors,\n chatbots,\n textbox,\n send_btn,\n button_row,\n button_row2,\n parameter_row,\n )" }, { "identifier": "load_demo_side_by_side_anony", "path": "judgelm/serve/gradio_block_arena_anony.py", "snippet": "def load_demo_side_by_side_anony(models_, url_params):\n global models\n models = models_\n\n states = (None,) * num_sides\n selector_updates = (\n gr.Markdown.update(visible=True),\n gr.Markdown.update(visible=True),\n )\n\n return (\n states\n + selector_updates\n + (gr.Chatbot.update(visible=True),) * num_sides\n + (\n gr.Textbox.update(visible=True),\n gr.Box.update(visible=True),\n gr.Row.update(visible=True),\n gr.Row.update(visible=True),\n gr.Accordion.update(visible=True),\n )\n )" }, { "identifier": "set_global_vars_anony", "path": "judgelm/serve/gradio_block_arena_anony.py", "snippet": "def set_global_vars_anony(enable_moderation_):\n global enable_moderation\n enable_moderation = enable_moderation_" }, { "identifier": "build_side_by_side_ui_named", "path": "judgelm/serve/gradio_block_arena_named.py", "snippet": "def build_side_by_side_ui_named(models):\n notice_markdown = \"\"\"\n# ⚔️ 
Chatbot Arena ⚔️ \n### Rules\n- Chat with two models side-by-side and vote for which one is better!\n- You pick the models you want to chat with.\n- You can do multiple rounds of conversations before voting.\n- Click \"Clear history\" to start a new round.\n- | [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |\n\n### Terms of use\nBy using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. **The service collects user dialogue data and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) license.** The demo works better on desktop devices with a wide screen.\n\n### Choose two models to chat with (view [leaderboard](?leaderboard))\n\"\"\"\n\n states = [gr.State() for _ in range(num_sides)]\n model_selectors = [None] * num_sides\n chatbots = [None] * num_sides\n\n model_description_md = get_model_description_md(models)\n notice = gr.Markdown(\n notice_markdown + model_description_md, elem_id=\"notice_markdown\"\n )\n\n with gr.Box(elem_id=\"share-region-named\"):\n with gr.Row():\n for i in range(num_sides):\n with gr.Column():\n model_selectors[i] = gr.Dropdown(\n choices=models,\n value=models[i] if len(models) > i else \"\",\n interactive=True,\n show_label=False,\n container=False,\n )\n\n with gr.Row():\n for i in range(num_sides):\n label = \"Model A\" if i == 0 else \"Model B\"\n with gr.Column():\n chatbots[i] = gr.Chatbot(\n label=label, elem_id=f\"chatbot\", visible=False, height=550\n )\n\n with gr.Box() as button_row:\n with gr.Row():\n leftvote_btn = gr.Button(value=\"👈 A is better\", interactive=False)\n rightvote_btn = gr.Button(value=\"👉 B is better\", interactive=False)\n tie_btn = gr.Button(value=\"🤝 Tie\", interactive=False)\n bothbad_btn = gr.Button(value=\"👎 Both are bad\", interactive=False)\n\n with gr.Row():\n with gr.Column(scale=20):\n textbox = gr.Textbox(\n show_label=False,\n placeholder=\"Enter text and press ENTER\",\n visible=False,\n container=False,\n )\n with gr.Column(scale=1, min_width=50):\n send_btn = gr.Button(value=\"Send\", visible=False)\n\n with gr.Row() as button_row2:\n regenerate_btn = gr.Button(value=\"🔄 Regenerate\", interactive=False)\n clear_btn = gr.Button(value=\"🗑️ Clear history\", interactive=False)\n share_btn = gr.Button(value=\"📷 Share\")\n\n with gr.Accordion(\"Parameters\", open=False, visible=True) as parameter_row:\n temperature = gr.Slider(\n minimum=0.0,\n maximum=1.0,\n value=0.7,\n step=0.1,\n interactive=True,\n label=\"Temperature\",\n )\n top_p = gr.Slider(\n minimum=0.0,\n maximum=1.0,\n value=1.0,\n step=0.1,\n interactive=True,\n label=\"Top P\",\n )\n max_output_tokens = gr.Slider(\n minimum=16,\n maximum=1024,\n value=512,\n step=64,\n interactive=True,\n label=\"Max output tokens\",\n )\n\n gr.Markdown(learn_more_md)\n\n # Register listeners\n btn_list = [\n leftvote_btn,\n rightvote_btn,\n tie_btn,\n bothbad_btn,\n regenerate_btn,\n clear_btn,\n ]\n leftvote_btn.click(\n leftvote_last_response,\n states + model_selectors,\n [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n rightvote_btn.click(\n rightvote_last_response,\n states + model_selectors,\n 
[textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n tie_btn.click(\n tievote_last_response,\n states + model_selectors,\n [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n bothbad_btn.click(\n bothbad_vote_last_response,\n states + model_selectors,\n [textbox, leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],\n )\n regenerate_btn.click(\n regenerate, states, states + chatbots + [textbox] + btn_list\n ).then(\n bot_response_multi,\n states + [temperature, top_p, max_output_tokens],\n states + chatbots + btn_list,\n ).then(\n flash_buttons, [], btn_list\n )\n clear_btn.click(clear_history, None, states + chatbots + [textbox] + btn_list)\n\n share_js = \"\"\"\nfunction (a, b, c, d) {\n const captureElement = document.querySelector('#share-region-named');\n html2canvas(captureElement)\n .then(canvas => {\n canvas.style.display = 'none'\n document.body.appendChild(canvas)\n return canvas\n })\n .then(canvas => {\n const image = canvas.toDataURL('image/png')\n const a = document.createElement('a')\n a.setAttribute('download', 'chatbot-arena.png')\n a.setAttribute('href', image)\n a.click()\n canvas.remove()\n });\n return [a, b, c, d];\n}\n\"\"\"\n share_btn.click(share_click, states + model_selectors, [], _js=share_js)\n\n for i in range(num_sides):\n model_selectors[i].change(\n clear_history, None, states + chatbots + [textbox] + btn_list\n )\n\n textbox.submit(\n add_text,\n states + model_selectors + [textbox],\n states + chatbots + [textbox] + btn_list,\n ).then(\n bot_response_multi,\n states + [temperature, top_p, max_output_tokens],\n states + chatbots + btn_list,\n ).then(\n flash_buttons, [], btn_list\n )\n send_btn.click(\n add_text,\n states + model_selectors + [textbox],\n states + chatbots + [textbox] + btn_list,\n ).then(\n bot_response_multi,\n states + [temperature, top_p, max_output_tokens],\n states + chatbots + btn_list,\n ).then(\n flash_buttons, [], btn_list\n )\n\n return (\n states,\n model_selectors,\n chatbots,\n textbox,\n send_btn,\n button_row,\n button_row2,\n parameter_row,\n )" }, { "identifier": "load_demo_side_by_side_named", "path": "judgelm/serve/gradio_block_arena_named.py", "snippet": "def load_demo_side_by_side_named(models, url_params):\n states = (None,) * num_sides\n\n model_left = models[0] if len(models) > 0 else \"\"\n if len(models) > 1:\n weights = ([8] * 4 + [4] * 8 + [1] * 32)[: len(models) - 1]\n weights = weights / np.sum(weights)\n model_right = np.random.choice(models[1:], p=weights)\n else:\n model_right = model_left\n\n selector_updates = (\n gr.Dropdown.update(choices=models, value=model_left, visible=True),\n gr.Dropdown.update(choices=models, value=model_right, visible=True),\n )\n\n return (\n states\n + selector_updates\n + (gr.Chatbot.update(visible=True),) * num_sides\n + (\n gr.Textbox.update(visible=True),\n gr.Box.update(visible=True),\n gr.Row.update(visible=True),\n gr.Row.update(visible=True),\n gr.Accordion.update(visible=True),\n )\n )" }, { "identifier": "set_global_vars_named", "path": "judgelm/serve/gradio_block_arena_named.py", "snippet": "def set_global_vars_named(enable_moderation_):\n global enable_moderation\n enable_moderation = enable_moderation_" }, { "identifier": "set_global_vars", "path": "judgelm/serve/gradio_web_server.py", "snippet": "class State:\n def __init__(self, model_name):\n def to_gradio_chatbot(self):\n def dict(self):\ndef set_global_vars(controller_url_, enable_moderation_):\ndef get_conv_log_filename():\ndef get_model_list(controller_url, add_chatgpt, 
add_claude, add_palm):\ndef load_demo_single(models, url_params):\ndef load_demo(url_params, request: gr.Request):\ndef vote_last_response(state, vote_type, model_selector, request: gr.Request):\ndef upvote_last_response(state, model_selector, request: gr.Request):\ndef downvote_last_response(state, model_selector, request: gr.Request):\ndef flag_last_response(state, model_selector, request: gr.Request):\ndef regenerate(state, request: gr.Request):\ndef clear_history(request: gr.Request):\ndef add_text(state, model_selector, text, request: gr.Request):\ndef add_pretext(state, model_selector, question_text, answer1_text, answer2_text, request: gr.Request):\ndef post_process_code(code):\ndef model_worker_stream_iter(\n conv,\n model_name,\n worker_addr,\n prompt,\n temperature,\n repetition_penalty,\n top_p,\n max_new_tokens,\n):\ndef bot_response(state, temperature, top_p, max_new_tokens, request: gr.Request):\ndef get_model_description_md(models):\ndef build_single_model_ui(models, add_promotion_links=False):\ndef build_demo(models):\ndef build_single_model_ui_v2(models, add_promotion_links=False):\ndef build_demo_v2(models):" }, { "identifier": "build_leaderboard_tab", "path": "judgelm/serve/monitor/monitor.py", "snippet": "def build_leaderboard_tab(elo_results_file, leaderboard_table_file):\n if elo_results_file is None: # Do live update\n md = \"Loading ...\"\n p1 = p2 = p3 = p4 = None\n else:\n with open(elo_results_file, \"rb\") as fin:\n elo_results = pickle.load(fin)\n\n md = make_leaderboard_md(elo_results)\n p1 = elo_results[\"win_fraction_heatmap\"]\n p2 = elo_results[\"battle_count_heatmap\"]\n p3 = elo_results[\"bootstrap_elo_rating\"]\n p4 = elo_results[\"average_win_rate_bar\"]\n\n md_1 = gr.Markdown(md, elem_id=\"leaderboard_markdown\")\n\n if leaderboard_table_file:\n data = load_leaderboard_table_csv(leaderboard_table_file)\n headers = [\n \"Model\",\n \"Arena Elo rating\",\n \"MT-bench (score)\",\n \"MMLU\",\n \"License\",\n ]\n values = []\n for item in data:\n row = []\n for key in headers:\n value = item[key]\n row.append(value)\n values.append(row)\n values.sort(key=lambda x: -x[1] if not np.isnan(x[1]) else 1e9)\n\n headers[1] = \"⭐ \" + headers[1]\n headers[2] = \"📈 \" + headers[2]\n\n gr.Dataframe(\n headers=headers,\n datatype=[\"markdown\", \"number\", \"number\", \"number\", \"str\"],\n value=values,\n elem_id=\"leaderboard_dataframe\",\n )\n gr.Markdown(\n \"If you want to see more models, please help us [add them](https://github.com/lm-sys/FastChat/blob/main/docs/arena.md#how-to-add-a-new-model).\"\n )\n else:\n pass\n\n gr.Markdown(\n f\"\"\"## More Statistics for Chatbot Arena\\n\nWe added some additional figures to show more statistics. The code for generating them is also included in this [notebook]({notebook_url}).\nPlease note that you may see different orders from different ranking methods. This is expected for models that perform similarly, as demonstrated by the confidence interval in the bootstrap figure. Going forward, we prefer the classical Elo calculation because of its scalability and interpretability. You can find more discussions in this blog [post](https://lmsys.org/blog/2023-05-03-arena/).\n\"\"\"\n )\n\n leader_component_values[:] = [md, p1, p2, p3, p4]\n\n with gr.Row():\n with gr.Column():\n gr.Markdown(\n \"#### Figure 1: Fraction of Model A Wins for All Non-tied A vs. 
B Battles\"\n )\n plot_1 = gr.Plot(p1, show_label=False)\n with gr.Column():\n gr.Markdown(\n \"#### Figure 2: Battle Count for Each Combination of Models (without Ties)\"\n )\n plot_2 = gr.Plot(p2, show_label=False)\n with gr.Row():\n with gr.Column():\n gr.Markdown(\n \"#### Figure 3: Bootstrap of Elo Estimates (1000 Rounds of Random Sampling)\"\n )\n plot_3 = gr.Plot(p3, show_label=False)\n with gr.Column():\n gr.Markdown(\n \"#### Figure 4: Average Win Rate Against All Other Models (Assuming Uniform Sampling and No Ties)\"\n )\n plot_4 = gr.Plot(p4, show_label=False)\n return [md_1, plot_1, plot_2, plot_3, plot_4]" }, { "identifier": "build_logger", "path": "judgelm/utils.py", "snippet": "def extract_jsonl(file_path):\ndef save_jsonl(data_list, file_path):\ndef build_logger(logger_name, logger_filename):\n def __init__(self, logger, log_level=logging.INFO):\n def __getattr__(self, attr):\n def write(self, buf):\n def flush(self):\ndef disable_torch_init():\ndef get_gpu_memory(max_gpus=None):\ndef violates_moderation(text):\ndef clean_flant5_ckpt(ckpt_path):\ndef pretty_print_semaphore(semaphore):\ndef iter_over_async(\n async_gen: AsyncGenerator, event_loop: AbstractEventLoop\n) -> Generator:\n async def get_next():\ndef detect_language(text: str) -> str:\ndef parse_gradio_auth_creds(filename: str):\ndef is_partial_stop(output: str, stop_str: str):\ndef _make_r_io_base(f, mode: str):\ndef jload(f, mode=\"r\"):\ndef jlload(f, mode='r'):\ndef run_cmd(cmd: str):\ndef is_sentence_complete(output: str):\ndef get_context_length(config):\nclass StreamToLogger(object):\nSEQUENCE_LENGTH_KEYS = [\n \"max_sequence_length\",\n \"seq_length\",\n \"max_position_embeddings\",\n \"max_seq_len\",\n \"model_max_length\",\n]" } ]
import argparse import pickle import time import gradio as gr from judgelm.constants import ( SESSION_EXPIRATION_TIME, ) from judgelm.serve.gradio_block_arena_anony import ( build_side_by_side_ui_anony, load_demo_side_by_side_anony, set_global_vars_anony, ) from judgelm.serve.gradio_block_arena_named import ( build_side_by_side_ui_named, load_demo_side_by_side_named, set_global_vars_named, ) from judgelm.serve.gradio_web_server import ( set_global_vars, block_css, build_single_model_ui, get_model_list, load_demo_single, ip_expiration_dict, ) from judgelm.serve.monitor.monitor import build_leaderboard_tab from judgelm.utils import ( build_logger, get_window_url_params_js, parse_gradio_auth_creds, )
6,231
""" The gradio demo server with multiple tabs. It supports chatting with a single model or chatting with two models side-by-side. """ logger = build_logger("gradio_web_server_multi", "gradio_web_server_multi.log") def load_demo(url_params, request: gr.Request): global models ip = request.client.host logger.info(f"load_demo. ip: {ip}. params: {url_params}") ip_expiration_dict[ip] = time.time() + SESSION_EXPIRATION_TIME selected = 0 if "arena" in url_params: selected = 1 elif "compare" in url_params: selected = 2 elif "leaderboard" in url_params: selected = 3 if args.model_list_mode == "reload": if args.anony_only_for_proprietary_model: models = get_model_list(args.controller_url, False, False, False) else: models = get_model_list( args.controller_url, args.add_chatgpt, args.add_claude, args.add_palm ) single_updates = load_demo_single(models, url_params) models_anony = list(models) if args.anony_only_for_proprietary_model: # Only enable these models in anony battles. if args.add_chatgpt: models_anony += ["gpt-4", "gpt-3.5-turbo"] if args.add_claude: models_anony += ["claude-2", "claude-instant-1"] if args.add_palm: models_anony += ["palm-2"]
""" The gradio demo server with multiple tabs. It supports chatting with a single model or chatting with two models side-by-side. """ logger = build_logger("gradio_web_server_multi", "gradio_web_server_multi.log") def load_demo(url_params, request: gr.Request): global models ip = request.client.host logger.info(f"load_demo. ip: {ip}. params: {url_params}") ip_expiration_dict[ip] = time.time() + SESSION_EXPIRATION_TIME selected = 0 if "arena" in url_params: selected = 1 elif "compare" in url_params: selected = 2 elif "leaderboard" in url_params: selected = 3 if args.model_list_mode == "reload": if args.anony_only_for_proprietary_model: models = get_model_list(args.controller_url, False, False, False) else: models = get_model_list( args.controller_url, args.add_chatgpt, args.add_claude, args.add_palm ) single_updates = load_demo_single(models, url_params) models_anony = list(models) if args.anony_only_for_proprietary_model: # Only enable these models in anony battles. if args.add_chatgpt: models_anony += ["gpt-4", "gpt-3.5-turbo"] if args.add_claude: models_anony += ["claude-2", "claude-instant-1"] if args.add_palm: models_anony += ["palm-2"]
side_by_side_anony_updates = load_demo_side_by_side_anony(models_anony, url_params)
2
2023-10-26 19:41:07+00:00
8k
EulerSearch/embedding_studio
embedding_studio/embeddings/data/loaders/s3/s3_loader.py
[ { "identifier": "settings", "path": "embedding_studio/core/config.py", "snippet": "class Settings(BaseSettings):\n API_V1_STR: str = \"/api/v1\"\n SECRET_KEY: str = secrets.token_urlsafe(32)\n ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8\n BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = []\n FINETUNING_MONGO_HOST: str = os.getenv(\"FINETUNING_MONGO_HOST\", \"mongo\")\n FINETUNING_MONGO_PORT: int = os.getenv(\"FINETUNING_MONGO_PORT\", 27017)\n FINETUNING_MONGO_DB_NAME: str = os.getenv(\n \"FINETUNING_MONGO_DB_NAME\", \"embedding_studio\"\n )\n FINETUNING_MONGO_USERNAME: str = os.getenv(\n \"FINETUNING_MONGO_USERNAME\", \"root\"\n )\n FINETUNING_MONGO_PASSWORD: str = os.getenv(\n \"FINETUNING_MONGO_PASSWORD\", \"mongopassword\"\n )\n FINETUNING_MONGO_URL: str = (\n f\"mongodb://{FINETUNING_MONGO_USERNAME}:{FINETUNING_MONGO_PASSWORD}@\"\n f\"{FINETUNING_MONGO_HOST}:{FINETUNING_MONGO_PORT}\"\n )\n CLICKSTREAM_MONGO_HOST: str = os.getenv(\"CLICKSTREAM_MONGO_HOST\", \"mongo\")\n CLICKSTREAM_MONGO_PORT: int = os.getenv(\"CLICKSTREAM_MONGO_PORT\", 27017)\n CLICKSTREAM_MONGO_DB_NAME: str = os.getenv(\n \"CLICKSTREAM_MONGO_DB_NAME\", \"embedding_studio\"\n )\n CLICKSTREAM_MONGO_USERNAME: str = os.getenv(\n \"CLICKSTREAM_MONGO_USERNAME\", \"root\"\n )\n CLICKSTREAM_MONGO_PASSWORD: str = os.getenv(\n \"CLICKSTREAM_MONGO_PASSWORD\", \"mongopassword\"\n )\n CLICKSTREAM_MONGO_URL: str = (\n f\"mongodb://{CLICKSTREAM_MONGO_USERNAME}:{CLICKSTREAM_MONGO_PASSWORD}@\"\n f\"{CLICKSTREAM_MONGO_HOST}:{CLICKSTREAM_MONGO_PORT}\"\n )\n REDIS_HOST: str = os.getenv(\"REDIS_HOST\", \"localhost\")\n REDIS_PORT: int = os.getenv(\"REDIS_PORT\", 6379)\n REDIS_PASSWORD: str = os.getenv(\"REDIS_PASSWORD\", \"redispassword\")\n REDIS_URL: str = f\"redis://{REDIS_HOST}:{REDIS_PORT}/0\"\n MINIO_HOST: str = os.getenv(\"MINIO_HOST\", \"localhost\")\n MINIO_PORT: int = os.getenv(\"MINIO_PORT\", 9000)\n MINIO_ROOT_USER: str = os.getenv(\"MINIO_ROOT_USER\", \"root\")\n MINIO_ROOT_PASSWORD: str = os.getenv(\n \"MINIO_ROOT_PASSWORD\", \"miniopassword\"\n )\n MINIO_DEFAULT_BUCKETS: str = os.getenv(\n \"MINIO_DEFAULT_BUCKETS\", \"embeddingstudio\"\n )\n MINIO_ACCESS_KEY: str = os.getenv(\n \"MINIO_ACCESS_KEY\", \"mtGNiEvoTL6C0EXAMPLE\"\n )\n MINIO_SECRET_KEY: str = os.getenv(\n \"MINIO_SECRET_KEY\", \"HY5JserXAaWmphNyCpQPEXAMPLEKEYEXAMPLEKEY\"\n )\n MYSQL_HOST: str = os.getenv(\"MYSQL_HOST\", \"localhost\")\n MYSQL_PORT: int = os.getenv(\"MYSQL_PORT\", 3306)\n MYSQL_DATABASE: str = os.getenv(\"MYSQL_DATABASE\", \"mlflow\")\n MYSQL_USER: str = os.getenv(\"MYSQL_USER\", \"mlflow_user\")\n MYSQL_PASSWORD: str = os.getenv(\"MYSQL_PASSWORD\", \"Baxp3O5rUvpIxiD77BfZ\")\n MYSQL_ROOT_PASSWORD: str = os.getenv(\n \"MYSQL_ROOT_PASSWORD\", \"PrK5qmPTDsm2IYKvHVG8\"\n )\n MLFLOW_HOST: str = os.getenv(\"MLFLOW_HOST\", \"localhost\")\n MLFLOW_PORT: int = os.getenv(\"MLFLOW_PORT\", 5001)\n MLFLOW_TRACKING_URI: str = f\"http://{MLFLOW_HOST}:{MLFLOW_PORT}\"\n ES_PLUGINS_PATH: str = os.getenv(\"ES_PLUGINS_PATH\", \"plugins\")\n FINE_TUNING_WORKER_MAX_RETRIES: int = os.getenv(\n \"FINE_TUNING_WORKER_MAX_RETRIES\", 3\n )\n FINE_TUNING_WORKER_TIME_LIMIT: int = os.getenv(\n \"FINE_TUNING_WORKER_TIME_LIMIT\", 18000000\n )\n DEFAULT_MAX_ATTEMPTS: int = os.getenv(\"DEFAULT_MAX_ATTEMPTS\", 3)\n DEFAULT_WAIT_TIME_SECONDS: float = os.getenv(\n \"DEFAULT_WAIT_TIME_SECONDS\", 3.0\n )\n S3_READ_CREDENTIALS_ATTEMPTS: int = os.getenv(\n \"S3_READ_CREDENTIALS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n S3_READ_WAIT_TIME_SECONDS: float = os.getenv(\n 
\"S3_READ_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n S3_DOWNLOAD_DATA_ATTEMPTS: int = os.getenv(\n \"S3_DOWNLOAD_DATA_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS: float = os.getenv(\n \"S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_METRIC_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_METRIC_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_PARAM_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_PARAM_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOAD_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOAD_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_DELETE_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_DELETE_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_SEARCH_RUNS_ATTEMPTS: int = os.getenv(\n \"MLFLOW_SEARCH_RUNS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_END_RUN_ATTEMPTS: int = os.getenv(\n \"MLFLOW_END_RUN_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_END_RUN_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_END_RUN_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_GET_RUN_ATTEMPTS: int = os.getenv(\n \"MLFLOW_GET_RUN_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_GET_RUN_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_GET_RUN_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS: int = os.getenv(\n \"MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS\",\n DEFAULT_WAIT_TIME_SECONDS,\n )\n MLFLOW_DELETE_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_DELETE_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_CREATE_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_CREATE_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_GET_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_GET_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n CLICKSTREAM_TIME_MAX_DELTA_MINUS_SEC: int = os.getenv(\n \"CLICKSTREAM_TIME_MAX_DELTA_MINUS_SEC\", 12 * 60 * 60\n )\n CLICKSTREAM_TIME_MAX_DELTA_PLUS_SEC: int = os.getenv(\n \"CLICKSTREAM_TIME_MAX_DELTA_PLUS_SEC\", 5 * 60\n )" }, { "identifier": "DataLoader", "path": "embedding_studio/embeddings/data/loaders/data_loader.py", 
"snippet": "class DataLoader(ABC):\n def __init__(self, **kwargs):\n pass\n\n @abstractmethod\n def load(self, items_data: List[ItemMeta]) -> Dataset:\n raise NotImplemented" }, { "identifier": "FailedToLoadAnythingFromAWSS3", "path": "embedding_studio/embeddings/data/loaders/s3/exceptions/failed_to_load_anything_from_s3.py", "snippet": "class FailedToLoadAnythingFromAWSS3(Exception):\n def __init__(self):\n super(FailedToLoadAnythingFromAWSS3, self).__init__(\n f\"Failed to load any file from AWS S3\"\n )" }, { "identifier": "S3FileMeta", "path": "embedding_studio/embeddings/data/loaders/s3/item_meta.py", "snippet": "class S3FileMeta(ItemMeta):\n bucket: str\n file: str\n\n class Config:\n arbitrary_types_allowed = True\n\n @property\n def id(self) -> str:\n return f\"{self.bucket}/{self.file}\"\n\n def __hash__(self) -> int:\n sha256_hash = hashlib.sha256()\n sha256_hash.update(self.id.encode(\"utf-8\"))\n hash_result: str = sha256_hash.hexdigest()\n hash_int = int(hash_result, 16)\n return hash_int" }, { "identifier": "RetryConfig", "path": "embedding_studio/workers/fine_tuning/utils/config.py", "snippet": "class RetryConfig(BaseModel):\n default_params: RetryParams = RetryParams()\n _specific_retries: Dict[str, RetryParams] = dict()\n\n def __getitem__(self, item: str) -> RetryParams:\n return self._specific_retries.get(item, self.default_params)\n\n def __setitem__(self, key: str, value: RetryParams):\n self._specific_retries[key] = value" }, { "identifier": "RetryParams", "path": "embedding_studio/workers/fine_tuning/utils/config.py", "snippet": "class RetryParams(BaseModel):\n max_attempts: int = 3\n wait_time_seconds: int = 3" }, { "identifier": "retry_method", "path": "embedding_studio/workers/fine_tuning/utils/retry.py", "snippet": "def retry_method(name: str = None):\n def decorator(func):\n \"\"\"Decorator to run provided class method with attempts\"\"\"\n\n def wrapper(self, *args, **kwargs):\n func_name = name if name else func.__name__\n retry_params = self.retry_config[func_name]\n\n if (\n retry_params.max_attempts is None\n or retry_params.max_attempts <= 1\n ):\n return func(self, *args, **kwargs)\n\n attempts = 0\n exception = None\n while attempts < retry_params.max_attempts:\n try:\n result = func(self, *args, **kwargs)\n # If the function succeeds, return the result\n return result\n except RequestException as e:\n if (\n hasattr(e, \"response\")\n and e.response is not None\n and 500 <= e.response.status_code < 600\n ):\n logger.error(\n f\"Server Error (5xx): {e.response.status_code}\"\n )\n # Handle server error appropriately, e.g., retry, log, or raise a custom exception\n exception = e\n else:\n logger.exception(f\"Request Exception: {e}\")\n raise e\n\n except Timeout as e:\n logger.error(f\"Timeout: {e}\")\n exception = e\n\n except ConnectionError as e:\n logger.error(f\"Connection error: {e}\")\n exception = e\n\n except Exception as e: # Handle other request exceptions\n if (\n hasattr(self, \"attempt_exception_types\")\n and type(e) in self.attempt_exception_types\n ) or (\n hasattr(self, \"is_retryable_error\")\n and self.is_retryable_error(e)\n ):\n logger.error(\n f\"Catch exception with type {type(e).__name__} that leads to new attempt\"\n )\n exception = e\n else:\n raise\n\n if exception is not None:\n logger.info(\n f\"Attempt {attempts + 1} failed with error: {exception}\"\n )\n attempts += 1\n time.sleep(retry_params.wait_time_seconds)\n\n raise MaxAttemptsReachedException(\n retry_params.max_attempts\n ) from exception\n\n return wrapper\n\n 
return decorator" } ]
import io import logging import uuid import boto3 from typing import Dict, Iterable, List, Optional from botocore import UNSIGNED from botocore.client import Config from botocore.exceptions import ClientError, EndpointConnectionError from datasets import Dataset from PIL import Image from pydantic import BaseModel from embedding_studio.core.config import settings from embedding_studio.embeddings.data.loaders.data_loader import DataLoader from embedding_studio.embeddings.data.loaders.s3.exceptions.failed_to_load_anything_from_s3 import ( FailedToLoadAnythingFromAWSS3, ) from embedding_studio.embeddings.data.loaders.s3.item_meta import S3FileMeta from embedding_studio.workers.fine_tuning.utils.config import ( RetryConfig, RetryParams, ) from embedding_studio.workers.fine_tuning.utils.retry import retry_method
4,465
return Image.open(outfile) except ClientError as e: if e.response["Error"]["Code"] == "404": logger.error(f"Object {file} not found in bucket {bucket}") return None else: # Raise the exception for any other unexpected errors raise e class AWSS3DataLoader(DataLoader): def __init__(self, retry_config: Optional[RetryConfig] = None, **kwargs): """Items loader from AWS S3. :param max_attempts: maximum number of attempts (default: 10) :param wait_time_seconds: time to wait between (default: 10) :param kwargs: dict data for AWSS3Credentials """ super(AWSS3DataLoader, self).__init__(**kwargs) self.retry_config = ( retry_config if retry_config else AWSS3DataLoader._get_default_retry_config() ) self.credentials = AWSS3Credentials(**kwargs) self.attempt_exception_types = [EndpointConnectionError] @staticmethod def _get_default_retry_config() -> RetryConfig: default_retry_params = RetryParams( max_attempts=settings.DEFAULT_MAX_ATTEMPTS, wait_time_seconds=settings.DEFAULT_WAIT_TIME_SECONDS, ) config = RetryConfig(default_params=default_retry_params) config["credentials"] = RetryParams( max_attempts=settings.S3_READ_CREDENTIALS_ATTEMPTS, wait_time_seconds=settings.S3_READ_WAIT_TIME_SECONDS, ) config["download_data"] = RetryParams( max_attempts=settings.S3_DOWNLOAD_DATA_ATTEMPTS, wait_time_seconds=settings.S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS, ) return config @retry_method(name="download_data") def _read_from_s3(self, client, bucket: str, file: str) -> Image: return read_from_s3(client, bucket, file) @retry_method(name="credentials") def _get_client(self, task_id: str): if ( self.credentials.aws_access_key_id is None or self.credentials.aws_secret_access_key is None ) and not self.credentials.use_system_info: logger.warning( "No specific AWS credentials, use Anonymous session" ) s3_client = boto3.client( "s3", config=Config(signature_version=UNSIGNED) ) else: sts_client = boto3.client( "sts", aws_access_key_id=self.credentials.aws_access_key_id, aws_secret_access_key=self.credentials.aws_secret_access_key, ) if self.credentials.external_id: assumed_role_object = sts_client.assume_role( RoleArn=self.credentials.role_arn, RoleSessionName=task_id, ExternalId=self.credentials.external_id, ) else: assumed_role_object = sts_client.assume_role( RoleArn=self.credentials.role_arn, RoleSessionName=task_id, ) credentials = assumed_role_object["Credentials"] s3_client = boto3.client( "s3", aws_access_key_id=credentials["AccessKeyId"], aws_secret_access_key=credentials["SecretAccessKey"], aws_session_token=credentials["SessionToken"], ) return s3_client def _generate_dataset_from_s3( self, files: List[S3FileMeta] ) -> Iterable[Dict]: if len(files) == 0: logger.warning("Nothing to download") else: logger.info("Connecting to aws s3...") task_id: str = str(uuid.uuid4()) try: s3_client = self._get_client(task_id) logger.info("Start downloading data from S3...") bad_items_count = 0 for val in files: image = None try: image: Image = read_from_s3( s3_client, val.bucket, val.file ) except Exception as e: logger.exception( f"Unable to download an item: {val.bucket}/{val.file} Exception: {str(e)}" ) if image is None: logger.error( f"Unable to download {val.file} from {val.bucket}" ) bad_items_count += 1 continue yield {"item": image, "item_id": val.id} if bad_items_count == len(files):
logger = logging.getLogger(__name__) class AWSS3Credentials(BaseModel): role_arn: Optional[str] = None aws_access_key_id: Optional[str] = None aws_secret_access_key: Optional[str] = None external_id: Optional[str] = None use_system_info: bool = False def read_from_s3(client, bucket: str, file: str) -> Image: if not isinstance(bucket, str) or len(bucket) == 0: raise ValueError("bucket value should be not empty string") if not isinstance(file, str) or len(file) == 0: raise ValueError("file value should be not empty string") outfile = io.BytesIO() try: client.download_fileobj(bucket, file, outfile) outfile.seek(0) return Image.open(outfile) except ClientError as e: if e.response["Error"]["Code"] == "404": logger.error(f"Object {file} not found in bucket {bucket}") return None else: # Raise the exception for any other unexpected errors raise e class AWSS3DataLoader(DataLoader): def __init__(self, retry_config: Optional[RetryConfig] = None, **kwargs): """Items loader from AWS S3. :param max_attempts: maximum number of attempts (default: 10) :param wait_time_seconds: time to wait between (default: 10) :param kwargs: dict data for AWSS3Credentials """ super(AWSS3DataLoader, self).__init__(**kwargs) self.retry_config = ( retry_config if retry_config else AWSS3DataLoader._get_default_retry_config() ) self.credentials = AWSS3Credentials(**kwargs) self.attempt_exception_types = [EndpointConnectionError] @staticmethod def _get_default_retry_config() -> RetryConfig: default_retry_params = RetryParams( max_attempts=settings.DEFAULT_MAX_ATTEMPTS, wait_time_seconds=settings.DEFAULT_WAIT_TIME_SECONDS, ) config = RetryConfig(default_params=default_retry_params) config["credentials"] = RetryParams( max_attempts=settings.S3_READ_CREDENTIALS_ATTEMPTS, wait_time_seconds=settings.S3_READ_WAIT_TIME_SECONDS, ) config["download_data"] = RetryParams( max_attempts=settings.S3_DOWNLOAD_DATA_ATTEMPTS, wait_time_seconds=settings.S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS, ) return config @retry_method(name="download_data") def _read_from_s3(self, client, bucket: str, file: str) -> Image: return read_from_s3(client, bucket, file) @retry_method(name="credentials") def _get_client(self, task_id: str): if ( self.credentials.aws_access_key_id is None or self.credentials.aws_secret_access_key is None ) and not self.credentials.use_system_info: logger.warning( "No specific AWS credentials, use Anonymous session" ) s3_client = boto3.client( "s3", config=Config(signature_version=UNSIGNED) ) else: sts_client = boto3.client( "sts", aws_access_key_id=self.credentials.aws_access_key_id, aws_secret_access_key=self.credentials.aws_secret_access_key, ) if self.credentials.external_id: assumed_role_object = sts_client.assume_role( RoleArn=self.credentials.role_arn, RoleSessionName=task_id, ExternalId=self.credentials.external_id, ) else: assumed_role_object = sts_client.assume_role( RoleArn=self.credentials.role_arn, RoleSessionName=task_id, ) credentials = assumed_role_object["Credentials"] s3_client = boto3.client( "s3", aws_access_key_id=credentials["AccessKeyId"], aws_secret_access_key=credentials["SecretAccessKey"], aws_session_token=credentials["SessionToken"], ) return s3_client def _generate_dataset_from_s3( self, files: List[S3FileMeta] ) -> Iterable[Dict]: if len(files) == 0: logger.warning("Nothing to download") else: logger.info("Connecting to aws s3...") task_id: str = str(uuid.uuid4()) try: s3_client = self._get_client(task_id) logger.info("Start downloading data from S3...") bad_items_count = 0 for val in files: image = None try: 
image: Image = read_from_s3( s3_client, val.bucket, val.file ) except Exception as e: logger.exception( f"Unable to download an item: {val.bucket}/{val.file} Exception: {str(e)}" ) if image is None: logger.error( f"Unable to download {val.file} from {val.bucket}" ) bad_items_count += 1 continue yield {"item": image, "item_id": val.id} if bad_items_count == len(files):
raise FailedToLoadAnythingFromAWSS3()
2
2023-10-31 00:33:13+00:00
8k
facebookresearch/minimax
src/minimax/envs/maze/maze_ood.py
[ { "identifier": "DIR_TO_VEC", "path": "src/minimax/envs/maze/common.py", "snippet": "DIR_TO_VEC = jnp.array([\n\t# Pointing right (positive X)\n\t(1, 0), # right\n\t(0, 1), # down\n\t(-1, 0), # left\n\t(0, -1), # up\n], dtype=jnp.int8)" }, { "identifier": "OBJECT_TO_INDEX", "path": "src/minimax/envs/maze/common.py", "snippet": "OBJECT_TO_INDEX = {\n\t\"unseen\": 0,\n\t\"empty\": 1,\n\t\"wall\": 2,\n\t\"floor\": 3,\n\t\"door\": 4,\n\t\"key\": 5,\n\t\"ball\": 6,\n\t\"box\": 7,\n\t\"goal\": 8,\n\t\"lava\": 9,\n\t\"agent\": 10,\n}" }, { "identifier": "COLOR_TO_INDEX", "path": "src/minimax/envs/maze/common.py", "snippet": "COLOR_TO_INDEX = {\n 'red' : 0,\n 'green' : 1,\n 'blue' : 2,\n 'purple': 3,\n 'yellow': 4,\n 'grey' : 5,\n}" }, { "identifier": "make_maze_map", "path": "src/minimax/envs/maze/common.py", "snippet": "def make_maze_map(\n\tparams,\n\twall_map, \n\tgoal_pos, \n\tagent_pos, \n\tagent_dir_idx,\n\tpad_obs=False):\n\t# Expand maze map to H x W x C\n\tempty = jnp.array([OBJECT_TO_INDEX['empty'], 0, 0], dtype=jnp.uint8)\n\twall = jnp.array([OBJECT_TO_INDEX['wall'], COLOR_TO_INDEX['grey'], 0], dtype=jnp.uint8)\n\tmaze_map = jnp.array(jnp.expand_dims(wall_map, -1), dtype=jnp.uint8)\n\tmaze_map = jnp.where(maze_map > 0, wall, empty)\n\t\n\tagent = jnp.array([OBJECT_TO_INDEX['agent'], COLOR_TO_INDEX['red'], agent_dir_idx], dtype=jnp.uint8)\n\tagent_x,agent_y = agent_pos\n\tmaze_map = maze_map.at[agent_y,agent_x,:].set(agent)\n\n\tgoal = jnp.array([OBJECT_TO_INDEX['goal'], COLOR_TO_INDEX['green'], 0], dtype=jnp.uint8)\n\tgoal_x,goal_y = goal_pos\n\tmaze_map = maze_map.at[goal_y,goal_x,:].set(goal)\n\n\t# Add observation padding\n\tif pad_obs:\n\t\tpadding = params.agent_view_size-1\n\telse:\n\t\tpadding = 1\n\n\tmaze_map_padded = jnp.tile(wall.reshape((1,1,*empty.shape)), (maze_map.shape[0]+2*padding, maze_map.shape[1]+2*padding, 1))\n\tmaze_map_padded = maze_map_padded.at[padding:-padding,padding:-padding,:].set(maze_map)\n\n\t# Add surrounding walls\n\twall_start = padding-1 # start index for walls\n\twall_end_y = maze_map_padded.shape[0] - wall_start - 1\n\twall_end_x = maze_map_padded.shape[1] - wall_start - 1\n\tmaze_map_padded = maze_map_padded.at[wall_start,wall_start:wall_end_x+1,:].set(wall) # top\n\tmaze_map_padded = maze_map_padded.at[wall_end_y,wall_start:wall_end_x+1,:].set(wall) # bottom\n\tmaze_map_padded = maze_map_padded.at[wall_start:wall_end_y+1,wall_start,:].set(wall) # left\n\tmaze_map_padded = maze_map_padded.at[wall_start:wall_end_y+1,wall_end_x,:].set(wall) # right\n\n\treturn maze_map_padded" }, { "identifier": "Maze", "path": "src/minimax/envs/maze/maze.py", "snippet": "class Maze(environment.Environment):\n\tdef __init__(\n\t\tself,\n\t\theight=13,\n\t\twidth=13,\n\t\tn_walls=25,\n\t\tagent_view_size=5,\n\t\treplace_wall_pos=False,\n\t\tsee_through_walls=True,\n\t\tsee_agent=False,\n\t\tmax_episode_steps=250,\n\t\tnormalize_obs=False,\n\t\tsample_n_walls=False,\n\t\tobs_agent_pos=False,\n\t\tsingleton_seed=-1\n\t):\n\t\tsuper().__init__()\n\n\t\tself.obs_shape = (agent_view_size, agent_view_size, 3)\n\t\tself.action_set = jnp.array([\n\t\t\tActions.left,\n\t\t\tActions.right,\n\t\t\tActions.forward,\n\t\t\tActions.pickup,\n\t\t\tActions.drop,\n\t\t\tActions.toggle,\n\t\t\tActions.done\n\t\t])\n\n\t\tself.params = EnvParams(\n\t\t\theight=height,\n\t\t\twidth=width,\n\t\t\tn_walls=n_walls,\n\t\t\tagent_view_size=agent_view_size,\n\t\t\treplace_wall_pos=replace_wall_pos and not 
sample_n_walls,\n\t\t\tsee_through_walls=see_through_walls,\n\t\t\tsee_agent=see_agent,\n\t\t\tmax_episode_steps=max_episode_steps,\n\t\t\tnormalize_obs=normalize_obs,\n\t\t\tsample_n_walls=sample_n_walls,\n\t\t\tobs_agent_pos=obs_agent_pos,\n\t\t\tsingleton_seed=-1,\n\t\t)\n\n\t@property\n\tdef default_params(self) -> EnvParams:\n\t\t# Default environment parameters\n\t\treturn EnvParams()\n\n\tdef step_env(\n\t\tself,\n\t\tkey: chex.PRNGKey,\n\t\tstate: EnvState,\n\t\taction: int,\n\t) -> Tuple[chex.Array, EnvState, float, bool, dict]:\n\t\t\"\"\"Perform single timestep state transition.\"\"\"\n\t\ta = self.action_set[action]\n\t\tstate, reward = self.step_agent(key, state, a)\n\t\t# Check game condition & no. steps for termination condition\n\t\tstate = state.replace(time=state.time + 1)\n\t\tdone = self.is_terminal(state)\n\t\tstate = state.replace(terminal=done)\n\n\t\treturn (\n\t\t\tlax.stop_gradient(self.get_obs(state)),\n\t\t\tlax.stop_gradient(state),\n\t\t\treward.astype(jnp.float32),\n\t\t\tdone,\n\t\t\t{},\n\t\t)\n\n\tdef reset_env(\n\t\tself, \n\t\tkey: chex.PRNGKey, \n\t) -> Tuple[chex.Array, EnvState]:\n\t\t\"\"\"Reset environment state by resampling contents of maze_map\n\t\t- initial agent position\n\t\t- goal position\n\t\t- wall positions\n\t\t\"\"\"\n\t\tparams = self.params\n\t\th = params.height\n\t\tw = params.width\n\t\tall_pos = np.arange(np.prod([h, w]), dtype=jnp.uint32)\n\n\t\t# Reset wall map, with shape H x W, and value of 1 at (i,j) iff there is a wall at (i,j)\n\t\tkey, subkey = jax.random.split(key)\n\t\twall_idx = jax.random.choice(\n\t\t\tsubkey, all_pos, \n\t\t\tshape=(params.n_walls,), \n\t\t\treplace=params.replace_wall_pos)\n\n\t\tif params.sample_n_walls:\n\t\t\tkey, subkey = jax.random.split(key)\n\t\t\tsampled_n_walls = jax.random.randint(subkey, (), minval=0, maxval=params.n_walls)\n\t\t\tsample_wall_mask = jnp.arange(params.n_walls) < sampled_n_walls\n\t\t\tdummy_wall_idx = wall_idx.at[0].get().repeat(params.n_walls)\n\t\t\twall_idx = jax.lax.select(\n\t\t\t\tsample_wall_mask,\n\t\t\t\twall_idx,\n\t\t\t\tdummy_wall_idx\n\t\t\t)\n\n\t\toccupied_mask = jnp.zeros_like(all_pos)\n\t\toccupied_mask = occupied_mask.at[wall_idx].set(1)\n\t\twall_map = occupied_mask.reshape(h, w).astype(jnp.bool_)\n\n\t\t# Reset agent position + dir\n\t\tkey, subkey = jax.random.split(key)\n\t\tagent_idx = jax.random.choice(subkey, all_pos, shape=(1,), p=(~occupied_mask.astype(jnp.bool_)).astype(jnp.float32))\n\t\toccupied_mask = occupied_mask.at[agent_idx].set(1)\n\t\tagent_pos = jnp.array([agent_idx%w,agent_idx//w], dtype=jnp.uint32).flatten()\n\n\t\tkey, subkey = jax.random.split(key)\n\t\tagent_dir_idx = jax.random.choice(subkey, jnp.arange(len(DIR_TO_VEC), dtype=jnp.uint8))\n\t\tagent_dir = DIR_TO_VEC.at[agent_dir_idx].get()\n\n\t\t# Reset goal position\n\t\tkey, subkey = jax.random.split(key)\n\t\tgoal_idx = jax.random.choice(subkey, all_pos, shape=(1,), p=(~occupied_mask.astype(jnp.bool_)).astype(jnp.float32))\n\t\tgoal_pos = jnp.array([goal_idx%w,goal_idx//w], dtype=jnp.uint32).flatten()\n\n\t\tmaze_map = make_maze_map(\n\t\t\tparams,\n\t\t\twall_map, \n\t\t\tgoal_pos, \n\t\t\tagent_pos, \n\t\t\tagent_dir_idx, \n\t\t\tpad_obs=True)\n\n\t\tstate = EnvState(\n\t\t\tagent_pos=agent_pos,\n\t\t\tagent_dir=agent_dir,\n\t\t\tagent_dir_idx=agent_dir_idx,\n\t\t\tgoal_pos=goal_pos,\n\t\t\twall_map=wall_map.astype(jnp.bool_),\n\t\t\tmaze_map=maze_map,\n\t\t\ttime=0,\n\t\t\tterminal=False,\n\t\t)\n\n\t\treturn self.get_obs(state), state\n\n\tdef 
set_env_instance(\n\t\t\tself, \n\t\t\tencoding: EnvInstance):\n\t\t\"\"\"\n\t\tInstance is encoded as a PyTree containing the following fields:\n\t\tagent_pos, agent_dir, goal_pos, wall_map\n\t\t\"\"\"\n\t\tparams = self.params\n\t\tagent_pos = encoding.agent_pos\n\t\tagent_dir_idx = encoding.agent_dir_idx\n\n\t\tagent_dir = DIR_TO_VEC.at[agent_dir_idx].get()\n\t\tgoal_pos = encoding.goal_pos\n\t\twall_map = encoding.wall_map\n\t\tmaze_map = make_maze_map(\n\t\t\tparams,\n\t\t\twall_map, \n\t\t\tgoal_pos, \n\t\t\tagent_pos, \n\t\t\tagent_dir_idx, # ued instances include wall padding\n\t\t\tpad_obs=True)\n\n\t\tstate = EnvState(\n\t\t\tagent_pos=agent_pos,\n\t\t\tagent_dir=agent_dir,\n\t\t\tagent_dir_idx=agent_dir_idx,\n\t\t\tgoal_pos=goal_pos,\n\t\t\twall_map=wall_map,\n\t\t\tmaze_map=maze_map,\n\t\t\ttime=0,\n\t\t\tterminal=False\n\t\t)\n\n\t\treturn self.get_obs(state), state\n\n\tdef get_obs(self, state: EnvState) -> chex.Array:\n\t\t\"\"\"Return limited grid view ahead of agent.\"\"\"\n\t\tobs = jnp.zeros(self.obs_shape, dtype=jnp.uint8)\n\t\t\n\t\tagent_x, agent_y = state.agent_pos\n\n\t\tobs_fwd_bound1 = state.agent_pos\n\t\tobs_fwd_bound2 = state.agent_pos + state.agent_dir*(self.obs_shape[0]-1)\n\n\t\tside_offset = self.obs_shape[0]//2\n\t\tobs_side_bound1 = state.agent_pos + (state.agent_dir == 0)*side_offset\n\t\tobs_side_bound2 = state.agent_pos - (state.agent_dir == 0)*side_offset\n\n\t\tall_bounds = jnp.stack([obs_fwd_bound1, obs_fwd_bound2, obs_side_bound1, obs_side_bound2])\n\n\t\t# Clip obs to grid bounds appropriately\n\t\tpadding = obs.shape[0]-1\n\t\tobs_bounds_min = np.min(all_bounds, 0) + padding\n\t\tobs_range_x = jnp.arange(obs.shape[0]) + obs_bounds_min[1]\n\t\tobs_range_y = jnp.arange(obs.shape[0]) + obs_bounds_min[0]\n\n\t\tmeshgrid = jnp.meshgrid(obs_range_y,obs_range_x)\n\t\tcoord_y = meshgrid[1].flatten()\n\t\tcoord_x = meshgrid[0].flatten()\n\n\t\tobs = state.maze_map.at[\n\t\t\tcoord_y,coord_x,:].get().reshape(obs.shape[0],obs.shape[1],3)\n\n\t\tobs = (state.agent_dir_idx == 0)*jnp.rot90(obs, 1) + \\\n\t\t\t \t(state.agent_dir_idx == 1)*jnp.rot90(obs, 2) + \\\n\t\t\t \t(state.agent_dir_idx == 2)*jnp.rot90(obs, 3) + \\\n\t\t\t \t(state.agent_dir_idx == 3)*jnp.rot90(obs, 4)\n\n\t\tif not self.params.see_agent:\n\t\t\tobs = obs.at[-1, side_offset].set(\n\t\t\t\tjnp.array([OBJECT_TO_INDEX['empty'], 0, 0], dtype=jnp.uint8)\n\t\t\t)\n\n\t\tif not self.params.see_through_walls:\n\t\t\tpass\n\n\t\timage = obs.astype(jnp.uint8)\n\t\tif self.params.normalize_obs:\n\t\t\timage = image/10.0\n\n\t\tobs_dict = dict(\n\t\t\timage=image,\n\t\t\tagent_dir=state.agent_dir_idx\n\t\t)\n\t\tif self.params.obs_agent_pos:\n\t\t\tobs_dict.update(dict(agent_pos=state.agent_pos))\n\n\t\treturn OrderedDict(obs_dict)\n\n\tdef step_agent(self, key: chex.PRNGKey, state: EnvState, action: int) -> Tuple[EnvState, float]:\n\t\tparams = self.params\n\n\t\t# Update agent position (forward action)\n\t\tfwd_pos = jnp.minimum(\n\t\t\tjnp.maximum(state.agent_pos + (action == Actions.forward)*state.agent_dir, 0), \n\t\t\tjnp.array((params.width-1, params.height-1), dtype=jnp.uint32))\n\n\t\t# Can't go past wall or goal\n\t\tfwd_pos_has_wall = state.wall_map.at[fwd_pos[1], fwd_pos[0]].get()\n\t\tfwd_pos_has_goal = jnp.logical_and(fwd_pos[0] == state.goal_pos[0], fwd_pos[1] == state.goal_pos[1])\n\n\t\tfwd_pos_blocked = jnp.logical_or(fwd_pos_has_wall, fwd_pos_has_goal)\n\n\t\tagent_pos_prev = jnp.array(state.agent_pos)\n\t\tagent_pos = (fwd_pos_blocked*state.agent_pos + 
(~fwd_pos_blocked)*fwd_pos).astype(jnp.uint32)\n\n\t\t# Update agent direction (left_turn or right_turn action)\n\t\tagent_dir_offset = \\\n\t\t\t0 \\\n\t\t\t+ (action == Actions.left)*(-1) \\\n\t\t\t+ (action == Actions.right)*1\n\n\t\tagent_dir_idx = (state.agent_dir_idx + agent_dir_offset) % 4\n\t\tagent_dir = DIR_TO_VEC[agent_dir_idx]\n\n\t\t# Update agent component in maze_map\n\t\tempty = jnp.array([OBJECT_TO_INDEX['empty'], 0, 0], dtype=jnp.uint8)\n\t\tagent = jnp.array([OBJECT_TO_INDEX['agent'], COLOR_TO_INDEX['red'], agent_dir_idx], dtype=jnp.uint8)\n\t\tpadding = self.obs_shape[0]-1\n\t\tmaze_map = state.maze_map\n\t\tmaze_map = maze_map.at[padding+agent_pos_prev[1],padding+agent_pos_prev[0],:].set(empty)\n\t\tmaze_map = maze_map.at[padding+agent_pos[1],padding+agent_pos[0],:].set(agent)\n\n\t\t# Return reward\n\t\t# rng = jax.random.PRNGKey(agent_dir_idx + agent_pos[0] + agent_pos[1])\n\t\t# rand_reward = jax.random.uniform(rng)\n\t\treward = (1.0 - 0.9*((state.time+1)/params.max_episode_steps))*fwd_pos_has_goal # rand_reward\n\n\t\treturn (\n\t\t\tstate.replace(\n\t\t\t\tagent_pos=agent_pos,\n\t\t\t\tagent_dir_idx=agent_dir_idx,\n\t\t\t\tagent_dir=agent_dir,\n\t\t\t\tmaze_map=maze_map,\t\n\t\t\t\tterminal=fwd_pos_has_goal),\n\t\t\treward\n\t\t)\n\n\tdef is_terminal(self, state: EnvState) -> bool:\n\t\t\"\"\"Check whether state is terminal.\"\"\"\n\t\tdone_steps = state.time >= self.params.max_episode_steps\n\t\treturn jnp.logical_or(done_steps, state.terminal)\n\n\tdef get_eval_solved_rate_fn(self):\n\t\tdef _fn(ep_stats):\n\t\t\treturn ep_stats['return'] > 0\n\n\t\treturn _fn\n\n\t@property\n\tdef name(self) -> str:\n\t\t\"\"\"Environment name.\"\"\"\n\t\treturn \"Maze\"\n\n\t@property\n\tdef num_actions(self) -> int:\n\t\t\"\"\"Number of actions possible in environment.\"\"\"\n\t\treturn len(self.action_set)\n\n\tdef action_space(self) -> spaces.Discrete:\n\t\t\"\"\"Action space of the environment.\"\"\"\n\t\treturn spaces.Discrete(\n\t\t\tlen(self.action_set),\n\t\t\tdtype=jnp.uint32\n\t\t)\n\n\tdef observation_space(self) -> spaces.Dict:\n\t\t\"\"\"Observation space of the environment.\"\"\"\n\t\tspaces_dict = {\n\t\t\t'image':spaces.Box(0, 255, self.obs_shape),\n\t\t\t'agent_dir': spaces.Discrete(4)\n\t\t}\n\t\tif self.params.obs_agent_pos:\n\t\t\tparams = self.params\n\t\t\th = params.height\n\t\t\tw = params.width\n\t\t\tspaces_dict.update({'agent_pos': spaces.Box(0, max(w, h), (2,), dtype=jnp.uint32)})\n\n\t\treturn spaces.Dict(spaces_dict)\n\n\tdef state_space(self) -> spaces.Dict:\n\t\t\"\"\"State space of the environment.\"\"\"\n\t\tparams = self.params\n\t\th = params.height\n\t\tw = params.width\n\t\tagent_view_size = params.agent_view_size\n\t\treturn spaces.Dict({\n\t\t\t\"agent_pos\": spaces.Box(0, max(w, h), (2,), dtype=jnp.uint32),\n\t\t\t\"agent_dir\": spaces.Discrete(4),\n\t\t\t\"goal_pos\": spaces.Box(0, max(w, h), (2,), dtype=jnp.uint32),\n\t\t\t\"maze_map\": spaces.Box(0, 255, (w + agent_view_size, h + agent_view_size, 3), dtype=jnp.uint32),\n\t\t\t\"time\": spaces.Discrete(params.max_episode_steps),\n\t\t\t\"terminal\": spaces.Discrete(2),\n\t\t})\n\n\tdef max_episode_steps(self) -> int:\n\t\treturn self.params.max_episode_steps\n\n\tdef get_env_metrics(self, state: EnvState) -> dict:\n\t\tn_walls = state.wall_map.sum()\n\t\tshortest_path_length = _graph_util.shortest_path_len(\n\t\t\tstate.wall_map,\n\t\t\tstate.agent_pos,\n\t\t\tstate.goal_pos\n\t\t)\n\n\t\treturn 
dict(\n\t\t\tn_walls=n_walls,\n\t\t\tshortest_path_length=shortest_path_length,\n\t\t\tpassable=shortest_path_length > 0,\n\t\t)" }, { "identifier": "EnvParams", "path": "src/minimax/envs/maze/maze.py", "snippet": "class EnvParams:\n\theight: int = 15\n\twidth: int = 15\n\tn_walls: int = 25 \n\tagent_view_size: int = 5\n\treplace_wall_pos: bool = False\n\tsee_through_walls: bool = True\n\tsee_agent: bool = False\n\tnormalize_obs: bool = False\n\tsample_n_walls: bool = False # Sample n_walls uniformly in [0, n_walls]\n\tobs_agent_pos: bool = False\n\tmax_episode_steps: int = 250\n\tsingleton_seed: int = -1," }, { "identifier": "EnvState", "path": "src/minimax/envs/maze/maze.py", "snippet": "class EnvState:\n\tagent_pos: chex.Array\n\tagent_dir: chex.Array\n\tagent_dir_idx: int\n\tgoal_pos: chex.Array\n\twall_map: chex.Array\n\tmaze_map: chex.Array\n\ttime: int\n\tterminal: bool" }, { "identifier": "Actions", "path": "src/minimax/envs/maze/maze.py", "snippet": "class Actions(IntEnum):\n # Turn left, turn right, move forward\n left = 0\n right = 1\n forward = 2\n\n # Pick up an object\n pickup = 3\n # Drop an object\n drop = 4\n # Toggle/activate an object\n toggle = 5\n\n # Done completing task\n done = 6" } ]
from typing import Tuple, Optional
from flax import struct
from minimax.envs.registration import register
from .common import ( DIR_TO_VEC, OBJECT_TO_INDEX, COLOR_TO_INDEX, make_maze_map, )
from .maze import ( Maze, EnvParams, EnvState, Actions )
import jax
import jax.numpy as jnp
import chex
5,667
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ # ======== Singleton mazes ======== class MazeSingleton(Maze): def __init__( self, height=15, width=15, wall_map=None, goal_pos=None, agent_pos=None, agent_dir_idx=None, agent_view_size=5, see_through_walls=True, see_agent=False, normalize_obs=False, obs_agent_pos=False, max_episode_steps=None, singleton_seed=-1, ): super().__init__( height=height, width=width, agent_view_size=agent_view_size, see_through_walls=see_through_walls, see_agent=see_agent, normalize_obs=normalize_obs, obs_agent_pos=obs_agent_pos, max_episode_steps=max_episode_steps, singleton_seed=singleton_seed ) if wall_map is None: self.wall_map = jnp.zeros((height,width), dtype=jnp.bool_) else: self.wall_map = \ jnp.array( [[int(x) for x in row.split()] for row in wall_map], dtype=jnp.bool_) height, width = self.wall_map.shape if max_episode_steps is None: max_episode_steps = 2*(height+2)*(width+2) # Match original eval steps self.goal_pos_choices = None if goal_pos is None: self.goal_pos = jnp.array([height, width]) - jnp.ones(2, dtype=jnp.uint32) elif isinstance(goal_pos, (tuple, list)) \ and isinstance(goal_pos[0], (tuple, list)): self.goal_pos_choices = jnp.array(goal_pos, dtype=jnp.uint32) self.goal_pos = goal_pos[0] else: self.goal_pos = jnp.array(goal_pos, dtype=jnp.uint32) if agent_pos is None: self.agent_pos = jnp.zeros(2, dtype=jnp.uint32) else: self.agent_pos = jnp.array(agent_pos, dtype=jnp.uint32) self.agent_dir_idx = agent_dir_idx if self.agent_dir_idx is None: self.agent_dir_idx = 0 self.params = EnvParams( height=height, width=width, agent_view_size=agent_view_size, see_through_walls=see_through_walls, see_agent=see_agent, normalize_obs=normalize_obs, obs_agent_pos=obs_agent_pos, max_episode_steps=max_episode_steps, singleton_seed=-1, ) self.maze_map = make_maze_map( self.params, self.wall_map, self.goal_pos, self.agent_pos, self.agent_dir_idx, pad_obs=True) @property def default_params(self) -> EnvParams: # Default environment parameters return EnvParams() def reset_env( self, key: chex.PRNGKey,
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ # ======== Singleton mazes ======== class MazeSingleton(Maze): def __init__( self, height=15, width=15, wall_map=None, goal_pos=None, agent_pos=None, agent_dir_idx=None, agent_view_size=5, see_through_walls=True, see_agent=False, normalize_obs=False, obs_agent_pos=False, max_episode_steps=None, singleton_seed=-1, ): super().__init__( height=height, width=width, agent_view_size=agent_view_size, see_through_walls=see_through_walls, see_agent=see_agent, normalize_obs=normalize_obs, obs_agent_pos=obs_agent_pos, max_episode_steps=max_episode_steps, singleton_seed=singleton_seed ) if wall_map is None: self.wall_map = jnp.zeros((height,width), dtype=jnp.bool_) else: self.wall_map = \ jnp.array( [[int(x) for x in row.split()] for row in wall_map], dtype=jnp.bool_) height, width = self.wall_map.shape if max_episode_steps is None: max_episode_steps = 2*(height+2)*(width+2) # Match original eval steps self.goal_pos_choices = None if goal_pos is None: self.goal_pos = jnp.array([height, width]) - jnp.ones(2, dtype=jnp.uint32) elif isinstance(goal_pos, (tuple, list)) \ and isinstance(goal_pos[0], (tuple, list)): self.goal_pos_choices = jnp.array(goal_pos, dtype=jnp.uint32) self.goal_pos = goal_pos[0] else: self.goal_pos = jnp.array(goal_pos, dtype=jnp.uint32) if agent_pos is None: self.agent_pos = jnp.zeros(2, dtype=jnp.uint32) else: self.agent_pos = jnp.array(agent_pos, dtype=jnp.uint32) self.agent_dir_idx = agent_dir_idx if self.agent_dir_idx is None: self.agent_dir_idx = 0 self.params = EnvParams( height=height, width=width, agent_view_size=agent_view_size, see_through_walls=see_through_walls, see_agent=see_agent, normalize_obs=normalize_obs, obs_agent_pos=obs_agent_pos, max_episode_steps=max_episode_steps, singleton_seed=-1, ) self.maze_map = make_maze_map( self.params, self.wall_map, self.goal_pos, self.agent_pos, self.agent_dir_idx, pad_obs=True) @property def default_params(self) -> EnvParams: # Default environment parameters return EnvParams() def reset_env( self, key: chex.PRNGKey,
) -> Tuple[chex.Array, EnvState]:
6
2023-10-28 12:12:01+00:00
8k
reworkd/bananalyzer
bananalyzer/__main__.py
[ { "identifier": "AgentRunner", "path": "bananalyzer/runner/agent_runner.py", "snippet": "class AgentRunner(ABC):\n \"\"\"\n Wrapper class clients must implement to run an agent against the evaluations\n \"\"\"\n\n @abstractmethod\n async def run(\n self,\n page: Page,\n eval_context: Example,\n ) -> AgentResult:\n pass" }, { "identifier": "get_test_examples", "path": "bananalyzer/data/examples.py", "snippet": "def get_test_examples() -> List[Example]:\n return load_examples_at_path(get_examples_path(), test_examples_name)" }, { "identifier": "get_training_examples", "path": "bananalyzer/data/examples.py", "snippet": "def get_training_examples() -> List[Example]:\n return load_examples_at_path(get_examples_path(), train_examples_name)" }, { "identifier": "download_examples", "path": "bananalyzer/data/examples.py", "snippet": "def download_examples() -> None:\n \"\"\"\n Downloads the repo via git and places contents of the `/static` data directory in ~/.bananalyzer_data\n \"\"\"\n repo_url = \"https://github.com/reworkd/bananalyzer.git\"\n branch = \"main\"\n data_folder_name = \"static/\"\n\n try:\n subprocess.run(\n [\"git\", \"clone\", \"-b\", branch, repo_url, \"repo_temp\"], check=True\n )\n\n data_folder_path = Path(\"repo_temp\") / data_folder_name\n if not data_folder_path.exists():\n raise FileNotFoundError(\n f\"The folder '{data_folder_name}' does not exist in the repository.\"\n )\n\n downloaded_examples_path.mkdir(parents=True, exist_ok=True)\n for item in downloaded_examples_path.iterdir():\n if item.is_dir():\n shutil.rmtree(item)\n else:\n item.unlink()\n\n for item in data_folder_path.iterdir():\n target_path = shutil.move(str(item), downloaded_examples_path)\n for root, dirs, files in os.walk(target_path):\n for file in files:\n convert_to_crlf(Path(root) / file)\n\n finally:\n print(\"Cleaning up repo...\")\n shutil.rmtree(\"repo_temp\", ignore_errors=True)" }, { "identifier": "PytestTestGenerator", "path": "bananalyzer/runner/generator.py", "snippet": "class PytestTestGenerator:\n def __init__(self) -> None:\n self._classnames: Dict[str, int] = {}\n\n def generate_test(self, example: Example) -> BananalyzerTest:\n return BananalyzerTest(\n code=f\"\"\"\[email protected]\nclass {self._generate_class_name(example)}:\n\n @classmethod\n def setup_class(cls):\n cls.example = get_example_by_url(\"{example.url}\")\n\n\n @pytest_asyncio.fixture(scope=\"class\")\n async def result(self, page, agent):\n yield await agent.run(page, self.example)\n\n {\"\".join(self._generate_eval_test(eval_, i, {\n \"category\": example.category,\n \"subcategory\": example.subcategory,\n \"type\": example.type,\n }) for i, eval_ in enumerate(example.evals))}\n\"\"\",\n example=example,\n )\n\n @staticmethod\n def _generate_eval_test(eval_: Eval, i: int, attrs: dict[str, str]) -> str:\n marks = \"\\n \".join(\n f\"@pytest.mark.{MARKER_PREFIX}{k}('{v}')\" for k, v in attrs.items()\n )\n\n if eval_.type == \"json_match\" and isinstance(eval_.expected, dict):\n return f\"\"\"\n {marks}\n @pytest.mark.parametrize(\"key\", {list(eval_.expected.keys())})\n async def test_match_field(self, key, result) -> None:\n self.example.evals[{i}].eval_results(None, result, field=key)\n\n\"\"\"\n return f\"\"\"\n {marks}\n async def test_{eval_.type}(self, page, result) -> None:\n self.example.evals[{i}].eval_results(page, result)\n\n\"\"\"\n\n def _generate_class_name(self, example: Example) -> str:\n domain = urlparse(example.url).netloc\n domain = domain.replace(\".\", \"_\")\n domain = domain.replace(\"-\", 
\"_\")\n if domain.startswith(\"www_\"):\n domain = domain[4:]\n\n domain = \"\".join([part.capitalize() for part in domain.split(\"_\")])\n\n key = f\"{example.type.capitalize()}{domain}\"\n self._classnames[key] = self._classnames.get(key, -1) + 1\n suffix = \"\" if not self._classnames[key] else f\"{self._classnames[key] + 1}\"\n return f\"Test{key}{suffix}_{example.id.replace('-', '_')}\"" }, { "identifier": "run_tests", "path": "bananalyzer/runner/runner.py", "snippet": "def run_tests(\n tests: List[BananalyzerTest],\n runner: AgentRunnerClass,\n pytest_args: PytestArgs,\n xdist_args: XDistArgs,\n headless: bool = False,\n single_browser_instance: bool = False,\n) -> int:\n \"\"\"\n Create temporary test files based on intent, run them, and then delete them\n \"\"\"\n intents = {test.example.type for test in tests}\n intent_separated_tests = [\n [test for test in tests if test.example.type == intent] for intent in intents\n ]\n\n cache_dir = Path(os.getcwd()) / \".banana_cache\"\n cache_dir.mkdir(exist_ok=True)\n with open(cache_dir / \".gitignore\", \"w\") as f:\n f.write(\"# Generated by bananalyzer automatically\\n*\")\n\n with tempfile.TemporaryDirectory(dir=cache_dir) as temp_dir:\n temp_path = Path(temp_dir)\n\n test_file_names = [\n create_test_file(\n tests,\n f\"{tests[0].example.type}_intent_\",\n temp_path,\n runner,\n headless,\n single_browser_instance,\n )\n for tests in intent_separated_tests\n ]\n\n args = (\n test_file_names\n + [\"-s\"] * pytest_args.s\n + ([\"-q\"] if pytest_args.q else [\"-vvv\"])\n + [\"-n\", str(xdist_args.n)]\n + [\"--dist\", xdist_args.dist]\n + [f\"--junitxml={pytest_args.xml}\"] * bool(pytest_args.xml)\n + [\"--disable-warnings\"]\n )\n\n kwargs = dict()\n if not xdist_args.n:\n kwargs[\"plugins\"] = [BananalyzerPytestPlugin()]\n else:\n hooks = Path(__file__).parent.parent / \"hooks.py\"\n shutil.copy(hooks, temp_path / \"conftest.py\")\n\n exit_code = pytest.main(args, **kwargs)\n if pytest_args.xml:\n enrich_report(pytest_args.xml)\n\n return exit_code" }, { "identifier": "AgentRunnerClass", "path": "bananalyzer/schema.py", "snippet": "class AgentRunnerClass(BaseModel):\n class_name: str\n class_path: str" }, { "identifier": "Args", "path": "bananalyzer/schema.py", "snippet": "class Args(BaseModel):\n path: Union[str, Literal[\"DOWNLOAD_ONLY\"]]\n headless: bool\n single_browser_instance: bool\n id: Optional[str] = Field(default=None)\n domain: Optional[str] = Field(default=None)\n intent: Optional[GoalType] = Field(default=None)\n category: Optional[str] = Field(default=None)\n subcategory: Optional[str] = Field(default=None)\n skip: List[str]\n type: Optional[str] = Field(default=None)\n download: bool\n test: bool\n count: Optional[int]\n pytest_args: PytestArgs\n xdist_args: XDistArgs" }, { "identifier": "PytestArgs", "path": "bananalyzer/schema.py", "snippet": "class PytestArgs(BaseModel):\n s: bool\n q: bool\n xml: Optional[str] = Field(description=\"Path to the xml report file\")" }, { "identifier": "XDistArgs", "path": "bananalyzer/schema.py", "snippet": "class XDistArgs(BaseModel):\n dist: XDistDistributionMode = Field(description=\"Distribution mode (xdist)\")\n n: Union[int, Literal[\"logical\", \"auto\"]] = Field(\n description=\"Number of workers (xdist)\"\n )" } ]
import argparse
import ast
import importlib.util
import sys
from pathlib import Path
from typing import List
from urllib.parse import urlparse
from bananalyzer import AgentRunner
from bananalyzer.data.examples import ( get_test_examples, get_training_examples, download_examples, )
from bananalyzer.runner.generator import PytestTestGenerator
from bananalyzer.runner.runner import run_tests
from bananalyzer.schema import AgentRunnerClass, Args, PytestArgs, XDistArgs
3,708
help="The distribution mode for pytest-xdist", ) args = parser.parse_args() if args.download and not args.path: args.path = "DOWNLOAD_ONLY" if not args.path: print( f"Please provide the path to a {file_name} file. " f"Use the --help flag for more information." ) exit(1) return Args( path=args.path, headless=args.headless, intent=args.intent, id=args.id, domain=args.domain, category=args.category, subcategory=args.subcategory, skip=args.skip, single_browser_instance=args.single_browser_instance, type=args.type, test=args.test, download=args.download, count=args.count, pytest_args=PytestArgs( s=args.s, n=args.n, q=args.quiet, xml=args.junitxml, dist=args.dist, ), xdist_args=XDistArgs( n=args.n, dist=args.dist, ), ) def find_agents(file_path: Path) -> List[AgentRunnerClass]: with open(file_path, "r") as source: node = ast.parse(source.read()) runners: List[AgentRunnerClass] = [] for clazz in [n for n in node.body if isinstance(n, ast.ClassDef)]: if "AgentRunner" in [getattr(base, "id", "") for base in clazz.bases]: runners.append( AgentRunnerClass( class_name=clazz.name, class_path=str(file_path), ) ) return runners def load_agent_from_path(path: Path) -> AgentRunnerClass: if path.is_dir(): files = [p for p in path.glob("**/*.py") if "venv" not in p.parts] else: files = [path] runners: List[AgentRunnerClass] = [] for file in files: runners.extend(find_agents(file)) if len(runners) == 0: raise RuntimeError(f"Could not find any agent runners in {path}") if len(runners) > 1: raise RuntimeError(f"Found multiple agent runners in {path}") runner = runners[0] runner_file = Path(runner.class_path) module_name = path.stem spec = importlib.util.spec_from_file_location(module_name, runner_file) if spec is None or spec.loader is None: raise ImportError(f"Cannot load module from path {runner_file}") module = importlib.util.module_from_spec(spec) sys.modules[module_name] = module spec.loader.exec_module(module) agent = getattr(module, runner.class_name)() if not isinstance(agent, AgentRunner): raise TypeError("User defined agent is is not an instance of AgentRunner") return runner def main() -> int: """ Load the agent from the provided path and run it against the benchmark Note that pytest creates a new global context when running tests. Because of this, we first load the agent and validate that it is of the correct type here. Then we pass the path to the agent runner and let it load it within the pytest context. Note your AgentRunner must be concurrency safe. """ print_intro() # Load the agent args = parse_args() if args.download: print("##################################################") print("# Downloading examples, this may take a while... #") print("##################################################") download_examples() if args.path == "DOWNLOAD_ONLY": return 0 agent = load_agent_from_path(Path(args.path)) print(f"Loaded agent {agent.class_name} from {agent.class_name}") # Filter examples based on args
# Separate banana-lyzer args from pytest args # Look for an instance of Banana-lyzer in the current directory # If it doesn't exist, error def print_intro() -> None: # https://www.asciiart.eu/food-and-drinks/bananas print( r""" //\ V \ \ \_ \,'.`-. |\ `. `. ( \ `. `-. _,.-:\ \ \ `. `-._ __..--' ,-';/ \ `. `-. `-..___..---' _.--' ,'/ `. `. `-._ __..--' ,' / `. `-_ ``--..'' _.-' ,' `-_ `-.___ __,--' ,' `-.__ `----''' __.-' `--..____..--' """ ) print("Bananalyzing... 🍌") def parse_args() -> Args: file_name = "bananalyzer-agent.py" parser = argparse.ArgumentParser( description="Run the agent inside a bananalyzer agent definition file " "against the benchmark", ) parser.add_argument( "path", type=str, nargs="?", default=None, help=f"Path to the {file_name} file" ) parser.add_argument( "--headless", action="store_true", help="Whether to run headless or not" ) parser.add_argument( "-s", "--s", action="store_true", help="Shortcut for --capture=no in pytest. Will print stdout and stderr", ) parser.add_argument( "-id", "--id", type=str, default=None, help="Filter tests by id. " "Ids could be of shape a4c8292a_079c_4e49_bca1_cf7c9da205ec or a4c8292a-079c-4e49-bca1-cf7c9da205ec", ) parser.add_argument( "-d", "--domain", type=str, default=None, help="Filter tests by a particular URL domain", ) parser.add_argument( "-i", "--intent", type=str, default=None, help="Filter tests by a particular intent", ) parser.add_argument( "-c", "--category", type=str, default=None, help="Filter tests by a particular category", ) parser.add_argument( "--subcategory", type=str, default=None, help="Filter tests by a particular subcategory", ) parser.add_argument( "-n", "--n", type=str, default="logical", help="Number of test workers to use. The default is 1", ) parser.add_argument( "-skip", "--skip", type=lambda s: s.split(","), default=[], help="A list of ids to skip tests on, separated by commas", ) parser.add_argument( "-q", "--quiet", action="store_true", help="Will decrease the verbosity of pytest. By default we run with the `--v` pytest param.", ) parser.add_argument( "--single_browser_instance", action="store_true", help="Run tests in a single browser instance as opposed to creating a browser " "instance per test. This is faster but less reliable as test contexts can " "occasionally bleed into each other, causing tests to fail", ) parser.add_argument( "--type", type=str, default=None, help="Filter tests by a particular type", ) parser.add_argument( "--download", action="store_true", help="Will re-download training and test examples", ) parser.add_argument( "--test", action="store_true", help="Use test set examples instead of training set examples", ) parser.add_argument( "--count", type=int, default=None, help="The number of times to run an individual test. Won't work for detail pages", ) parser.add_argument( "--junitxml", type=str, default=None, help="The path for the junitxml report file", ) parser.add_argument( "--dist", type=str, default="loadscope", help="The distribution mode for pytest-xdist", ) args = parser.parse_args() if args.download and not args.path: args.path = "DOWNLOAD_ONLY" if not args.path: print( f"Please provide the path to a {file_name} file. " f"Use the --help flag for more information." 
) exit(1) return Args( path=args.path, headless=args.headless, intent=args.intent, id=args.id, domain=args.domain, category=args.category, subcategory=args.subcategory, skip=args.skip, single_browser_instance=args.single_browser_instance, type=args.type, test=args.test, download=args.download, count=args.count, pytest_args=PytestArgs( s=args.s, n=args.n, q=args.quiet, xml=args.junitxml, dist=args.dist, ), xdist_args=XDistArgs( n=args.n, dist=args.dist, ), ) def find_agents(file_path: Path) -> List[AgentRunnerClass]: with open(file_path, "r") as source: node = ast.parse(source.read()) runners: List[AgentRunnerClass] = [] for clazz in [n for n in node.body if isinstance(n, ast.ClassDef)]: if "AgentRunner" in [getattr(base, "id", "") for base in clazz.bases]: runners.append( AgentRunnerClass( class_name=clazz.name, class_path=str(file_path), ) ) return runners def load_agent_from_path(path: Path) -> AgentRunnerClass: if path.is_dir(): files = [p for p in path.glob("**/*.py") if "venv" not in p.parts] else: files = [path] runners: List[AgentRunnerClass] = [] for file in files: runners.extend(find_agents(file)) if len(runners) == 0: raise RuntimeError(f"Could not find any agent runners in {path}") if len(runners) > 1: raise RuntimeError(f"Found multiple agent runners in {path}") runner = runners[0] runner_file = Path(runner.class_path) module_name = path.stem spec = importlib.util.spec_from_file_location(module_name, runner_file) if spec is None or spec.loader is None: raise ImportError(f"Cannot load module from path {runner_file}") module = importlib.util.module_from_spec(spec) sys.modules[module_name] = module spec.loader.exec_module(module) agent = getattr(module, runner.class_name)() if not isinstance(agent, AgentRunner): raise TypeError("User defined agent is is not an instance of AgentRunner") return runner def main() -> int: """ Load the agent from the provided path and run it against the benchmark Note that pytest creates a new global context when running tests. Because of this, we first load the agent and validate that it is of the correct type here. Then we pass the path to the agent runner and let it load it within the pytest context. Note your AgentRunner must be concurrency safe. """ print_intro() # Load the agent args = parse_args() if args.download: print("##################################################") print("# Downloading examples, this may take a while... #") print("##################################################") download_examples() if args.path == "DOWNLOAD_ONLY": return 0 agent = load_agent_from_path(Path(args.path)) print(f"Loaded agent {agent.class_name} from {agent.class_name}") # Filter examples based on args
examples = get_test_examples() if args.test else get_training_examples()
1
2023-10-30 16:40:57+00:00
8k
innnky/ar-vits
AR/modules/transformer.py
[ { "identifier": "MultiheadAttention", "path": "AR/modules/activation.py", "snippet": "class MultiheadAttention(Module):\n r\"\"\"Allows the model to jointly attend to information\n from different representation subspaces as described in the paper:\n `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.\n\n Multi-Head Attention is defined as:\n\n .. math::\n \\text{MultiHead}(Q, K, V) = \\text{Concat}(head_1,\\dots,head_h)W^O\n\n where :math:`head_i = \\text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.\n\n ``forward()`` will use a special optimized implementation if all of the following\n conditions are met:\n\n - self attention is being computed (i.e., ``query``, ``key``, and ``value`` are the same tensor. This\n restriction will be loosened in the future.)\n - Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor argument ``requires_grad``\n - training is disabled (using ``.eval()``)\n - dropout is 0\n - ``add_bias_kv`` is ``False``\n - ``add_zero_attn`` is ``False``\n - ``batch_first`` is ``True`` and the input is batched\n - ``kdim`` and ``vdim`` are equal to ``embed_dim``\n - at most one of ``key_padding_mask`` or ``attn_mask`` is passed\n - if a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ is passed, neither ``key_padding_mask``\n nor ``attn_mask`` is passed\n\n If the optimized implementation is in use, a\n `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be passed for\n ``query``/``key``/``value`` to represent padding more efficiently than using a\n padding mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_\n will be returned, and an additional speedup proportional to the fraction of the input\n that is padding can be expected.\n\n Args:\n embed_dim: Total dimension of the model.\n num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split\n across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).\n dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).\n bias: If specified, adds bias to input / output projection layers. Default: ``True``.\n add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.\n add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.\n Default: ``False``.\n kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).\n vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).\n batch_first: If ``True``, then the input and output tensors are provided\n as (batch, seq, feature). 
Default: ``False`` (seq, batch, feature).\n\n Examples::\n\n >>> # xdoctest: +SKIP\n >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)\n >>> attn_output, attn_output_weights = multihead_attn(query, key, value)\n\n \"\"\"\n __constants__ = [\"batch_first\"]\n bias_k: Optional[torch.Tensor]\n bias_v: Optional[torch.Tensor]\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n dropout=0.0,\n bias=True,\n add_bias_kv=False,\n add_zero_attn=False,\n kdim=None,\n vdim=None,\n batch_first=False,\n linear1_cls=Linear,\n linear2_cls=Linear,\n device=None,\n dtype=None, ) -> None:\n factory_kwargs = {\"device\": device, \"dtype\": dtype}\n super(MultiheadAttention, self).__init__()\n self.embed_dim = embed_dim\n self.kdim = kdim if kdim is not None else embed_dim\n self.vdim = vdim if vdim is not None else embed_dim\n self._qkv_same_embed_dim = (self.kdim == embed_dim and\n self.vdim == embed_dim)\n\n self.num_heads = num_heads\n self.dropout = dropout\n self.batch_first = batch_first\n self.head_dim = embed_dim // num_heads\n assert (self.head_dim * num_heads == self.embed_dim\n ), \"embed_dim must be divisible by num_heads\"\n\n if add_bias_kv:\n self.bias_k = Parameter(\n torch.empty((1, 1, embed_dim), **factory_kwargs))\n self.bias_v = Parameter(\n torch.empty((1, 1, embed_dim), **factory_kwargs))\n else:\n self.bias_k = self.bias_v = None\n\n if linear1_cls == Linear:\n if not self._qkv_same_embed_dim:\n self.q_proj_weight = Parameter(\n torch.empty((embed_dim, embed_dim), **factory_kwargs))\n self.k_proj_weight = Parameter(\n torch.empty((embed_dim, self.kdim), **factory_kwargs))\n self.v_proj_weight = Parameter(\n torch.empty((embed_dim, self.vdim), **factory_kwargs))\n self.register_parameter(\"in_proj_weight\", None)\n else:\n self.in_proj_weight = Parameter(\n torch.empty((3 * embed_dim, embed_dim), **factory_kwargs))\n self.register_parameter(\"q_proj_weight\", None)\n self.register_parameter(\"k_proj_weight\", None)\n self.register_parameter(\"v_proj_weight\", None)\n\n if bias:\n self.in_proj_bias = Parameter(\n torch.empty(3 * embed_dim, **factory_kwargs))\n else:\n self.register_parameter(\"in_proj_bias\", None)\n self.out_proj = NonDynamicallyQuantizableLinear(\n embed_dim, embed_dim, bias=bias, **factory_kwargs)\n\n self._reset_parameters()\n else:\n if not self._qkv_same_embed_dim:\n raise NotImplementedError\n else:\n self.in_proj_linear = linear1_cls(\n embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)\n self.in_proj_weight = self.in_proj_linear.weight\n\n self.register_parameter(\"q_proj_weight\", None)\n self.register_parameter(\"k_proj_weight\", None)\n self.register_parameter(\"v_proj_weight\", None)\n\n if bias:\n self.in_proj_bias = self.in_proj_linear.bias\n else:\n self.register_parameter(\"in_proj_bias\", None)\n\n self.out_proj = linear2_cls(\n embed_dim, embed_dim, bias=bias, **factory_kwargs)\n\n if self.bias_k is not None:\n xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n xavier_normal_(self.bias_v)\n\n self.add_zero_attn = add_zero_attn\n\n def _reset_parameters(self):\n if self._qkv_same_embed_dim:\n xavier_uniform_(self.in_proj_weight)\n else:\n xavier_uniform_(self.q_proj_weight)\n xavier_uniform_(self.k_proj_weight)\n xavier_uniform_(self.v_proj_weight)\n\n if self.in_proj_bias is not None:\n constant_(self.in_proj_bias, 0.0)\n constant_(self.out_proj.bias, 0.0)\n\n if self.bias_k is not None:\n xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n xavier_normal_(self.bias_v)\n\n def __setstate__(self, state):\n # Support 
loading old MultiheadAttention checkpoints generated by v1.1.0\n if \"_qkv_same_embed_dim\" not in state:\n state[\"_qkv_same_embed_dim\"] = True\n\n super(MultiheadAttention, self).__setstate__(state)\n\n def forward(\n self,\n query: Tensor,\n key: Tensor,\n value: Tensor,\n key_padding_mask: Optional[Tensor]=None,\n need_weights: bool=True,\n attn_mask: Optional[Tensor]=None,\n average_attn_weights: bool=True,\n ) -> Tuple[Tensor, Optional[Tensor]]:\n r\"\"\"\n Args:\n query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``\n or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,\n :math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.\n Queries are compared against key-value pairs to produce the output.\n See \"Attention Is All You Need\" for more details.\n key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``\n or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,\n :math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.\n See \"Attention Is All You Need\" for more details.\n value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when\n ``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source\n sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.\n See \"Attention Is All You Need\" for more details.\n key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``\n to ignore for the purpose of attention (i.e. treat as \"padding\"). For unbatched `query`, shape should be :math:`(S)`.\n Binary and byte masks are supported.\n For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for\n the purpose of attention. For a float mask, it will be directly added to the corresponding ``key`` value.\n need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.\n Default: ``True``.\n attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape\n :math:`(L, S)` or :math:`(N\\cdot\\text{num\\_heads}, L, S)`, where :math:`N` is the batch size,\n :math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be\n broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.\n Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the\n corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the\n corresponding position is not allowed to attend. For a float mask, the mask values will be added to\n the attention weight.\n average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across\n heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an\n effect when ``need_weights=True``. Default: ``True`` (i.e. 
average weights across heads)\n\n Outputs:\n - **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,\n :math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,\n where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the\n embedding dimension ``embed_dim``.\n - **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,\n returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or\n :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and\n :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per\n head of shape :math:`(\\text{num\\_heads}, L, S)` when input is unbatched or :math:`(N, \\text{num\\_heads}, L, S)`.\n\n .. note::\n `batch_first` argument is ignored for unbatched inputs.\n \"\"\"\n is_batched = query.dim() == 3\n if key_padding_mask is not None:\n _kpm_dtype = key_padding_mask.dtype\n if _kpm_dtype != torch.bool and not torch.is_floating_point(\n key_padding_mask):\n raise AssertionError(\n \"only bool and floating types of key_padding_mask are supported\"\n )\n why_not_fast_path = \"\"\n if not is_batched:\n why_not_fast_path = f\"input not batched; expected query.dim() of 3 but got {query.dim()}\"\n elif query is not key or key is not value:\n # When lifting this restriction, don't forget to either\n # enforce that the dtypes all match or test cases where\n # they don't!\n why_not_fast_path = \"non-self attention was used (query, key, and value are not the same Tensor)\"\n elif (self.in_proj_bias is not None and\n query.dtype != self.in_proj_bias.dtype):\n why_not_fast_path = f\"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match\"\n elif (self.in_proj_weight is not None and\n query.dtype != self.in_proj_weight.dtype):\n # this case will fail anyway, but at least they'll get a useful error message.\n why_not_fast_path = f\"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match\"\n elif self.training:\n why_not_fast_path = \"training is enabled\"\n elif not self.batch_first:\n why_not_fast_path = \"batch_first was not True\"\n elif self.bias_k is not None:\n why_not_fast_path = \"self.bias_k was not None\"\n elif self.bias_v is not None:\n why_not_fast_path = \"self.bias_v was not None\"\n elif self.dropout:\n why_not_fast_path = f\"dropout was {self.dropout}, required zero\"\n elif self.add_zero_attn:\n why_not_fast_path = \"add_zero_attn was enabled\"\n elif not self._qkv_same_embed_dim:\n why_not_fast_path = \"_qkv_same_embed_dim was not True\"\n elif attn_mask is not None:\n why_not_fast_path = \"attn_mask was not None\"\n elif query.is_nested and key_padding_mask is not None:\n why_not_fast_path = (\n \"key_padding_mask is not supported with NestedTensor input\")\n elif self.num_heads % 2 == 1:\n why_not_fast_path = \"num_heads is odd\"\n elif torch.is_autocast_enabled():\n why_not_fast_path = \"autocast is enabled\"\n\n if not why_not_fast_path:\n tensor_args = (query, key, value, self.in_proj_weight,\n self.in_proj_bias, self.out_proj.weight,\n self.out_proj.bias, )\n # We have to use list comprehensions below because TorchScript does not support\n # generator expressions.\n if torch.overrides.has_torch_function(tensor_args):\n why_not_fast_path = \"some Tensor argument has_torch_function\"\n elif not all([(x is 
None or x.is_cuda or \"cpu\" in str(x.device))\n for x in tensor_args]):\n why_not_fast_path = (\n \"some Tensor argument is neither CUDA nor CPU\")\n elif torch.is_grad_enabled() and any(\n [x is not None and x.requires_grad for x in tensor_args]):\n why_not_fast_path = (\n \"grad is enabled and at least one of query or the \"\n \"input/output projection weights or biases requires_grad\")\n if not why_not_fast_path:\n return torch._native_multi_head_attention(\n query,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n self.in_proj_weight,\n self.in_proj_bias,\n self.out_proj.weight,\n self.out_proj.bias,\n key_padding_mask\n if key_padding_mask is not None else attn_mask,\n need_weights,\n average_attn_weights,\n 1 if key_padding_mask is not None else 0\n if attn_mask is not None else None, )\n\n any_nested = query.is_nested or key.is_nested or value.is_nested\n assert not any_nested, (\n \"MultiheadAttention does not support NestedTensor outside of its fast path. \"\n + f\"The fast path was not hit because {why_not_fast_path}\")\n\n if self.batch_first and is_batched:\n # make sure that the transpose op does not affect the \"is\" property\n if key is value:\n if query is key:\n query = key = value = query.transpose(1, 0)\n else:\n query, key = [x.transpose(1, 0) for x in (query, key)]\n value = key\n else:\n query, key, value = [\n x.transpose(1, 0) for x in (query, key, value)\n ]\n\n if not self._qkv_same_embed_dim:\n attn_output, attn_output_weights = F.multi_head_attention_forward(\n query,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n self.in_proj_weight,\n self.in_proj_bias,\n self.bias_k,\n self.bias_v,\n self.add_zero_attn,\n self.dropout,\n self.out_proj.weight,\n self.out_proj.bias,\n training=self.training,\n key_padding_mask=key_padding_mask,\n need_weights=need_weights,\n attn_mask=attn_mask,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj_weight,\n k_proj_weight=self.k_proj_weight,\n v_proj_weight=self.v_proj_weight,\n average_attn_weights=average_attn_weights, )\n else:\n attn_output, attn_output_weights = F.multi_head_attention_forward(\n query,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n self.in_proj_weight,\n self.in_proj_bias,\n self.bias_k,\n self.bias_v,\n self.add_zero_attn,\n self.dropout,\n self.out_proj.weight,\n self.out_proj.bias,\n training=self.training,\n key_padding_mask=key_padding_mask,\n need_weights=need_weights,\n attn_mask=attn_mask,\n average_attn_weights=average_attn_weights, )\n if self.batch_first and is_batched:\n return attn_output.transpose(1, 0), attn_output_weights\n else:\n return attn_output, attn_output_weights" }, { "identifier": "BalancedDoubleSwish", "path": "AR/modules/scaling.py", "snippet": "def BalancedDoubleSwish(d_model, channel_dim=-1, max_abs=10.0,\n min_prob=0.25) -> nn.Sequential:\n \"\"\"\n ActivationBalancer -> DoubleSwish\n \"\"\"\n balancer = ActivationBalancer(\n d_model, channel_dim=channel_dim, max_abs=max_abs, min_prob=min_prob)\n return nn.Sequential(\n balancer,\n DoubleSwish(), )" } ]
import copy
import numbers
import torch
from functools import partial
from typing import Any
from typing import Callable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from AR.modules.activation import MultiheadAttention
from AR.modules.scaling import BalancedDoubleSwish
from torch import nn
from torch import Tensor
from torch.nn import functional as F
6,112
assert embedding is None return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps) def extra_repr(self) -> str: return ( "{normalized_shape}, eps={eps}, " "elementwise_affine={elementwise_affine}".format(**self.__dict__)) class IdentityNorm(nn.Module): def __init__( self, d_model: int, eps: float=1e-5, device=None, dtype=None, ) -> None: super(IdentityNorm, self).__init__() def forward(self, input: Tensor, embedding: Any=None) -> Tensor: if isinstance(input, tuple): return input assert embedding is None return input class TransformerEncoder(nn.Module): r"""TransformerEncoder is a stack of N encoder layers. Users can build the BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters. Args: encoder_layer: an instance of the TransformerEncoderLayer() class (required). num_layers: the number of sub-encoder-layers in the encoder (required). norm: the layer normalization component (optional). enable_nested_tensor: if True, input will automatically convert to nested tensor (and convert back on output). This will improve the overall performance of TransformerEncoder when padding rate is high. Default: ``True`` (enabled). Examples:: >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8) >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6) >>> src = torch.rand(10, 32, 512) >>> out = transformer_encoder(src) """ __constants__ = ["norm"] def __init__(self, encoder_layer, num_layers, norm=None): super(TransformerEncoder, self).__init__() self.layers = _get_clones(encoder_layer, num_layers) self.num_layers = num_layers self.norm = norm def forward( self, src: Tensor, mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, return_layer_states: bool=False, ) -> Tensor: r"""Pass the input through the encoder layers in turn. Args: src: the sequence to the encoder (required). mask: the mask for the src sequence (optional). src_key_padding_mask: the mask for the src keys per batch (optional). return_layer_states: return layers' state (optional). Shape: see the docs in Transformer class. """ if return_layer_states: layer_states = [] # layers' output output = src for mod in self.layers: output = mod( output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, ) layer_states.append(output[0]) if self.norm is not None: output = self.norm(output) return layer_states, output output = src for mod in self.layers: output = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask) if self.norm is not None: output = self.norm(output) return output class TransformerEncoderLayer(nn.Module): __constants__ = ["batch_first", "norm_first"] def __init__( self, d_model: int, nhead: int, dim_feedforward: int=2048, dropout: float=0.1, activation: Union[str, Callable[[Tensor], Tensor]]=F.relu, batch_first: bool=False, norm_first: bool=False, device=None, dtype=None, linear1_self_attention_cls: nn.Module=nn.Linear, linear2_self_attention_cls: nn.Module=nn.Linear, linear1_feedforward_cls: nn.Module=nn.Linear, linear2_feedforward_cls: nn.Module=nn.Linear, layer_norm_cls: nn.Module=LayerNorm, layer_norm_eps: float=1e-5, adaptive_layer_norm=False, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super(TransformerEncoderLayer, self).__init__()
# modified from https://github.com/lifeiteng/vall-e/blob/main/valle/modules/transformer.py _shape_t = Union[int, List[int], torch.Size] class LayerNorm(nn.Module): __constants__ = ["normalized_shape", "eps", "elementwise_affine"] normalized_shape: Tuple[int, ...] eps: float elementwise_affine: bool def __init__( self, normalized_shape: _shape_t, eps: float=1e-5, elementwise_affine: bool=True, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super(LayerNorm, self).__init__() if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape, ) # type: ignore[assignment] self.normalized_shape = tuple( normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = elementwise_affine if self.elementwise_affine: self.weight = nn.Parameter( torch.empty(self.normalized_shape, **factory_kwargs)) self.bias = nn.Parameter( torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter("weight", None) self.register_parameter("bias", None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, input: Tensor, embedding: Any=None) -> Tensor: if isinstance(input, tuple): input, embedding = input return (F.layer_norm( input, self.normalized_shape, self.weight, self.bias, self.eps, ), embedding, ) assert embedding is None return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps) def extra_repr(self) -> str: return ( "{normalized_shape}, eps={eps}, " "elementwise_affine={elementwise_affine}".format(**self.__dict__)) class IdentityNorm(nn.Module): def __init__( self, d_model: int, eps: float=1e-5, device=None, dtype=None, ) -> None: super(IdentityNorm, self).__init__() def forward(self, input: Tensor, embedding: Any=None) -> Tensor: if isinstance(input, tuple): return input assert embedding is None return input class TransformerEncoder(nn.Module): r"""TransformerEncoder is a stack of N encoder layers. Users can build the BERT(https://arxiv.org/abs/1810.04805) model with corresponding parameters. Args: encoder_layer: an instance of the TransformerEncoderLayer() class (required). num_layers: the number of sub-encoder-layers in the encoder (required). norm: the layer normalization component (optional). enable_nested_tensor: if True, input will automatically convert to nested tensor (and convert back on output). This will improve the overall performance of TransformerEncoder when padding rate is high. Default: ``True`` (enabled). Examples:: >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8) >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6) >>> src = torch.rand(10, 32, 512) >>> out = transformer_encoder(src) """ __constants__ = ["norm"] def __init__(self, encoder_layer, num_layers, norm=None): super(TransformerEncoder, self).__init__() self.layers = _get_clones(encoder_layer, num_layers) self.num_layers = num_layers self.norm = norm def forward( self, src: Tensor, mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, return_layer_states: bool=False, ) -> Tensor: r"""Pass the input through the encoder layers in turn. Args: src: the sequence to the encoder (required). mask: the mask for the src sequence (optional). src_key_padding_mask: the mask for the src keys per batch (optional). return_layer_states: return layers' state (optional). Shape: see the docs in Transformer class. 
""" if return_layer_states: layer_states = [] # layers' output output = src for mod in self.layers: output = mod( output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, ) layer_states.append(output[0]) if self.norm is not None: output = self.norm(output) return layer_states, output output = src for mod in self.layers: output = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask) if self.norm is not None: output = self.norm(output) return output class TransformerEncoderLayer(nn.Module): __constants__ = ["batch_first", "norm_first"] def __init__( self, d_model: int, nhead: int, dim_feedforward: int=2048, dropout: float=0.1, activation: Union[str, Callable[[Tensor], Tensor]]=F.relu, batch_first: bool=False, norm_first: bool=False, device=None, dtype=None, linear1_self_attention_cls: nn.Module=nn.Linear, linear2_self_attention_cls: nn.Module=nn.Linear, linear1_feedforward_cls: nn.Module=nn.Linear, linear2_feedforward_cls: nn.Module=nn.Linear, layer_norm_cls: nn.Module=LayerNorm, layer_norm_eps: float=1e-5, adaptive_layer_norm=False, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiheadAttention(
0
2023-10-30 04:40:19+00:00
8k
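The transformer.py snippet in the record above wraps LayerNorm so that its forward accepts either a plain tensor or an (input, embedding) tuple, passing the embedding through untouched; this is what lets the adaptive-norm variants thread a conditioning embedding through the encoder stack. A minimal usage sketch of that calling convention, assuming the LayerNorm class defined in the snippet is in scope (only torch is needed otherwise):

import torch

# Hypothetical usage of the tuple-passing LayerNorm from the snippet above.
norm = LayerNorm(512)
x = torch.randn(10, 32, 512)       # (seq, batch, d_model)
emb = torch.randn(32, 64)          # auxiliary conditioning embedding

y = norm(x)                        # plain-tensor path: ordinary layer norm
y2, emb_out = norm((x, emb))       # tuple path: embedding is returned unchanged
assert emb_out is emb and y2.shape == x.shape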
nv-tlabs/vid2player3d
embodied_pose/utils/torch_transform.py
[ { "identifier": "angle_axis_to_quaternion", "path": "embodied_pose/utils/konia_transform.py", "snippet": "@torch.jit.script\ndef angle_axis_to_quaternion(\n angle_axis: torch.Tensor, eps: float = 1.0e-6, order: QuaternionCoeffOrder = QuaternionCoeffOrder.WXYZ\n) -> torch.Tensor:\n r\"\"\"Convert an angle axis to a quaternion.\n\n The quaternion vector has components in (x, y, z, w) or (w, x, y, z) format.\n\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n\n Args:\n angle_axis: tensor with angle axis.\n order: quaternion coefficient order. Note: 'xyzw' will be deprecated in favor of 'wxyz'.\n\n Return:\n tensor with quaternion.\n\n Shape:\n - Input: :math:`(*, 3)` where `*` means, any number of dimensions\n - Output: :math:`(*, 4)`\n\n Example:\n >>> angle_axis = torch.rand(2, 3) # Nx3\n >>> quaternion = angle_axis_to_quaternion(angle_axis, order=QuaternionCoeffOrder.WXYZ) # Nx4\n \"\"\"\n\n if not angle_axis.shape[-1] == 3:\n raise ValueError(f\"Input must be a tensor of shape Nx3 or 3. Got {angle_axis.shape}\")\n\n if not torch.jit.is_scripting():\n if order.name not in QuaternionCoeffOrder.__members__.keys():\n raise ValueError(f\"order must be one of {QuaternionCoeffOrder.__members__.keys()}\")\n\n if order == QuaternionCoeffOrder.XYZW:\n warnings.warn(\n \"`XYZW` quaternion coefficient order is deprecated and\"\n \" will be removed after > 0.6. \"\n \"Please use `QuaternionCoeffOrder.WXYZ` instead.\"\n )\n\n # unpack input and compute conversion\n a0: torch.Tensor = angle_axis[..., 0:1]\n a1: torch.Tensor = angle_axis[..., 1:2]\n a2: torch.Tensor = angle_axis[..., 2:3]\n theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2\n\n theta: torch.Tensor = torch.sqrt((theta_squared).clamp_min(eps))\n half_theta: torch.Tensor = theta * 0.5\n\n mask: torch.Tensor = theta_squared > 0.0\n ones: torch.Tensor = torch.ones_like(half_theta)\n\n k_neg: torch.Tensor = 0.5 * ones\n k_pos: torch.Tensor = safe_zero_division(torch.sin(half_theta), theta, eps)\n k: torch.Tensor = torch.where(mask, k_pos, k_neg)\n w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)\n\n quaternion: torch.Tensor = torch.zeros(\n size=angle_axis.shape[:-1] + (4,), dtype=angle_axis.dtype, device=angle_axis.device\n )\n if order == QuaternionCoeffOrder.XYZW:\n quaternion[..., 0:1] = a0 * k\n quaternion[..., 1:2] = a1 * k\n quaternion[..., 2:3] = a2 * k\n quaternion[..., 3:4] = w\n else:\n quaternion[..., 1:2] = a0 * k\n quaternion[..., 2:3] = a1 * k\n quaternion[..., 3:4] = a2 * k\n quaternion[..., 0:1] = w\n return quaternion" }, { "identifier": "quaternion_to_rotation_matrix", "path": "embodied_pose/utils/konia_transform.py", "snippet": "@torch.jit.script\ndef quaternion_to_rotation_matrix(\n quaternion: torch.Tensor, order: QuaternionCoeffOrder = QuaternionCoeffOrder.WXYZ\n) -> torch.Tensor:\n r\"\"\"Converts a quaternion to a rotation matrix.\n\n The quaternion should be in (x, y, z, w) or (w, x, y, z) format.\n\n Args:\n quaternion: a tensor containing a quaternion to be converted.\n The tensor can be of shape :math:`(*, 4)`.\n order: quaternion coefficient order. 
Note: 'xyzw' will be deprecated in favor of 'wxyz'.\n\n Return:\n the rotation matrix of shape :math:`(*, 3, 3)`.\n\n Example:\n >>> quaternion = torch.tensor((0., 0., 0., 1.))\n >>> quaternion_to_rotation_matrix(quaternion, order=QuaternionCoeffOrder.WXYZ)\n tensor([[-1., 0., 0.],\n [ 0., -1., 0.],\n [ 0., 0., 1.]])\n \"\"\"\n if not isinstance(quaternion, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(quaternion)}\")\n\n if not quaternion.shape[-1] == 4:\n raise ValueError(f\"Input must be a tensor of shape (*, 4). Got {quaternion.shape}\")\n\n if not torch.jit.is_scripting():\n if order.name not in QuaternionCoeffOrder.__members__.keys():\n raise ValueError(f\"order must be one of {QuaternionCoeffOrder.__members__.keys()}\")\n\n if order == QuaternionCoeffOrder.XYZW:\n warnings.warn(\n \"`XYZW` quaternion coefficient order is deprecated and\"\n \" will be removed after > 0.6. \"\n \"Please use `QuaternionCoeffOrder.WXYZ` instead.\"\n )\n\n # normalize the input quaternion\n quaternion_norm: torch.Tensor = normalize_quaternion(quaternion)\n\n # unpack the normalized quaternion components\n if order == QuaternionCoeffOrder.XYZW:\n x, y, z, w = quaternion_norm[..., 0], quaternion_norm[..., 1], quaternion_norm[..., 2], quaternion_norm[..., 3]\n else:\n w, x, y, z = quaternion_norm[..., 0], quaternion_norm[..., 1], quaternion_norm[..., 2], quaternion_norm[..., 3]\n\n # compute the actual conversion\n tx: torch.Tensor = 2.0 * x\n ty: torch.Tensor = 2.0 * y\n tz: torch.Tensor = 2.0 * z\n twx: torch.Tensor = tx * w\n twy: torch.Tensor = ty * w\n twz: torch.Tensor = tz * w\n txx: torch.Tensor = tx * x\n txy: torch.Tensor = ty * x\n txz: torch.Tensor = tz * x\n tyy: torch.Tensor = ty * y\n tyz: torch.Tensor = tz * y\n tzz: torch.Tensor = tz * z\n one: torch.Tensor = torch.tensor(1.0)\n\n matrix: torch.Tensor = torch.stack(\n (\n one - (tyy + tzz),\n txy - twz,\n txz + twy,\n txy + twz,\n one - (txx + tzz),\n tyz - twx,\n txz - twy,\n tyz + twx,\n one - (txx + tyy),\n ),\n dim=-1,\n ).view(quaternion.shape[:-1] + (3, 3))\n\n # if len(quaternion.shape) == 1:\n # matrix = torch.squeeze(matrix, dim=0)\n return matrix" }, { "identifier": "rotation_matrix_to_quaternion", "path": "embodied_pose/utils/konia_transform.py", "snippet": "@torch.jit.script\ndef rotation_matrix_to_quaternion(\n rotation_matrix: torch.Tensor, eps: float = 1.0e-6, order: QuaternionCoeffOrder = QuaternionCoeffOrder.WXYZ\n) -> torch.Tensor:\n r\"\"\"Convert 3x3 rotation matrix to 4d quaternion vector.\n\n The quaternion vector has components in (w, x, y, z) or (x, y, z, w) format.\n\n .. note::\n The (x, y, z, w) order is going to be deprecated in favor of efficiency.\n\n Args:\n rotation_matrix: the rotation matrix to convert.\n eps: small value to avoid zero division.\n order: quaternion coefficient order. Note: 'xyzw' will be deprecated in favor of 'wxyz'.\n\n Return:\n the rotation in quaternion.\n\n Shape:\n - Input: :math:`(*, 3, 3)`\n - Output: :math:`(*, 4)`\n\n Example:\n >>> input = torch.rand(4, 3, 3) # Nx3x3\n >>> output = rotation_matrix_to_quaternion(input, eps=torch.finfo(input.dtype).eps,\n ... order=QuaternionCoeffOrder.WXYZ) # Nx4\n \"\"\"\n if not isinstance(rotation_matrix, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(rotation_matrix)}\")\n\n if not rotation_matrix.shape[-2:] == (3, 3):\n raise ValueError(f\"Input size must be a (*, 3, 3) tensor. 
Got {rotation_matrix.shape}\")\n\n if not torch.jit.is_scripting():\n if order.name not in QuaternionCoeffOrder.__members__.keys():\n raise ValueError(f\"order must be one of {QuaternionCoeffOrder.__members__.keys()}\")\n\n if order == QuaternionCoeffOrder.XYZW:\n warnings.warn(\n \"`XYZW` quaternion coefficient order is deprecated and\"\n \" will be removed after > 0.6. \"\n \"Please use `QuaternionCoeffOrder.WXYZ` instead.\"\n )\n\n m00, m01, m02 = rotation_matrix[..., 0, 0], rotation_matrix[..., 0, 1], rotation_matrix[..., 0, 2]\n m10, m11, m12 = rotation_matrix[..., 1, 0], rotation_matrix[..., 1, 1], rotation_matrix[..., 1, 2]\n m20, m21, m22 = rotation_matrix[..., 2, 0], rotation_matrix[..., 2, 1], rotation_matrix[..., 2, 2]\n\n trace: torch.Tensor = m00 + m11 + m22\n\n sq = torch.sqrt((trace + 1.0).clamp_min(eps)) * 2.0 # sq = 4 * qw.\n qw = 0.25 * sq\n qx = safe_zero_division(m21 - m12, sq)\n qy = safe_zero_division(m02 - m20, sq)\n qz = safe_zero_division(m10 - m01, sq)\n if order == QuaternionCoeffOrder.XYZW:\n trace_positive_cond = torch.stack((qx, qy, qz, qw), dim=-1)\n trace_positive_cond = torch.stack((qw, qx, qy, qz), dim=-1)\n\n sq = torch.sqrt((1.0 + m00 - m11 - m22).clamp_min(eps)) * 2.0 # sq = 4 * qx.\n qw = safe_zero_division(m21 - m12, sq)\n qx = 0.25 * sq\n qy = safe_zero_division(m01 + m10, sq)\n qz = safe_zero_division(m02 + m20, sq)\n if order == QuaternionCoeffOrder.XYZW:\n cond_1 = torch.stack((qx, qy, qz, qw), dim=-1)\n cond_1 = torch.stack((qw, qx, qy, qz), dim=-1)\n\n sq = torch.sqrt((1.0 + m11 - m00 - m22).clamp_min(eps)) * 2.0 # sq = 4 * qy.\n qw = safe_zero_division(m02 - m20, sq)\n qx = safe_zero_division(m01 + m10, sq)\n qy = 0.25 * sq\n qz = safe_zero_division(m12 + m21, sq)\n if order == QuaternionCoeffOrder.XYZW:\n cond_2 = torch.stack((qx, qy, qz, qw), dim=-1)\n cond_2 = torch.stack((qw, qx, qy, qz), dim=-1)\n\n sq = torch.sqrt((1.0 + m22 - m00 - m11).clamp_min(eps)) * 2.0 # sq = 4 * qz.\n qw = safe_zero_division(m10 - m01, sq)\n qx = safe_zero_division(m02 + m20, sq)\n qy = safe_zero_division(m12 + m21, sq)\n qz = 0.25 * sq\n if order == QuaternionCoeffOrder.XYZW:\n cond_3 = torch.stack((qx, qy, qz, qw), dim=-1)\n cond_3 = torch.stack((qw, qx, qy, qz), dim=-1)\n\n where_2 = torch.where((m11 > m22).unsqueeze(-1), cond_2, cond_3)\n where_1 = torch.where(((m00 > m11) & (m00 > m22)).unsqueeze(-1), cond_1, where_2)\n\n quaternion: torch.Tensor = torch.where((trace > 0.0).unsqueeze(-1), trace_positive_cond, where_1)\n return quaternion" }, { "identifier": "rotation_matrix_to_angle_axis", "path": "embodied_pose/utils/konia_transform.py", "snippet": "@torch.jit.script\ndef rotation_matrix_to_angle_axis(rotation_matrix: torch.Tensor) -> torch.Tensor:\n r\"\"\"Convert 3x3 rotation matrix to Rodrigues vector.\n\n Args:\n rotation_matrix: rotation matrix.\n\n Returns:\n Rodrigues vector transformation.\n\n Shape:\n - Input: :math:`(N, 3, 3)`\n - Output: :math:`(N, 3)`\n\n Example:\n >>> input = torch.rand(2, 3, 3) # Nx3x3\n >>> output = rotation_matrix_to_angle_axis(input) # Nx3\n \"\"\"\n if not isinstance(rotation_matrix, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(rotation_matrix)}\")\n\n if not rotation_matrix.shape[-2:] == (3, 3):\n raise ValueError(f\"Input size must be a (*, 3, 3) tensor. 
Got {rotation_matrix.shape}\")\n quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix, order=QuaternionCoeffOrder.WXYZ)\n return quaternion_to_angle_axis(quaternion, order=QuaternionCoeffOrder.WXYZ)" }, { "identifier": "angle_axis_to_rotation_matrix", "path": "embodied_pose/utils/konia_transform.py", "snippet": "@torch.jit.script\ndef angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor:\n r\"\"\"Convert 3d vector of axis-angle rotation to 3x3 rotation matrix.\n\n Args:\n angle_axis: tensor of 3d vector of axis-angle rotations.\n\n Returns:\n tensor of 3x3 rotation matrices.\n\n Shape:\n - Input: :math:`(N, 3)`\n - Output: :math:`(N, 3, 3)`\n\n Example:\n >>> input = torch.rand(1, 3) # Nx3\n >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3\n \"\"\"\n if not isinstance(angle_axis, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(angle_axis)))\n\n if not angle_axis.shape[-1] == 3:\n raise ValueError(\"Input size must be a (*, 3) tensor. Got {}\".format(angle_axis.shape))\n\n orig_shape = angle_axis.shape\n angle_axis = angle_axis.reshape(-1, 3)\n\n # stolen from ceres/rotation.h\n\n _angle_axis = torch.unsqueeze(angle_axis, dim=1)\n theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))\n theta2 = torch.squeeze(theta2, dim=1)\n\n # compute rotation matrices\n rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)\n rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)\n\n # create mask to handle both cases\n eps = 1e-6\n mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device)\n mask_pos = (mask).type_as(theta2)\n mask_neg = (mask == torch.tensor(False)).type_as(theta2) # noqa\n\n # create output pose matrix\n batch_size = angle_axis.shape[0]\n rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis)\n rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1)\n # fill output matrix with masked values\n rotation_matrix[..., :3, :3] = mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor\n\n rotation_matrix = rotation_matrix.view(orig_shape[:-1] + (3, 3))\n return rotation_matrix # Nx3x3" } ]
import numpy as np import torch from .konia_transform import angle_axis_to_quaternion, quaternion_to_rotation_matrix, rotation_matrix_to_quaternion, rotation_matrix_to_angle_axis, angle_axis_to_rotation_matrix
6,730
cos[..., 0] * sin[..., 1] * cos[..., 2] + sin[..., 0] * cos[..., 1] * sin[..., 2], cos[..., 0] * cos[..., 1] * sin[..., 2] - sin[..., 0] * sin[..., 1] * cos[..., 2] ], dim=-1) return q def quat_between_two_vec(v1, v2, eps: float = 1e-6): """ quaternion for rotating v1 to v2 """ orig_shape = v1.shape v1 = v1.reshape(-1, 3) v2 = v2.reshape(-1, 3) dot = (v1 * v2).sum(-1) cross = torch.cross(v1, v2, dim=-1) out = torch.cat([(1 + dot).unsqueeze(-1), cross], dim=-1) # handle v1 & v2 with same direction sind = dot > 1 - eps out[sind] = torch.tensor([1., 0., 0., 0.], device=v1.device) # handle v1 & v2 with opposite direction nind = dot < -1 + eps if torch.any(nind): vx = torch.tensor([1., 0., 0.], device=v1.device) vxdot = (v1 * vx).sum(-1).abs() nxind = nind & (vxdot < 1 - eps) if torch.any(nxind): out[nxind] = angle_axis_to_quaternion(normalize(torch.cross(vx.expand_as(v1[nxind]), v1[nxind], dim=-1)) * np.pi) # handle v1 & v2 with opposite direction and they are parallel to x axis pind = nind & (vxdot >= 1 - eps) if torch.any(pind): vy = torch.tensor([0., 1., 0.], device=v1.device) out[pind] = angle_axis_to_quaternion(normalize(torch.cross(vy.expand_as(v1[pind]), v1[pind], dim=-1)) * np.pi) # normalize and reshape out = normalize(out).view(orig_shape[:-1] + (4,)) return out @torch.jit.script def get_yaw(q, eps: float = 1e-6): yaw_atany = 2 * (q[..., 0] * q[..., 3] + q[..., 1] * q[..., 2]) yaw_atanx = 1 - 2 * (q[..., 2] * q[..., 2] + q[..., 3] * q[..., 3]) yaw = torch_safe_atan2(yaw_atany, yaw_atanx, eps) return yaw @torch.jit.script def get_yaw_q(q): yaw = get_yaw(q) angle_axis = torch.cat([torch.zeros(yaw.shape + (2,), device=q.device), yaw.unsqueeze(-1)], dim=-1) heading_q = angle_axis_to_quaternion(angle_axis) return heading_q @torch.jit.script def get_heading(q, eps: float = 1e-6): heading_atany = q[..., 3] heading_atanx = q[..., 0] heading = 2 * torch_safe_atan2(heading_atany, heading_atanx, eps) return heading def get_heading_q(q): q_new = q.clone() q_new[..., 1] = 0 q_new[..., 2] = 0 q_new = normalize(q_new) return q_new @torch.jit.script def heading_to_vec(h_theta): v = torch.stack([torch.cos(h_theta), torch.sin(h_theta)], dim=-1) return v @torch.jit.script def vec_to_heading(h_vec): h_theta = torch_safe_atan2(h_vec[..., 1], h_vec[..., 0]) return h_theta @torch.jit.script def heading_to_quat(h_theta): angle_axis = torch.cat([torch.zeros(h_theta.shape + (2,), device=h_theta.device), h_theta.unsqueeze(-1)], dim=-1) heading_q = angle_axis_to_quaternion(angle_axis) return heading_q def deheading_quat(q, heading_q=None): if heading_q is None: heading_q = get_heading_q(q) dq = quat_mul(quat_conjugate(heading_q), q) return dq @torch.jit.script def rotmat_to_rot6d(mat): rot6d = torch.cat([mat[..., 0], mat[..., 1]], dim=-1) return rot6d @torch.jit.script def rot6d_to_rotmat(rot6d, eps: float = 1e-8): a1 = rot6d[..., :3].clone() a2 = rot6d[..., 3:].clone() ind = torch.norm(a1, dim=-1) < eps a1[ind] = torch.tensor([1.0, 0.0, 0.0], device=a1.device) b1 = normalize(a1) b2 = normalize(a2 - (b1 * a2).sum(dim=-1).unsqueeze(-1) * b1) ind = torch.norm(b2, dim=-1) < eps b2[ind] = torch.tensor([0.0, 1.0, 0.0], device=b2.device) b3 = torch.cross(b1, b2, dim=-1) mat = torch.stack([b1, b2, b3], dim=-1) return mat @torch.jit.script def angle_axis_to_rot6d(aa):
def normalize(x, eps: float = 1e-9): return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1) @torch.jit.script def quat_mul(a, b): assert a.shape == b.shape shape = a.shape a = a.reshape(-1, 4) b = b.reshape(-1, 4) w1, x1, y1, z1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3] w2, x2, y2, z2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3] ww = (z1 + x1) * (x2 + y2) yy = (w1 - y1) * (w2 + z2) zz = (w1 + y1) * (w2 - z2) xx = ww + yy + zz qq = 0.5 * (xx + (z1 - x1) * (x2 - y2)) w = qq - ww + (z1 - y1) * (y2 - z2) x = qq - xx + (x1 + w1) * (x2 + w2) y = qq - yy + (w1 - x1) * (y2 + z2) z = qq - zz + (z1 + y1) * (w2 - x2) return torch.stack([w, x, y, z], dim=-1).view(shape) @torch.jit.script def quat_conjugate(a): shape = a.shape a = a.reshape(-1, 4) return torch.cat((a[:, 0:1], -a[:, 1:]), dim=-1).view(shape) @torch.jit.script def quat_apply(a, b): shape = b.shape a = a.reshape(-1, 4) b = b.reshape(-1, 3) xyz = a[:, 1:].clone() t = xyz.cross(b, dim=-1) * 2 return (b + a[:, 0:1].clone() * t + xyz.cross(t, dim=-1)).view(shape) @torch.jit.script def quat_angle(a, eps: float = 1e-6): shape = a.shape a = a.reshape(-1, 4) s = 2 * (a[:, 0] ** 2) - 1 s = s.clamp(-1 + eps, 1 - eps) s = s.acos() return s.view(shape[:-1]) @torch.jit.script def quat_angle_diff(quat1, quat2): return quat_angle(quat_mul(quat1, quat_conjugate(quat2))) @torch.jit.script def torch_safe_atan2(y, x, eps: float = 1e-8): y = y.clone() y[(y.abs() < eps) & (x.abs() < eps)] += eps return torch.atan2(y, x) @torch.jit.script def ypr_euler_from_quat(q, handle_singularity: bool = False, eps: float = 1e-6, singular_eps: float = 1e-6): """ convert quaternion to yaw-pitch-roll euler angles """ yaw_atany = 2 * (q[..., 0] * q[..., 3] + q[..., 1] * q[..., 2]) yaw_atanx = 1 - 2 * (q[..., 2] * q[..., 2] + q[..., 3] * q[..., 3]) roll_atany = 2 * (q[..., 0] * q[..., 1] + q[..., 2] * q[..., 3]) roll_atanx = 1 - 2 * (q[..., 1] * q[..., 1] + q[..., 2] * q[..., 2]) yaw = torch_safe_atan2(yaw_atany, yaw_atanx, eps) pitch = torch.asin(torch.clamp(2 * (q[..., 0] * q[..., 2] - q[..., 1] * q[..., 3]), min=-1 + eps, max=1 - eps)) roll = torch_safe_atan2(roll_atany, roll_atanx, eps) if handle_singularity: """ handle two special cases """ test = q[..., 0] * q[..., 2] - q[..., 1] * q[..., 3] # north pole, pitch ~= 90 degrees np_ind = test > 0.5 - singular_eps if torch.any(np_ind): # print('ypr_euler_from_quat singularity -- north pole!') roll[np_ind] = 0.0 pitch[np_ind].clamp_max_(0.5 * np.pi) yaw_atany = q[..., 3][np_ind] yaw_atanx = q[..., 0][np_ind] yaw[np_ind] = 2 * torch_safe_atan2(yaw_atany, yaw_atanx, eps) # south pole, pitch ~= -90 degrees sp_ind = test < -0.5 + singular_eps if torch.any(sp_ind): # print('ypr_euler_from_quat singularity -- south pole!') roll[sp_ind] = 0.0 pitch[sp_ind].clamp_min_(-0.5 * np.pi) yaw_atany = q[..., 3][sp_ind] yaw_atanx = q[..., 0][sp_ind] yaw[sp_ind] = 2 * torch_safe_atan2(yaw_atany, yaw_atanx, eps) return torch.stack([roll, pitch, yaw], dim=-1) @torch.jit.script def quat_from_ypr_euler(angles): """ convert yaw-pitch-roll euler angles to quaternion """ half_ang = angles * 0.5 sin = torch.sin(half_ang) cos = torch.cos(half_ang) q = torch.stack([ cos[..., 0] * cos[..., 1] * cos[..., 2] + sin[..., 0] * sin[..., 1] * sin[..., 2], sin[..., 0] * cos[..., 1] * cos[..., 2] - cos[..., 0] * sin[..., 1] * sin[..., 2], cos[..., 0] * sin[..., 1] * cos[..., 2] + sin[..., 0] * cos[..., 1] * sin[..., 2], cos[..., 0] * cos[..., 1] * sin[..., 2] - sin[..., 0] * sin[..., 1] * cos[..., 2] ], dim=-1) return q def quat_between_two_vec(v1, v2, eps: 
float = 1e-6): """ quaternion for rotating v1 to v2 """ orig_shape = v1.shape v1 = v1.reshape(-1, 3) v2 = v2.reshape(-1, 3) dot = (v1 * v2).sum(-1) cross = torch.cross(v1, v2, dim=-1) out = torch.cat([(1 + dot).unsqueeze(-1), cross], dim=-1) # handle v1 & v2 with same direction sind = dot > 1 - eps out[sind] = torch.tensor([1., 0., 0., 0.], device=v1.device) # handle v1 & v2 with opposite direction nind = dot < -1 + eps if torch.any(nind): vx = torch.tensor([1., 0., 0.], device=v1.device) vxdot = (v1 * vx).sum(-1).abs() nxind = nind & (vxdot < 1 - eps) if torch.any(nxind): out[nxind] = angle_axis_to_quaternion(normalize(torch.cross(vx.expand_as(v1[nxind]), v1[nxind], dim=-1)) * np.pi) # handle v1 & v2 with opposite direction and they are parallel to x axis pind = nind & (vxdot >= 1 - eps) if torch.any(pind): vy = torch.tensor([0., 1., 0.], device=v1.device) out[pind] = angle_axis_to_quaternion(normalize(torch.cross(vy.expand_as(v1[pind]), v1[pind], dim=-1)) * np.pi) # normalize and reshape out = normalize(out).view(orig_shape[:-1] + (4,)) return out @torch.jit.script def get_yaw(q, eps: float = 1e-6): yaw_atany = 2 * (q[..., 0] * q[..., 3] + q[..., 1] * q[..., 2]) yaw_atanx = 1 - 2 * (q[..., 2] * q[..., 2] + q[..., 3] * q[..., 3]) yaw = torch_safe_atan2(yaw_atany, yaw_atanx, eps) return yaw @torch.jit.script def get_yaw_q(q): yaw = get_yaw(q) angle_axis = torch.cat([torch.zeros(yaw.shape + (2,), device=q.device), yaw.unsqueeze(-1)], dim=-1) heading_q = angle_axis_to_quaternion(angle_axis) return heading_q @torch.jit.script def get_heading(q, eps: float = 1e-6): heading_atany = q[..., 3] heading_atanx = q[..., 0] heading = 2 * torch_safe_atan2(heading_atany, heading_atanx, eps) return heading def get_heading_q(q): q_new = q.clone() q_new[..., 1] = 0 q_new[..., 2] = 0 q_new = normalize(q_new) return q_new @torch.jit.script def heading_to_vec(h_theta): v = torch.stack([torch.cos(h_theta), torch.sin(h_theta)], dim=-1) return v @torch.jit.script def vec_to_heading(h_vec): h_theta = torch_safe_atan2(h_vec[..., 1], h_vec[..., 0]) return h_theta @torch.jit.script def heading_to_quat(h_theta): angle_axis = torch.cat([torch.zeros(h_theta.shape + (2,), device=h_theta.device), h_theta.unsqueeze(-1)], dim=-1) heading_q = angle_axis_to_quaternion(angle_axis) return heading_q def deheading_quat(q, heading_q=None): if heading_q is None: heading_q = get_heading_q(q) dq = quat_mul(quat_conjugate(heading_q), q) return dq @torch.jit.script def rotmat_to_rot6d(mat): rot6d = torch.cat([mat[..., 0], mat[..., 1]], dim=-1) return rot6d @torch.jit.script def rot6d_to_rotmat(rot6d, eps: float = 1e-8): a1 = rot6d[..., :3].clone() a2 = rot6d[..., 3:].clone() ind = torch.norm(a1, dim=-1) < eps a1[ind] = torch.tensor([1.0, 0.0, 0.0], device=a1.device) b1 = normalize(a1) b2 = normalize(a2 - (b1 * a2).sum(dim=-1).unsqueeze(-1) * b1) ind = torch.norm(b2, dim=-1) < eps b2[ind] = torch.tensor([0.0, 1.0, 0.0], device=b2.device) b3 = torch.cross(b1, b2, dim=-1) mat = torch.stack([b1, b2, b3], dim=-1) return mat @torch.jit.script def angle_axis_to_rot6d(aa):
return rotmat_to_rot6d(angle_axis_to_rotation_matrix(aa))
4
2023-10-30 20:43:43+00:00
8k
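The torch_transform.py record above builds the usual chain of rotation representations (axis-angle, quaternion, rotation matrix, continuous 6D). A small round-trip sketch, assuming the functions defined in that snippet (angle_axis_to_rot6d, rot6d_to_rotmat, angle_axis_to_rotation_matrix) are importable; since the 6D representation is just the first two columns of a valid rotation matrix, the Gram-Schmidt reconstruction should recover the original matrix up to numerical error:

import torch

# Hypothetical round-trip through the 6D rotation representation from the snippet.
aa = torch.randn(8, 3) * 0.5                  # batch of axis-angle rotations
rot6d = angle_axis_to_rot6d(aa)               # (8, 6): first two matrix columns
mat = rot6d_to_rotmat(rot6d)                  # Gram-Schmidt back to (8, 3, 3)
ref = angle_axis_to_rotation_matrix(aa)
assert torch.allclose(mat, ref, atol=1e-4)    # columns were already orthonormal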
vLAR-group/RayDF
run_mv.py
[ { "identifier": "config_parser", "path": "config.py", "snippet": "def config_parser():\n parser = configargparse.ArgumentParser()\n parser.add_argument('--config', is_config_file=True,\n help='config file path')\n parser.add_argument(\"--eval_only\", action='store_true',\n help='only evaluation with pretrained model')\n\n # parameterization options\n parser.add_argument(\"--radius\", type=float, default=1.5,\n help='radius of sphere for distance field')\n\n # training options\n parser.add_argument(\"--N_rand\", type=int, default=8192,\n help='batch size')\n parser.add_argument(\"--N_iters\", type=int, default=100000,\n help='number of epochs')\n parser.add_argument(\"--lrate\", type=float, default=1e-4,\n help='learning rate')\n\n # classifier options\n parser.add_argument(\"--dist_thres\", type=float, default=1e-2,\n help='threshold to determine if the query point is occluded for the sampled view')\n parser.add_argument(\"--vis_thres\", type=float, default=0.5,\n help='threshold for binary classification')\n parser.add_argument(\"--netdepth_cls\", type=int, default=8,\n help='layers in visibilit classifier')\n parser.add_argument(\"--netwidth_cls\", type=int, default=512,\n help='channels per layer')\n parser.add_argument(\"--ext_layer_cls\", type=int, default=1,\n help='number of layers to extract individual features')\n parser.add_argument(\"--pos_weight\", type=float, default=1.,\n help='positive weight for cross-entropy loss')\n\n # multiview optimization options\n parser.add_argument(\"--N_views\", type=int, default=20,\n help='the number of reference views per ray')\n parser.add_argument(\"--w_rgb\", type=float, default=1.,\n help='weight of rgb loss')\n parser.add_argument(\"--ckpt_path_cls\", type=str, default=None,\n help='checkpoint path of classifier to reload')\n\n # ray-surface distance network\n parser.add_argument(\"--netdepth\", type=int, default=13,\n help='layers in network')\n parser.add_argument(\"--netwidth\", type=int, default=1024,\n help='channels per layer')\n parser.add_argument(\"--rgb_layer\", type=int, default=0,\n help='if true, network predicts radiance')\n parser.add_argument(\"--denoise\", action='store_true',\n help='if true, compute gradients to remove outliers')\n parser.add_argument(\"--grad_normal\", action='store_true',\n help='if true, use gradients to compute surface normal')\n parser.add_argument(\"--grad_clip\", type=float, default=-1,\n help='maximum clip value for grad norm')\n parser.add_argument(\"--outlier_thres\", type=float, default=10.,\n help='threshold to select outliers for minimizing the surface gradient')\n\n # dataset options\n parser.add_argument(\"--datadir\", type=str, default='./datasets',\n help='input data directory')\n parser.add_argument(\"--dataset\", type=str, required=True,\n help='the name of dataset for train/eval')\n parser.add_argument(\"--scene\", type=str, required=True,\n help='the name of scene for train/eval')\n parser.add_argument(\"--trainskip\", type=int, default=1,\n help='will load 1/N images from test/val sets')\n parser.add_argument(\"--testskip\", type=int, default=8,\n help='will load 1/N images from test/val sets')\n parser.add_argument(\"--voxel_sz\", type=float, default=0.005,\n help='size of voxel for tsdf integration')\n parser.add_argument(\"--cd_sample\", type=int, default=30000,\n help='the number of sampling points to compute chamfer-distance')\n parser.add_argument(\"--continuous\", action='store_true',\n help='output continuous distance maps')\n\n # logging/saving options\n 
parser.add_argument(\"--logdir\", type=str, default='./logs',\n help='where to store ckpts and logs')\n parser.add_argument(\"--expname\", type=str, default='',\n help='experiment name')\n parser.add_argument(\"--i_print\", type=int, default=100,\n help='frequency of console printout and metric loggin')\n parser.add_argument(\"--i_img\", type=int, default=5000,\n help='frequency of image logging')\n parser.add_argument(\"--i_weights\", type=int, default=10000,\n help='frequency of weight ckpt saving')\n\n return parser" }, { "identifier": "log", "path": "utils/log.py", "snippet": "EPS = 1e-8\ndef to_distmap(x, m=None, white_bkgd=True, min=None, max=None):\ndef to_normalmap(x, m=None, white_bkgd=True):\ndef to_colormap(x):\ndef save_config(args):" }, { "identifier": "convert_d", "path": "utils/math.py", "snippet": "def convert_d(d, scene_info, out='dist'):\n H, W, focal = scene_info['H'], scene_info['W'], scene_info['focal']\n i, j = np.meshgrid(np.arange(W, dtype=np.float32),\n np.arange(H, dtype=np.float32), indexing='xy')\n L = np.sqrt(np.power(j - H / 2., 2) + np.power(i - W / 2., 2) + focal ** 2)\n fl = focal / L\n if out == 'dist':\n return d / fl\n elif out == 'dep':\n return d * fl\n else:\n raise NotImplementedError" }, { "identifier": "Dataloader", "path": "utils/dataloader.py", "snippet": "class Dataloader:\n def __init__(self, args, device):\n self.args = args\n self.device = device\n self.N_rand = args.N_rand\n\n i_split, self.all_dists, self.all_images, masks, self.cam_poses, self.scene_info = \\\n dataloder_func[args.dataset](args.datadir, args.trainskip, args.testskip)\n\n # restore scene info\n self.scene_info['sphere_radius'] = args.radius\n self.i_train, self.i_test = i_split\n print('TRAIN views are', self.i_train)\n print('TEST views are', self.i_test)\n\n # compute rays\n all_rays = []\n for i, pose in enumerate(self.cam_poses):\n rays_o, rays_d = get_rays_np(self.scene_info, pose) # (H, W, 3), (H, W, 3), (H, W, 1)\n ray = np.concatenate([rays_o, rays_d], -1)\n all_rays.append(ray)\n all_rays = np.stack(all_rays, axis=0)\n\n self.rays, self.dists, self.masks, self.imgs = {}, {}, {}, {}\n for mode, split in zip(['train', 'test'], [self.i_train, self.i_test]):\n self.rays[mode] = np.reshape(all_rays[split], [-1, 6])\n self.dists[mode] = np.reshape(self.all_dists[split], [-1, 1])\n self.masks[mode] = np.reshape(masks[split], [-1, 1])\n if args.rgb_layer > 0:\n self.imgs[mode] = np.reshape(self.all_images[split], [-1, 3])\n\n # extract foreground rays for train/eval\n self.rays[mode+'_fg'] = self.rays[mode][self.masks[mode][:, 0]==1]\n self.dists[mode+'_fg'] = self.dists[mode][self.masks[mode][:, 0]==1]\n self.masks[mode+'_fg'] = self.masks[mode][self.masks[mode][:, 0]==1]\n if args.rgb_layer > 0:\n self.imgs[mode+'_fg'] = self.imgs[mode][self.masks[mode][:, 0]==1]\n\n\n def __call__(self, inds, mode):\n batch_rays = torch.Tensor(self.rays[mode][inds]).to(self.device)\n dists = torch.Tensor(self.dists[mode][inds]).to(self.device)\n masks = torch.Tensor(self.masks[mode][inds]).to(self.device)\n targ_dict = {'dist': dists, 'mask': masks}\n\n if self.args.rgb_layer > 0:\n images = torch.Tensor(self.imgs[mode][inds]).to(self.device)\n targ_dict['image'] = images\n\n return batch_rays, targ_dict" }, { "identifier": "get_ray_param", "path": "utils/ray.py", "snippet": "def get_ray_param(ray_fn, rays):\n samples, hit_info = ray_fn(rays)\n return samples, hit_info['t0'].detach(), hit_info['ray_dir']" }, { "identifier": "create_net", "path": "net_multiview/network.py", "snippet": 
"def create_net(args, scene_info, device):\n ray_fn, input_ch = get_rayparam_func(scene_info)\n\n # initialise classifier and load ckpt\n model_cls = DualVisClassifier(D=args.netdepth_cls, W=args.netwidth_cls,\n input_ch=input_ch, ext_layer=args.ext_layer_cls).to(device)\n if not args.eval_only:\n print('Reloading vis classifier from', args.ckpt_path_cls)\n cls_ckpt = torch.load(args.ckpt_path_cls)\n model_cls.load_state_dict(cls_ckpt['network_fn'])\n\n # initialise distance network for multiview optimization\n model = RaySurfDNet(D=args.netdepth, W=args.netwidth, input_ch=input_ch, rgb_layer=args.rgb_layer).to(device)\n optimizer = torch.optim.Adam(params=model.parameters(), lr=args.lrate, betas=(0.9, 0.999), capturable=True)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.N_iters, eta_min=args.lrate*0.01)\n\n\n ############# Load checkpoints #############\n ckpts = [os.path.join(args.logdir, args.expname, f) for f in sorted(os.listdir(\n os.path.join(args.logdir, args.expname))) if 'tar' in f]\n print('Found ckpts', ckpts)\n\n start = 0\n if len(ckpts) > 0:\n ckpt_path = ckpts[-1]\n print('Loading ckpt from:', ckpt_path)\n ckpt = torch.load(ckpt_path)\n\n start = ckpt['global_step']\n model.load_state_dict(ckpt['network_fn'])\n optimizer.load_state_dict(ckpt['optimizer'])\n optimizer.param_groups[0]['capturable'] = True\n scheduler.load_state_dict(ckpt['scheduler'])\n scheduler.last_epoch = ckpt['global_step']\n\n return ray_fn, start, model, model_cls, optimizer, scheduler" }, { "identifier": "get_multiview_rays", "path": "net_multiview/sampler.py", "snippet": "def get_multiview_rays(args, query_rays, query_gts):\n # define the query surface points\n wcoords = query_rays[..., :3] + query_gts['dist'] * query_rays[..., 3:]\n\n # sample points on a unit sphere to construct vectors\n x = 2. * torch.rand([wcoords.shape[0], args.N_views]) - 1.\n y = 2. * torch.rand([wcoords.shape[0], args.N_views]) - 1.\n z = 2. 
* torch.rand([wcoords.shape[0], args.N_views]) - 1.\n mv_dirs = torch.stack([x, y, z], dim=-1).to(wcoords.device)\n mv_dirs = mv_dirs / (torch.linalg.norm(mv_dirs, dim=-1, keepdim=True) + EPS)\n rays_d = -mv_dirs\n\n # generate new rays\n dist = args.radius * 2.\n rays_o = wcoords[:, None] - dist * rays_d\n mv_rays = torch.concat([rays_o, rays_d], dim=-1) # (B, N_views, 6)\n target_dict = {'dist': torch.ones_like(rays_d[..., :1]) * dist}\n if args.rgb_layer > 0:\n target_dict['image'] = torch.tile(query_gts['image'][:, None], (1, args.N_views, 1))\n\n mv_rays_flat = mv_rays.reshape(-1, 6)\n for k in target_dict:\n target_dict[k] = target_dict[k].reshape(-1, target_dict[k].shape[-1])\n\n return mv_rays_flat, target_dict" }, { "identifier": "get_surface_gradient", "path": "utils/math.py", "snippet": "def get_surface_gradient(t, raydirs):\n dt = gradient(t, raydirs)\n return torch.norm(dt, dim=-1, keepdim=True)" }, { "identifier": "get_surface_normal", "path": "utils/math.py", "snippet": "def get_surface_normal(t, raydirs):\n dt = gradient(t, raydirs)\n dtdtheta, dtdphi = dt[..., :1], dt[..., 1:]\n sin_theta, cos_theta = torch.sin(raydirs[..., :1]), torch.cos(raydirs[..., :1])\n sin_phi, cos_phi = torch.sin(raydirs[..., 1:]), torch.cos(raydirs[..., 1:])\n dtheta = torch.cat([(dtdtheta * sin_theta + t * cos_theta) * cos_phi,\n (dtdtheta * sin_theta + t * cos_theta) * sin_phi,\n dtdtheta * cos_theta - t * sin_theta], dim=-1)\n dphi = torch.cat([(dtdphi * cos_phi - t * sin_phi) * sin_theta,\n (dtdphi * sin_phi + t * cos_phi) * sin_theta,\n dtdphi * cos_theta], dim=-1)\n\n normal = torch.cross(dphi, dtheta)\n normal = normal / (torch.linalg.norm(normal+EPS, dim=-1, keepdim=True)+EPS)\n return normal" } ]
import os import torch import numpy as np import imageio import trimesh import open3d as o3d import wandb from tqdm import trange from config import config_parser from open3d import pipelines from wandb import AlertLevel from utils import log from utils.math import convert_d from utils.dataloader import Dataloader from utils.ray import get_ray_param from net_multiview.network import create_net from net_multiview.sampler import get_multiview_rays from utils.math import get_surface_gradient, get_surface_normal from torchmetrics.functional import peak_signal_noise_ratio as PSNR from torchmetrics.functional import structural_similarity_index_measure as SSIM from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity from chamfer_distance import ChamferDistance
5,148
# normalize query gt distance and query surface point target_dict['dist_norm'] = (target_dict['dist'] - d0) / (args.radius * 2.) batch_pts = batch_rays[..., :3] + target_dict['dist'] * batch_rays[..., 3:] for c in range(batch_pts.shape[-1]): batch_pts[..., c] -= dataloader.scene_info['sphere_center'][c] target_dict['pts_norm'] = batch_pts / dataloader.scene_info['sphere_radius'] # ================= Multiview Rays ===================== # Sample multiview rays and get their ray parameters mv_rays, mv_targets = get_multiview_rays(args, query_rays=batch_rays, query_gts=target_dict) mv_inputs, mv_d0, _ = get_ray_param(ray_fn, mv_rays) mv_targets['dist_norm'] = (mv_targets['dist'] - mv_d0) / (args.radius * 2.) # Compute visibility with torch.no_grad(): cls_inputs = [torch.tile(batch_inputs[:, None], (1, args.N_views, 1)).reshape(-1, batch_inputs.shape[-1]), mv_inputs, torch.tile(target_dict['pts_norm'][:, None], (1, args.N_views, 1)).reshape(-1, 3)] vis = model_cls(cls_inputs) mv_targets['vis_score'] = torch.sigmoid(vis).reshape(args.N_rand, args.N_views) reweigh = 0.5 mv_targets['vis_score'] = mv_targets['vis_score'] ** reweigh / (mv_targets['vis_score'] ** reweigh + (1. - mv_targets['vis_score']) ** reweigh) # Multiview forward mv_batch_inputs = torch.cat([batch_inputs, mv_inputs], dim=0) mv_outputs = model(mv_batch_inputs) # ================= Optimization ===================== loss_d_query = torch.abs(mv_outputs['dist'][:args.N_rand] - target_dict['dist_norm'])[:, 0] loss_d_mv = torch.abs(mv_outputs['dist'][args.N_rand:] - mv_targets['dist_norm']).reshape(args.N_rand, args.N_views) loss_d = loss_d_query + (mv_targets['vis_score'] * loss_d_mv).sum(-1) loss_d = (loss_d / (1. + mv_targets['vis_score'].sum(-1))).mean() loss_rgb = torch.tensor(0.).to(device) if 'rgb' in mv_outputs: mv_outputs['rgb_pred'] = torch.sigmoid(mv_outputs['rgb']) loss_rgb_query = ((mv_outputs['rgb_pred'][:args.N_rand] - target_dict['image'])**2).mean(-1) loss_rgb_mv = ((mv_outputs['rgb_pred'][args.N_rand:] - mv_targets['image'])**2).reshape(args.N_rand, args.N_views, 3).mean(-1) loss_rgb = loss_rgb_query + (mv_targets['vis_score'] * loss_rgb_mv).sum(-1) loss_rgb = (loss_rgb / (1. 
+ mv_targets['vis_score'].sum(-1))).mean() loss = loss_d + args.w_rgb * loss_rgb loss.backward() if args.grad_clip > 0.: torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=args.grad_clip) optimizer.step() new_lrate = optimizer.param_groups[0]['lr'] scheduler.step() # ================= Logging ========================== if i % args.i_print == 0 and i != start: wandb.log({ 'train/_ep': ep, 'train/_lr': new_lrate, 'train/loss': loss.item(), 'train/loss_d': loss_d.item(), 'train/loss_rgb': loss_rgb.item() }) # ================= Evaluation ===================== if i % args.i_img == 0 and i != start: torch.cuda.empty_cache() eval(args, dataloader, ray_fn, model, mode='train') eval(args, dataloader, ray_fn, model, mode='test') # Save checkpoints if (i % args.i_weights == 0 and i != start) or (i + 1) == args.N_iters: path = os.path.join(args.logdir, args.expname, '{:07d}.tar'.format(i)) ckpt_dict = { 'global_step': global_step, 'network_fn': model.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict() } torch.save(ckpt_dict, path) print('Saved checkpoints at', path) # a quick evaluation on the whole dataset evaluate_all(args, dataloader, ray_fn, model, global_step=global_step, mode='train') evaluate_all(args, dataloader, ray_fn, model, global_step=global_step, mode='test') global_step += 1 wandb.alert( title='Training Finished', text=f'Start to evaluate.', level=AlertLevel.WARN) args.eval_only = True args.denoise = True args.grad_normal = True evaluate(args) def eval(args, dataloader, ray_fn, model, img_i=None, mode='test', log_level=2): # log_level - 0: return metrics, 1: return metrics and maps, otherwise: wandb logging H = dataloader.scene_info['H'] W = dataloader.scene_info['W'] if img_i is None: i_split = dataloader.i_test if mode.startswith('test') else dataloader.i_train img_i = np.random.choice(np.arange(0, len(i_split))) inds = img_i * H * W + np.arange(0, H * W) rays, targets = dataloader(inds, mode=mode) # (H*W, C) targets['dist_norm'] = torch.zeros_like(targets['dist']) # Forward network outputs = {} with torch.enable_grad(): for i in range(0, len(rays), args.N_rand): batch_inputs, d0, batch_raydirs = get_ray_param(ray_fn, rays[i:i+args.N_rand]) targets['dist_norm'][i:i+args.N_rand] = (targets['dist'][i:i+args.N_rand] - d0) / (args.radius * 2.) outs = model(batch_inputs) outs['dist_abs'] = outs['dist'] * (2.*args.radius) + d0 if 'rgb' in outs: outs['rgb_pred'] = torch.sigmoid(outs['rgb']) if args.denoise:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") torch.backends.cudnn.benchmark = True np.random.seed(0) LPIPS = LearnedPerceptualImagePatchSimilarity(net_type='alex').to(device) CD = ChamferDistance().to(device) def train(args): # Load dataset dataloader = Dataloader(args, device) # Create rayparam function and network ray_fn, global_step, model, model_cls, optimizer, scheduler = create_net(args, dataloader.scene_info, device) # Create experiment logger wandb.init(project="RayDF-RaySurfDNet") wandb.run.name = args.expname wandb.watch(model, log="all") start = global_step train_num = len(dataloader.dists['train_fg']) inds = np.random.permutation(train_num) step_batch = train_num // args.N_rand for i in trange(start, args.N_iters): optimizer.zero_grad() j = i % step_batch ep = i // step_batch # re-random train indices after one epoch if j == 0 and i != start: inds = np.random.permutation(train_num) # =================== Query Rays ======================== # Random rays from all images train_i = inds[j * args.N_rand: (j+1) * args.N_rand] # load query rays batch_rays, target_dict = dataloader(inds=train_i, mode='train_fg') batch_inputs, d0, _ = get_ray_param(ray_fn, batch_rays) # normalize query gt distance and query surface point target_dict['dist_norm'] = (target_dict['dist'] - d0) / (args.radius * 2.) batch_pts = batch_rays[..., :3] + target_dict['dist'] * batch_rays[..., 3:] for c in range(batch_pts.shape[-1]): batch_pts[..., c] -= dataloader.scene_info['sphere_center'][c] target_dict['pts_norm'] = batch_pts / dataloader.scene_info['sphere_radius'] # ================= Multiview Rays ===================== # Sample multiview rays and get their ray parameters mv_rays, mv_targets = get_multiview_rays(args, query_rays=batch_rays, query_gts=target_dict) mv_inputs, mv_d0, _ = get_ray_param(ray_fn, mv_rays) mv_targets['dist_norm'] = (mv_targets['dist'] - mv_d0) / (args.radius * 2.) # Compute visibility with torch.no_grad(): cls_inputs = [torch.tile(batch_inputs[:, None], (1, args.N_views, 1)).reshape(-1, batch_inputs.shape[-1]), mv_inputs, torch.tile(target_dict['pts_norm'][:, None], (1, args.N_views, 1)).reshape(-1, 3)] vis = model_cls(cls_inputs) mv_targets['vis_score'] = torch.sigmoid(vis).reshape(args.N_rand, args.N_views) reweigh = 0.5 mv_targets['vis_score'] = mv_targets['vis_score'] ** reweigh / (mv_targets['vis_score'] ** reweigh + (1. - mv_targets['vis_score']) ** reweigh) # Multiview forward mv_batch_inputs = torch.cat([batch_inputs, mv_inputs], dim=0) mv_outputs = model(mv_batch_inputs) # ================= Optimization ===================== loss_d_query = torch.abs(mv_outputs['dist'][:args.N_rand] - target_dict['dist_norm'])[:, 0] loss_d_mv = torch.abs(mv_outputs['dist'][args.N_rand:] - mv_targets['dist_norm']).reshape(args.N_rand, args.N_views) loss_d = loss_d_query + (mv_targets['vis_score'] * loss_d_mv).sum(-1) loss_d = (loss_d / (1. + mv_targets['vis_score'].sum(-1))).mean() loss_rgb = torch.tensor(0.).to(device) if 'rgb' in mv_outputs: mv_outputs['rgb_pred'] = torch.sigmoid(mv_outputs['rgb']) loss_rgb_query = ((mv_outputs['rgb_pred'][:args.N_rand] - target_dict['image'])**2).mean(-1) loss_rgb_mv = ((mv_outputs['rgb_pred'][args.N_rand:] - mv_targets['image'])**2).reshape(args.N_rand, args.N_views, 3).mean(-1) loss_rgb = loss_rgb_query + (mv_targets['vis_score'] * loss_rgb_mv).sum(-1) loss_rgb = (loss_rgb / (1. 
+ mv_targets['vis_score'].sum(-1))).mean() loss = loss_d + args.w_rgb * loss_rgb loss.backward() if args.grad_clip > 0.: torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=args.grad_clip) optimizer.step() new_lrate = optimizer.param_groups[0]['lr'] scheduler.step() # ================= Logging ========================== if i % args.i_print == 0 and i != start: wandb.log({ 'train/_ep': ep, 'train/_lr': new_lrate, 'train/loss': loss.item(), 'train/loss_d': loss_d.item(), 'train/loss_rgb': loss_rgb.item() }) # ================= Evaluation ===================== if i % args.i_img == 0 and i != start: torch.cuda.empty_cache() eval(args, dataloader, ray_fn, model, mode='train') eval(args, dataloader, ray_fn, model, mode='test') # Save checkpoints if (i % args.i_weights == 0 and i != start) or (i + 1) == args.N_iters: path = os.path.join(args.logdir, args.expname, '{:07d}.tar'.format(i)) ckpt_dict = { 'global_step': global_step, 'network_fn': model.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict() } torch.save(ckpt_dict, path) print('Saved checkpoints at', path) # a quick evaluation on the whole dataset evaluate_all(args, dataloader, ray_fn, model, global_step=global_step, mode='train') evaluate_all(args, dataloader, ray_fn, model, global_step=global_step, mode='test') global_step += 1 wandb.alert( title='Training Finished', text=f'Start to evaluate.', level=AlertLevel.WARN) args.eval_only = True args.denoise = True args.grad_normal = True evaluate(args) def eval(args, dataloader, ray_fn, model, img_i=None, mode='test', log_level=2): # log_level - 0: return metrics, 1: return metrics and maps, otherwise: wandb logging H = dataloader.scene_info['H'] W = dataloader.scene_info['W'] if img_i is None: i_split = dataloader.i_test if mode.startswith('test') else dataloader.i_train img_i = np.random.choice(np.arange(0, len(i_split))) inds = img_i * H * W + np.arange(0, H * W) rays, targets = dataloader(inds, mode=mode) # (H*W, C) targets['dist_norm'] = torch.zeros_like(targets['dist']) # Forward network outputs = {} with torch.enable_grad(): for i in range(0, len(rays), args.N_rand): batch_inputs, d0, batch_raydirs = get_ray_param(ray_fn, rays[i:i+args.N_rand]) targets['dist_norm'][i:i+args.N_rand] = (targets['dist'][i:i+args.N_rand] - d0) / (args.radius * 2.) outs = model(batch_inputs) outs['dist_abs'] = outs['dist'] * (2.*args.radius) + d0 if 'rgb' in outs: outs['rgb_pred'] = torch.sigmoid(outs['rgb']) if args.denoise:
gn = get_surface_gradient(outs['dist'], batch_raydirs)
7
2023-10-30 14:05:51+00:00
8k
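In the run_mv.py record above, the distance loss for each query ray is combined with the errors of its sampled multiview rays, weighted by the predicted visibility scores and normalised by 1 plus the sum of those scores, so fully occluded reference views contribute nothing. A toy illustration of just that reduction (random tensors, not the repository's data):

import torch

# Toy illustration of the visibility-weighted loss reduction used above.
N_rand, N_views = 4, 20
loss_d_query = torch.rand(N_rand)            # |d_pred - d_gt| for each query ray
loss_d_mv = torch.rand(N_rand, N_views)      # per-ray error for the multiview rays
vis = torch.rand(N_rand, N_views)            # visibility scores in [0, 1]

loss_d = loss_d_query + (vis * loss_d_mv).sum(-1)
loss_d = (loss_d / (1. + vis.sum(-1))).mean()  # scalar; invisible views are ignored
print(loss_d)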
snap-stanford/relbench
test/external/test_nn.py
[ { "identifier": "TaskType", "path": "relbench/data/task.py", "snippet": "class TaskType(Enum):\n r\"\"\"The type of the task.\n\n Attributes:\n REGRESSION: Regression task.\n MULTICLASS_CLASSIFICATION: Multi-class classification task.\n BINARY_CLASSIFICATION: Binary classification task.\n \"\"\"\n REGRESSION = \"regression\"\n BINARY_CLASSIFICATION = \"binary_classification\"" }, { "identifier": "FakeDataset", "path": "relbench/datasets/fake.py", "snippet": "class FakeDataset(Dataset):\n name = \"rel-fake\"\n\n def __init__(\n self, num_products: int = 30, num_customers: int = 100, num_reviews: int = 500\n ):\n db = self.make_db(num_products, num_customers, num_reviews)\n db.reindex_pkeys_and_fkeys()\n val_timestamp = db.min_timestamp + 0.8 * (db.max_timestamp - db.min_timestamp)\n test_timestamp = db.min_timestamp + 0.9 * (db.max_timestamp - db.min_timestamp)\n super().__init__(\n db=db,\n val_timestamp=val_timestamp,\n test_timestamp=test_timestamp,\n task_cls_list=[ChurnTask, LTVTask],\n )\n\n def make_db(self, num_products, num_customers, num_reviews) -> Database:\n product_df = pd.DataFrame(\n {\n \"product_id\": [f\"product_id_{i}\" for i in range(num_products)],\n \"category\": [None, [], [\"toy\", \"health\"]] * (num_products // 3),\n \"title\": [_generate_random_string(5, 15) for _ in range(num_products)],\n \"price\": np.random.rand(num_products) * 10,\n }\n )\n customer_df = pd.DataFrame(\n {\n \"customer_id\": [f\"customer_id_{i}\" for i in range(num_customers)],\n \"age\": np.random.randint(10, 50, size=(num_customers,)),\n \"gender\": [\"male\", \"female\"] * (num_customers // 2),\n }\n )\n # Add some dangling foreign keys:\n review_df = pd.DataFrame(\n {\n \"customer_id\": [\n f\"customer_id_{random.randint(0, num_customers+5)}\"\n for _ in range(num_reviews)\n ],\n \"product_id\": [\n f\"product_id_{random.randint(0, num_products-1)}\"\n for _ in range(num_reviews)\n ],\n \"review_time\": pd.to_datetime(10 * np.arange(num_reviews), unit=\"D\"),\n \"rating\": np.random.randint(1, 6, size=(num_reviews,)),\n }\n )\n\n return Database(\n table_dict={\n \"product\": Table(\n df=product_df,\n fkey_col_to_pkey_table={},\n pkey_col=\"product_id\",\n ),\n \"customer\": Table(\n df=customer_df,\n fkey_col_to_pkey_table={},\n pkey_col=\"customer_id\",\n ),\n \"review\": Table(\n df=review_df,\n fkey_col_to_pkey_table={\n \"customer_id\": \"customer\",\n \"product_id\": \"product\",\n },\n time_col=\"review_time\",\n ),\n }\n )" }, { "identifier": "get_stype_proposal", "path": "relbench/external/graph.py", "snippet": "def get_stype_proposal(db: Database) -> Dict[str, Dict[str, Any]]:\n r\"\"\"Propose stype for columns of a set of tables in the given database.\n\n Args:\n db (Database): : The database object containing a set of tables.\n\n Returns:\n Dict[str, Dict[str, Any]]: A dictionary mapping table name into\n :obj:`col_to_stype` (mapping column names into inferred stypes).\n \"\"\"\n\n inferred_col_to_stype_dict = {}\n for table_name, table in db.table_dict.items():\n inferred_col_to_stype = infer_df_stype(table.df)\n\n # Remove pkey, fkey columns since they will not be used as input\n # feature.\n if table.pkey_col is not None:\n inferred_col_to_stype.pop(table.pkey_col)\n for fkey in table.fkey_col_to_pkey_table.keys():\n inferred_col_to_stype.pop(fkey)\n\n inferred_col_to_stype_dict[table_name] = inferred_col_to_stype\n\n return inferred_col_to_stype_dict" }, { "identifier": "get_train_table_input", "path": "relbench/external/graph.py", "snippet": "def 
get_train_table_input(\n table: Table,\n task: Task,\n) -> TrainTableInput:\n nodes = torch.from_numpy(table.df[task.entity_col].astype(int).values)\n\n time: Optional[Tensor] = None\n if table.time_col is not None:\n time = to_unix_time(table.df[table.time_col])\n\n target: Optional[Tensor] = None\n transform: Optional[AttachTargetTransform] = None\n if task.target_col in table.df:\n target_type = float\n if task.task_type == \"multiclass_classification\":\n target_type = int\n target = torch.from_numpy(table.df[task.target_col].values.astype(target_type))\n transform = AttachTargetTransform(task.entity_table, target)\n\n return TrainTableInput(\n nodes=(task.entity_table, nodes),\n time=time,\n target=target,\n transform=transform,\n )" }, { "identifier": "make_pkey_fkey_graph", "path": "relbench/external/graph.py", "snippet": "def make_pkey_fkey_graph(\n db: Database,\n col_to_stype_dict: Dict[str, Dict[str, stype]],\n text_embedder_cfg: Optional[TextEmbedderConfig] = None,\n cache_dir: Optional[str] = None,\n) -> HeteroData:\n r\"\"\"Given a :class:`Database` object, construct a heterogeneous graph with\n primary-foreign key relationships, together with the column stats of each\n table.\n\n Args:\n db (Database): A database object containing a set of tables.\n col_to_stype_dict (Dict[str, Dict[str, stype]]): Column to stype for\n each table.\n cache_dir (str, optional): A directory for storing materialized tensor\n frames. If specified, we will either cache the file or use the\n cached file. If not specified, we will not use cached file and\n re-process everything from scrach without saving the cache.\n\n Returns:\n HeteroData: The heterogeneous :class:`PyG` object with\n :class:`TensorFrame` feature.\n \"\"\"\n data = HeteroData()\n if cache_dir is not None:\n os.makedirs(cache_dir, exist_ok=True)\n\n for table_name, table in db.table_dict.items():\n # Materialize the tables into tensor frames:\n df = table.df\n # Ensure that pkey is consecutive.\n if table.pkey_col is not None:\n assert (df[table.pkey_col].values == np.arange(len(df))).all()\n\n col_to_stype = col_to_stype_dict[table_name]\n\n if len(col_to_stype) == 0: # Add constant feature in case df is empty:\n col_to_stype = {\"__const__\": stype.numerical}\n df = pd.DataFrame({\"__const__\": np.ones(len(table.df))})\n\n path = (\n None if cache_dir is None else os.path.join(cache_dir, f\"{table_name}.pt\")\n )\n dataset = Dataset(\n df=df,\n col_to_stype=col_to_stype,\n col_to_text_embedder_cfg=text_embedder_cfg,\n ).materialize(path=path)\n\n data[table_name].tf = dataset.tensor_frame\n data[table_name].col_stats = dataset.col_stats\n\n # Add time attribute:\n if table.time_col is not None:\n data[table_name].time = to_unix_time(table.df[table.time_col])\n\n # Add edges:\n for fkey_name, pkey_table_name in table.fkey_col_to_pkey_table.items():\n pkey_index = df[fkey_name]\n # Filter out dangling foreign keys\n mask = ~pkey_index.isna()\n fkey_index = torch.arange(len(pkey_index))\n # Filter dangling foreign keys:\n pkey_index = torch.from_numpy(pkey_index[mask].astype(int).values)\n fkey_index = fkey_index[torch.from_numpy(mask.values)]\n # Ensure no dangling fkeys\n assert (pkey_index < len(db.table_dict[pkey_table_name])).all()\n\n # fkey -> pkey edges\n edge_index = torch.stack([fkey_index, pkey_index], dim=0)\n edge_type = (table_name, f\"f2p_{fkey_name}\", pkey_table_name)\n data[edge_type].edge_index = sort_edge_index(edge_index)\n\n # pkey -> fkey edges\n edge_index = torch.stack([pkey_index, fkey_index], dim=0)\n 
edge_type = (pkey_table_name, f\"p2f_{fkey_name}\", table_name)\n data[edge_type].edge_index = sort_edge_index(edge_index)\n\n data.validate()\n\n return data" }, { "identifier": "HeteroEncoder", "path": "relbench/external/nn.py", "snippet": "class HeteroEncoder(torch.nn.Module):\n r\"\"\"HeteroEncoder based on PyTorch Frame.\n\n Args:\n channels (int): The output channels for each node type.\n node_to_col_names_dict (Dict[NodeType, Dict[torch_frame.stype, List[str]]]):\n A dictionary mapping from node type to column names dictionary\n compatible to PyTorch Frame.\n torch_frame_model_cls: Model class for PyTorch Frame. The class object\n takes :class:`TensorFrame` object as input and outputs\n :obj:`channels`-dimensional embeddings. Default to\n :class:`torch_frame.nn.ResNet`.\n torch_frame_model_kwargs (Dict[str, Any]): Keyword arguments for\n :class:`torch_frame_model_cls` class. Default keyword argument is\n set specific for :class:`torch_frame.nn.ResNet`. Expect it to\n be changed for different :class:`torch_frame_model_cls`.\n default_stype_encoder_cls_kwargs (Dict[torch_frame.stype, Any]):\n A dictionary mapping from :obj:`torch_frame.stype` object into a\n tuple specifying :class:`torch_frame.nn.StypeEncoder` class and its\n keyword arguments :obj:`kwargs`.\n \"\"\"\n\n def __init__(\n self,\n channels: int,\n node_to_col_names_dict: Dict[NodeType, Dict[torch_frame.stype, List[str]]],\n node_to_col_stats: Dict[NodeType, Dict[str, Dict[StatType, Any]]],\n torch_frame_model_cls=ResNet,\n torch_frame_model_kwargs: Dict[str, Any] = {\n \"channels\": 128,\n \"num_layers\": 4,\n },\n default_stype_encoder_cls_kwargs: Dict[torch_frame.stype, Any] = {\n torch_frame.categorical: (torch_frame.nn.EmbeddingEncoder, {}),\n torch_frame.numerical: (torch_frame.nn.LinearEncoder, {}),\n torch_frame.multicategorical: (\n torch_frame.nn.MultiCategoricalEmbeddingEncoder,\n {},\n ),\n torch_frame.embedding: (torch_frame.nn.LinearEmbeddingEncoder, {}),\n torch_frame.timestamp: (torch_frame.nn.TimestampEncoder, {}),\n },\n ):\n super().__init__()\n\n self.encoders = torch.nn.ModuleDict()\n\n for node_type in node_to_col_names_dict.keys():\n stype_encoder_dict = {\n stype: default_stype_encoder_cls_kwargs[stype][0](\n **default_stype_encoder_cls_kwargs[stype][1]\n )\n for stype in node_to_col_names_dict[node_type].keys()\n }\n torch_frame_model = torch_frame_model_cls(\n **torch_frame_model_kwargs,\n out_channels=channels,\n col_stats=node_to_col_stats[node_type],\n col_names_dict=node_to_col_names_dict[node_type],\n stype_encoder_dict=stype_encoder_dict,\n )\n self.encoders[node_type] = torch_frame_model\n\n def reset_parameters(self):\n for encoder in self.encoders.values():\n encoder.reset_parameters()\n\n def forward(\n self,\n tf_dict: Dict[NodeType, torch_frame.TensorFrame],\n ) -> Dict[NodeType, Tensor]:\n x_dict = {\n node_type: self.encoders[node_type](tf) for node_type, tf in tf_dict.items()\n }\n return x_dict" }, { "identifier": "HeteroGraphSAGE", "path": "relbench/external/nn.py", "snippet": "class HeteroGraphSAGE(torch.nn.Module):\n def __init__(\n self,\n node_types: List[NodeType],\n edge_types: List[EdgeType],\n channels: int,\n aggr: str = \"mean\",\n num_layers: int = 2,\n ):\n super().__init__()\n\n self.convs = torch.nn.ModuleList()\n for _ in range(num_layers):\n conv = HeteroConv(\n {\n edge_type: SAGEConv((channels, channels), channels, aggr=aggr)\n for edge_type in edge_types\n },\n aggr=\"sum\",\n )\n self.convs.append(conv)\n\n self.norms = torch.nn.ModuleList()\n for _ in 
range(num_layers):\n norm_dict = torch.nn.ModuleDict()\n for node_type in node_types:\n norm_dict[node_type] = LayerNorm(channels, mode=\"node\")\n self.norms.append(norm_dict)\n\n def reset_parameters(self):\n for conv in self.convs:\n conv.reset_parameters()\n for norm_dict in self.norms:\n for norm in norm_dict.values():\n norm.reset_parameters()\n\n def forward(\n self,\n x_dict: Dict[NodeType, Tensor],\n edge_index_dict: Dict[NodeType, Tensor],\n num_sampled_nodes_dict: Optional[Dict[NodeType, List[int]]] = None,\n num_sampled_edges_dict: Optional[Dict[EdgeType, List[int]]] = None,\n ) -> Dict[NodeType, Tensor]:\n for i, (conv, norm_dict) in enumerate(zip(self.convs, self.norms)):\n # Trim graph and features to only hold required data per layer:\n if num_sampled_nodes_dict is not None:\n assert num_sampled_edges_dict is not None\n x_dict, edge_index_dict, _ = trim_to_layer(\n layer=i,\n num_sampled_nodes_per_hop=num_sampled_nodes_dict,\n num_sampled_edges_per_hop=num_sampled_edges_dict,\n x=x_dict,\n edge_index=edge_index_dict,\n )\n\n x_dict = conv(x_dict, edge_index_dict)\n x_dict = {key: norm_dict[key](x) for key, x in x_dict.items()}\n x_dict = {key: x.relu() for key, x in x_dict.items()}\n\n return x_dict" } ]
from typing import Dict from torch_frame.config.text_embedder import TextEmbedderConfig from torch_frame.testing.text_embedder import HashTextEmbedder from torch_geometric.loader import NeighborLoader from torch_geometric.nn import MLP from relbench.data.task import TaskType from relbench.datasets import FakeDataset from relbench.external.graph import ( get_stype_proposal, get_train_table_input, make_pkey_fkey_graph, ) from relbench.external.nn import HeteroEncoder, HeteroGraphSAGE import torch import torch.nn.functional as F
3,942
def test_train_fake_product_dataset(tmp_path): dataset = FakeDataset() data = make_pkey_fkey_graph( dataset.db, get_stype_proposal(dataset.db), text_embedder_cfg=TextEmbedderConfig( text_embedder=HashTextEmbedder(8), batch_size=None ), cache_dir=tmp_path, ) node_to_col_names_dict = { # TODO Expose as method in `HeteroData`. node_type: data[node_type].tf.col_names_dict for node_type in data.node_types } # Ensure that full-batch model works as expected ########################## encoder = HeteroEncoder(64, node_to_col_names_dict, data.col_stats_dict) gnn = HeteroGraphSAGE(data.node_types, data.edge_types, 64) head = MLP(64, out_channels=1, num_layers=1) x_dict = encoder(data.tf_dict) x_dict = gnn(x_dict, data.edge_index_dict) x = head(x_dict["customer"]) assert len(x_dict) == 3 assert x_dict["customer"].size() == (100, 64) assert x_dict["review"].size() == (450, 64) assert x_dict["product"].size() == (30, 64) assert x.size() == (100, 1) # Ensure that neighbor loading works on train/val/test splits ############ task = dataset.get_task("rel-amazon-churn", process=True)
def test_train_fake_product_dataset(tmp_path): dataset = FakeDataset() data = make_pkey_fkey_graph( dataset.db, get_stype_proposal(dataset.db), text_embedder_cfg=TextEmbedderConfig( text_embedder=HashTextEmbedder(8), batch_size=None ), cache_dir=tmp_path, ) node_to_col_names_dict = { # TODO Expose as method in `HeteroData`. node_type: data[node_type].tf.col_names_dict for node_type in data.node_types } # Ensure that full-batch model works as expected ########################## encoder = HeteroEncoder(64, node_to_col_names_dict, data.col_stats_dict) gnn = HeteroGraphSAGE(data.node_types, data.edge_types, 64) head = MLP(64, out_channels=1, num_layers=1) x_dict = encoder(data.tf_dict) x_dict = gnn(x_dict, data.edge_index_dict) x = head(x_dict["customer"]) assert len(x_dict) == 3 assert x_dict["customer"].size() == (100, 64) assert x_dict["review"].size() == (450, 64) assert x_dict["product"].size() == (30, 64) assert x.size() == (100, 1) # Ensure that neighbor loading works on train/val/test splits ############ task = dataset.get_task("rel-amazon-churn", process=True)
assert task.task_type == TaskType.BINARY_CLASSIFICATION
0
2023-10-29 18:29:52+00:00
8k
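Note on the relbench record above: its HeteroGraphSAGE snippet applies, at every layer, a heterogeneous convolution followed by a per-node-type LayerNorm and a ReLU. Below is a minimal, self-contained sketch of that per-layer pattern using plain PyTorch only; `ToyConv` is a hypothetical stand-in for the `HeteroConv`/`SAGEConv` pair and is not part of relbench or PyTorch Geometric — it only mimics the "message passing, then normalise and activate each node type" loop.

```python
import torch
import torch.nn as nn

class ToyConv(nn.Module):
    """Hypothetical stand-in for a heterogeneous conv layer: one linear map per node type."""
    def __init__(self, channels, node_types):
        super().__init__()
        self.lin = nn.ModuleDict({nt: nn.Linear(channels, channels) for nt in node_types})

    def forward(self, x_dict):
        return {nt: self.lin[nt](x) for nt, x in x_dict.items()}

channels, num_layers = 64, 2
node_types = ["customer", "review", "product"]

convs = nn.ModuleList([ToyConv(channels, node_types) for _ in range(num_layers)])
norms = nn.ModuleList([
    nn.ModuleDict({nt: nn.LayerNorm(channels) for nt in node_types}) for _ in range(num_layers)
])

x_dict = {"customer": torch.randn(100, channels),
          "review": torch.randn(450, channels),
          "product": torch.randn(30, channels)}

for conv, norm_dict in zip(convs, norms):
    x_dict = conv(x_dict)                                        # message-passing step
    x_dict = {nt: norm_dict[nt](x) for nt, x in x_dict.items()}  # per-node-type normalisation
    x_dict = {nt: x.relu() for nt, x in x_dict.items()}          # non-linearity

assert x_dict["customer"].shape == (100, channels)
```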
francescofugazzi/3dgsconverter
gsconverter/main.py
[ { "identifier": "Utility", "path": "gsconverter/utils/utility.py", "snippet": "class Utility:\n @staticmethod\n def text_based_detect_format(file_path):\n debug_print(\"[DEBUG] Executing 'text_based_detect_format' function...\")\n\n \"\"\"Detect if the given file is in '3dgs' or 'cc' format.\"\"\"\n with open(file_path, 'rb') as file:\n header_bytes = file.read(2048) # Read the beginning to detect the format\n\n header = header_bytes.decode('utf-8', errors='ignore')\n\n if \"property float f_dc_0\" in header:\n debug_print(\"[DEBUG] Detected format: 3dgs\")\n return \"3dgs\"\n elif \"property float scal_f_dc_0\" in header or \"property float scalar_scal_f_dc_0\" in header or \"property float scalar_f_dc_0\" in header:\n debug_print(\"[DEBUG] Detected format: cc\")\n return \"cc\"\n else:\n return None\n\n @staticmethod\n def copy_data_with_prefix_check(source, target, possible_prefixes):\n debug_print(\"[DEBUG] Executing 'copy_data_with_prefix_check' function...\")\n\n \"\"\"\n Given two structured numpy arrays (source and target), copy the data from source to target.\n If a field exists in source but not in target, this function will attempt to find the field\n in target by adding any of the possible prefixes to the field name.\n \"\"\"\n for name in source.dtype.names:\n if name in target.dtype.names:\n target[name] = source[name]\n else:\n copied = False\n for prefix in possible_prefixes:\n # If the field starts with the prefix, try the field name without the prefix\n if name.startswith(prefix):\n stripped_name = name[len(prefix):]\n if stripped_name in target.dtype.names:\n target[stripped_name] = source[name]\n copied = True\n break\n # If the field doesn't start with any prefix, try adding the prefix\n else:\n prefixed_name = prefix + name\n if prefixed_name in target.dtype.names:\n debug_print(f\"[DEBUG] Copying data from '{name}' to '{prefixed_name}'\")\n target[prefixed_name] = source[name]\n copied = True\n break\n ##if not copied:\n ## print(f\"Warning: Field {name} not found in target.\")\n\n @staticmethod\n def compute_rgb_from_vertex(vertices):\n debug_print(\"[DEBUG] Executing 'compute_rgb_from_vertex' function...\")\n \n # Depending on the available field names, choose the appropriate ones\n if 'f_dc_0' in vertices.dtype.names:\n f_dc = np.column_stack((vertices['f_dc_0'], vertices['f_dc_1'], vertices['f_dc_2']))\n else:\n f_dc = np.column_stack((vertices['scalar_scal_f_dc_0'], vertices['scalar_scal_f_dc_1'], vertices['scalar_scal_f_dc_2']))\n \n colors = (f_dc + 1) * 127.5\n colors = np.clip(colors, 0, 255).astype(np.uint8)\n \n debug_print(\"[DEBUG] RGB colors computed.\")\n return colors\n\n @staticmethod\n def parallel_voxel_counting(vertices, voxel_size=1.0):\n debug_print(\"[DEBUG] Executing 'parallel_voxel_counting' function...\")\n \n \"\"\"Counts the number of points in each voxel in a parallelized manner.\"\"\"\n num_processes = cpu_count()\n chunk_size = len(vertices) // num_processes\n chunks = [vertices[i:i + chunk_size] for i in range(0, len(vertices), chunk_size)]\n\n num_cores = max(1, multiprocessing.cpu_count() - 1)\n with Pool(processes=num_cores, initializer=init_worker) as pool:\n results = pool.starmap(Utility.count_voxels_chunk, [(chunk, voxel_size) for chunk in chunks])\n\n # Aggregate results from all processes\n total_voxel_counts = {}\n for result in results:\n for k, v in result.items():\n if k in total_voxel_counts:\n total_voxel_counts[k] += v\n else:\n total_voxel_counts[k] = v\n\n debug_print(f\"[DEBUG] Voxel counting completed with 
{len(total_voxel_counts)} unique voxels found.\")\n return total_voxel_counts\n \n @staticmethod\n def count_voxels_chunk(vertices_chunk, voxel_size):\n debug_print(\"[DEBUG] Executing 'count_voxels_chunk' function for a chunk...\")\n \n \"\"\"Count the number of points in each voxel for a chunk of vertices.\"\"\"\n voxel_counts = {}\n for vertex in vertices_chunk:\n voxel_coords = (int(vertex['x'] / voxel_size), int(vertex['y'] / voxel_size), int(vertex['z'] / voxel_size))\n if voxel_coords in voxel_counts:\n voxel_counts[voxel_coords] += 1\n else:\n voxel_counts[voxel_coords] = 1\n \n debug_print(f\"[DEBUG] Chunk processed with {len(voxel_counts)} voxels counted.\")\n return voxel_counts\n \n @staticmethod\n def get_neighbors(voxel_coords):\n debug_print(f\"[DEBUG] Getting neighbors for voxel: {voxel_coords}...\")\n \n \"\"\"Get the face-touching neighbors of the given voxel coordinates.\"\"\"\n x, y, z = voxel_coords\n neighbors = [\n (x-1, y, z), (x+1, y, z),\n (x, y-1, z), (x, y+1, z),\n (x, y, z-1), (x, y, z+1)\n ]\n return neighbors\n\n @staticmethod\n def knn_worker(args):\n debug_print(f\"[DEBUG] Executing 'knn_worker' function for vertex: {args[0]}...\")\n \n \"\"\"Utility function for parallel KNN computation.\"\"\"\n coords, tree, k = args\n coords = coords.reshape(1, -1) # Reshape to a 2D array\n distances, _ = tree.kneighbors(coords)\n avg_distance = np.mean(distances[:, 1:])\n \n debug_print(f\"[DEBUG] Average distance computed for vertex: {args[0]} is {avg_distance}.\")\n return avg_distance" }, { "identifier": "convert", "path": "gsconverter/utils/conversion_functions.py", "snippet": "def convert(data, source_format, target_format, **kwargs):\n debug_print(f\"[DEBUG] Starting conversion from {source_format} to {target_format}...\")\n \n if source_format == \"3dgs\":\n converter = Format3dgs(data)\n elif source_format == \"cc\":\n converter = FormatCC(data)\n elif source_format == \"parquet\":\n converter = FormatParquet(data)\n else:\n raise ValueError(\"Unsupported source format\")\n \n # Apply optional pre-processing steps using process_data (newly added)\n process_data(converter, bbox=kwargs.get(\"bbox\"), apply_density_filter=kwargs.get(\"density_filter\"), remove_flyers=kwargs.get(\"remove_flyers\"))\n\n # RGB processing\n if source_format == \"cc\":\n if kwargs.get(\"process_rgb\", False) and converter.has_rgb():\n print(\"Error: Source CC file already contains RGB data. 
Conversion stopped.\")\n return None\n debug_print(\"[DEBUG] Adding or ignoring RGB for CC data...\")\n converter.add_or_ignore_rgb(process_rgb=kwargs.get(\"process_rgb\", False))\n\n # Conversion operations\n process_rgb_flag = kwargs.get(\"process_rgb\", False)\n if source_format == \"3dgs\" and target_format == \"cc\":\n debug_print(\"[DEBUG] Converting 3DGS to CC...\")\n return converter.to_cc(process_rgb=process_rgb_flag)\n elif source_format == \"cc\" and target_format == \"3dgs\":\n debug_print(\"[DEBUG] Converting CC to 3DGS...\")\n return converter.to_3dgs()\n elif source_format == \"parquet\" and target_format == \"cc\":\n debug_print(\"[DEBUG] Converting Parquet to CC...\")\n return converter.to_cc(process_rgb=process_rgb_flag)\n elif source_format == \"parquet\" and target_format == \"3dgs\":\n debug_print(\"[DEBUG] Converting Parquet to 3DGS...\")\n return converter.to_3dgs()\n elif source_format == \"3dgs\" and target_format == \"3dgs\":\n debug_print(\"[DEBUG] Applying operations on 3DGS data...\")\n if not any(kwargs.values()): # If no flags are provided\n print(\"[INFO] No flags provided. The conversion will not happen as the output would be identical to the input.\")\n return data['vertex'].data\n else:\n return converter.to_3dgs()\n elif source_format == \"cc\" and target_format == \"cc\":\n debug_print(\"[DEBUG] Applying operations on CC data...\")\n converted_data = converter.to_cc()\n if isinstance(converted_data, np.ndarray):\n return converted_data\n else:\n return data['vertex'].data\n else:\n raise ValueError(\"Unsupported conversion\")" }, { "identifier": "config", "path": "gsconverter/utils/config.py", "snippet": "DEBUG = False" }, { "identifier": "init_worker", "path": "gsconverter/utils/utility_functions.py", "snippet": "def init_worker():\n signal.signal(signal.SIGINT, signal.SIG_IGN)" }, { "identifier": "DensityFilterAction", "path": "gsconverter/utils/argument_actions.py", "snippet": "class DensityFilterAction(argparse.Action):\n def __call__(self, parser, args, values, option_string=None):\n if values:\n if len(values) != 2:\n parser.error(\"--density_filter requires two numbers: voxel_size and threshold_percentage.\")\n try:\n values = [float(v) for v in values]\n except ValueError:\n parser.error(\"Both arguments for --density_filter must be numbers.\")\n else:\n values = [1.0, 0.32] # Default values if none are provided\n setattr(args, self.dest, values)" }, { "identifier": "RemoveFlyersAction", "path": "gsconverter/utils/argument_actions.py", "snippet": "class RemoveFlyersAction(argparse.Action):\n def __call__(self, parser, args, values, option_string=None):\n if values:\n if len(values) != 2:\n parser.error(\"--remove_flyers requires two numbers: 'k' for the number of neighbors and 'threshold_factor' for the multiplier of the standard deviation.\")\n try:\n values = [float(v) for v in values]\n except ValueError:\n parser.error(\"Both arguments for --remove_flyers must be numbers.\")\n else:\n values = [25, 10.5] # Default values if none are provided\n setattr(args, self.dest, values)" }, { "identifier": "AboutAction", "path": "gsconverter/utils/argument_actions.py", "snippet": "class AboutAction(argparse.Action):\n def __init__(self, option_strings, dest, nargs=0, **kwargs):\n super(AboutAction, self).__init__(option_strings, dest, nargs=0, **kwargs)\n\n def __call__(self, parser, namespace, values, option_string=None):\n copyright_info = \"\"\"\n 3D Gaussian Splatting Converter\n Copyright (c) 2023 Francesco Fugazzi\n\n This software is released 
under the MIT License.\n For more information about the license, please see the LICENSE file.\n \"\"\"\n print(copyright_info)\n parser.exit() # Exit after displaying the information." }, { "identifier": "BaseConverter", "path": "gsconverter/utils/base_converter.py", "snippet": "class BaseConverter:\n def __init__(self, data):\n self.data = data\n\n def extract_vertex_data(vertices, has_scal=True, has_rgb=False):\n \"\"\"Extract and convert vertex data from a structured numpy array of vertices.\"\"\"\n debug_print(\"[DEBUG] Executing 'extract_vertex_data' function...\")\n converted_data = []\n \n # Determine the prefix to be used based on whether \"scal_\" should be included\n prefix = 'scal_' if has_scal else ''\n debug_print(f\"[DEBUG] Prefix determined as: {prefix}\")\n \n # Iterate over each vertex and extract the necessary attributes\n for vertex in vertices:\n entry = (\n vertex['x'], vertex['y'], vertex['z'],\n vertex['nx'], vertex['ny'], vertex['nz'],\n vertex[f'{prefix}f_dc_0'], vertex[f'{prefix}f_dc_1'], vertex[f'{prefix}f_dc_2'],\n *[vertex[f'{prefix}f_rest_{i}'] for i in range(45)],\n vertex[f'{prefix}opacity'],\n vertex[f'{prefix}scale_0'], vertex[f'{prefix}scale_1'], vertex[f'{prefix}scale_2'],\n vertex[f'{prefix}rot_0'], vertex[f'{prefix}rot_1'], vertex[f'{prefix}rot_2'], vertex[f'{prefix}rot_3']\n )\n \n # If the point cloud contains RGB data, append it to the entry\n if has_rgb:\n entry += (vertex['red'], vertex['green'], vertex['blue'])\n \n converted_data.append(entry)\n \n debug_print(\"[DEBUG] 'extract_vertex_data' function completed.\")\n return converted_data\n\n def apply_density_filter(self, voxel_size=1.0, threshold_percentage=0.32):\n debug_print(\"[DEBUG] Executing 'apply_density_filter' function...\")\n # Ensure self.data is a numpy structured array\n if not isinstance(self.data, np.ndarray):\n raise TypeError(\"self.data must be a numpy structured array.\")\n \n vertices = self.data # This assumes self.data is already a numpy structured array\n\n # Convert threshold_percentage into a ratio\n threshold_ratio = threshold_percentage / 100.0\n\n # Parallelized voxel counting\n voxel_counts = Utility.parallel_voxel_counting(vertices, voxel_size)\n\n threshold = int(len(vertices) * threshold_ratio)\n dense_voxels = {k: v for k, v in voxel_counts.items() if v >= threshold}\n\n visited = set()\n max_cluster = set()\n for voxel in dense_voxels:\n if voxel not in visited:\n current_cluster = set()\n queue = deque([voxel])\n while queue:\n current_voxel = queue.popleft()\n visited.add(current_voxel)\n current_cluster.add(current_voxel)\n for neighbor in Utility.get_neighbors(current_voxel):\n if neighbor in dense_voxels and neighbor not in visited:\n queue.append(neighbor)\n visited.add(neighbor)\n if len(current_cluster) > len(max_cluster):\n max_cluster = current_cluster\n\n # Filter vertices to only include those in dense voxels\n filtered_vertices = [vertex for vertex in vertices if (int(vertex['x'] / voxel_size), int(vertex['y'] / voxel_size), int(vertex['z'] / voxel_size)) in max_cluster]\n\n # Convert the filtered vertices list to a numpy structured array\n self.data = np.array(filtered_vertices, dtype=vertices.dtype)\n\n # Informative print statement\n print(f\"After density filter, retained {len(self.data)} out of {len(vertices)} vertices.\")\n\n # Since we're working with numpy arrays, just return self.data\n return self.data\n\n def remove_flyers(self, k=25, threshold_factor=10.5, chunk_size=50000):\n debug_print(\"[DEBUG] Executing 'remove_flyers' 
function...\")\n\n # Ensure self.data is a numpy structured array\n if not isinstance(self.data, np.ndarray):\n raise TypeError(\"self.data must be a numpy structured array.\")\n\n # Extract vertex data from the current object's data\n vertices = self.data\n num_vertices = len(vertices)\n \n # Display the number of input vertices\n debug_print(f\"[DEBUG] Number of input vertices: {num_vertices}\")\n \n # Adjust k based on the number of vertices\n k = max(3, min(k, num_vertices // 100)) # Example: ensure k is between 3 and 1% of the total vertices\n debug_print(f\"[DEBUG] Adjusted k to: {k}\")\n\n # Number of chunks\n num_chunks = (num_vertices + chunk_size - 1) // chunk_size # Ceiling division\n masks = []\n\n # Create a pool of workers\n num_cores = max(1, cpu_count() - 1) # Leave one core free\n with Pool(processes=num_cores, initializer=init_worker) as pool:\n for i in range(num_chunks):\n start_idx = i * chunk_size\n end_idx = min(start_idx + chunk_size, num_vertices) # Avoid going out of bounds\n chunk_coords = np.vstack((vertices['x'][start_idx:end_idx], vertices['y'][start_idx:end_idx], vertices['z'][start_idx:end_idx])).T\n\n # Compute K-Nearest Neighbors for the chunk\n nbrs = NearestNeighbors(n_neighbors=k+1, algorithm='ball_tree').fit(chunk_coords)\n avg_distances = pool.map(Utility.knn_worker, [(coord, nbrs, k) for coord in chunk_coords])\n\n # Calculate the threshold for removal based on the mean and standard deviation of the average distances\n threshold = np.mean(avg_distances) + threshold_factor * np.std(avg_distances)\n\n # Create a mask for points to retain for this chunk\n mask = np.array(avg_distances) < threshold\n masks.append(mask)\n\n # Combine masks from all chunks\n combined_mask = np.concatenate(masks)\n\n # Apply the mask to the vertices and store the result in self.data\n self.data = vertices[combined_mask]\n \n print(f\"After removing flyers, retained {np.count_nonzero(combined_mask)} out of {num_vertices} vertices.\")\n return self.data\n\n @staticmethod\n def define_dtype(has_scal, has_rgb=False):\n debug_print(\"[DEBUG] Executing 'define_dtype' function...\")\n \n prefix = 'scalar_scal_' if has_scal else ''\n debug_print(f\"[DEBUG] Prefix determined as: {prefix}\")\n \n dtype = [\n ('x', 'f4'), ('y', 'f4'), ('z', 'f4'),\n ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),\n (f'{prefix}f_dc_0', 'f4'), (f'{prefix}f_dc_1', 'f4'), (f'{prefix}f_dc_2', 'f4'),\n *[(f'{prefix}f_rest_{i}', 'f4') for i in range(45)],\n (f'{prefix}opacity', 'f4'),\n (f'{prefix}scale_0', 'f4'), (f'{prefix}scale_1', 'f4'), (f'{prefix}scale_2', 'f4'),\n (f'{prefix}rot_0', 'f4'), (f'{prefix}rot_1', 'f4'), (f'{prefix}rot_2', 'f4'), (f'{prefix}rot_3', 'f4')\n ]\n debug_print(\"[DEBUG] Main dtype constructed.\")\n \n if has_rgb:\n dtype.extend([('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])\n debug_print(\"[DEBUG] RGB fields added to dtype.\")\n \n debug_print(\"[DEBUG] 'define_dtype' function completed.\")\n return dtype, prefix\n \n def has_rgb(self):\n return 'red' in self.data.dtype.names and 'green' in self.data.dtype.names and 'blue' in self.data.dtype.names\n\n def crop_by_bbox(self, min_x, min_y, min_z, max_x, max_y, max_z):\n # Perform cropping based on the bounding box\n self.data = self.data[\n (self.data['x'] >= min_x) &\n (self.data['x'] <= max_x) &\n (self.data['y'] >= min_y) &\n (self.data['y'] <= max_y) &\n (self.data['z'] >= min_z) &\n (self.data['z'] <= max_z)\n ]\n # Print the number of vertices after cropping\n debug_print(f\"[DEBUG] Number of vertices after cropping: 
{len(self.data)}\")\n \n # Informative print statement\n print(f\"After cropping, retained {len(self.data)} vertices.\")\n\n return self.data\n\n @staticmethod\n def load_parquet(file_path):\n # Load the Parquet file into a DataFrame\n df = pd.read_parquet(file_path)\n \n # Define a mapping from the Parquet column names to the expected dtype names\n column_mapping = {\n 'x': 'x',\n 'y': 'y',\n 'z': 'z',\n # Assuming 'nx', 'ny', 'nz' need to be created and set to 0\n 'r_sh0': 'f_dc_0',\n 'g_sh0': 'f_dc_1',\n 'b_sh0': 'f_dc_2',\n 'r_sh1': 'f_rest_0',\n 'r_sh2': 'f_rest_1',\n 'r_sh3': 'f_rest_2',\n 'r_sh4': 'f_rest_3',\n 'r_sh5': 'f_rest_4',\n 'r_sh6': 'f_rest_5',\n 'r_sh7': 'f_rest_6',\n 'r_sh8': 'f_rest_7',\n 'r_sh9': 'f_rest_8',\n 'r_sh10': 'f_rest_9',\n 'r_sh11': 'f_rest_10',\n 'r_sh12': 'f_rest_11',\n 'r_sh13': 'f_rest_12',\n 'r_sh14': 'f_rest_13',\n 'r_sh15': 'f_rest_14',\n 'g_sh1': 'f_rest_15',\n 'g_sh2': 'f_rest_16',\n 'g_sh3': 'f_rest_17',\n 'g_sh4': 'f_rest_18',\n 'g_sh5': 'f_rest_19',\n 'g_sh6': 'f_rest_20',\n 'g_sh7': 'f_rest_21',\n 'g_sh8': 'f_rest_22',\n 'g_sh9': 'f_rest_23',\n 'g_sh10': 'f_rest_24',\n 'g_sh11': 'f_rest_25',\n 'g_sh12': 'f_rest_26',\n 'g_sh13': 'f_rest_27',\n 'g_sh14': 'f_rest_28',\n 'g_sh15': 'f_rest_29',\n 'b_sh1': 'f_rest_30',\n 'b_sh2': 'f_rest_31',\n 'b_sh3': 'f_rest_32',\n 'b_sh4': 'f_rest_33',\n 'b_sh5': 'f_rest_34',\n 'b_sh6': 'f_rest_35',\n 'b_sh7': 'f_rest_36',\n 'b_sh8': 'f_rest_37',\n 'b_sh9': 'f_rest_38',\n 'b_sh10': 'f_rest_39',\n 'b_sh11': 'f_rest_40',\n 'b_sh12': 'f_rest_41',\n 'b_sh13': 'f_rest_42',\n 'b_sh14': 'f_rest_43',\n 'b_sh15': 'f_rest_44',\n 'alpha': 'opacity',\n 'cov_s0': 'scale_0',\n 'cov_s1': 'scale_1',\n 'cov_s2': 'scale_2',\n 'cov_q3': 'rot_0',\n 'cov_q0': 'rot_1',\n 'cov_q1': 'rot_2',\n 'cov_q2': 'rot_3',\n }\n\n for col in ['nx', 'ny', 'nz']:\n if col not in df.columns:\n df[col] = 0.0\n\n # Rename the DataFrame columns according to the mapping\n df_renamed = df.rename(columns=column_mapping)\n\n # Fetch the dtype from BaseConverter\n dtype_list, _ = BaseConverter.define_dtype(has_scal=False, has_rgb=False)\n \n # Convert the dtype list to a structured dtype object\n dtype_structured = np.dtype(dtype_list)\n\n # Convert DataFrame to a structured array with the defined dtype\n structured_array = np.zeros(df_renamed.shape[0], dtype=dtype_structured)\n for name in dtype_structured.names:\n structured_array[name] = df_renamed[name].values if name in df_renamed.columns else 0\n\n return structured_array" } ]
import argparse import os import sys import numpy as np from .utils.utility import Utility from .utils.conversion_functions import convert from plyfile import PlyData, PlyElement from multiprocessing import Pool from .utils import config from .utils.utility_functions import init_worker from .utils.argument_actions import DensityFilterAction, RemoveFlyersAction, AboutAction from .utils.base_converter import BaseConverter
6,440
""" 3D Gaussian Splatting Converter Copyright (c) 2023 Francesco Fugazzi This software is released under the MIT License. For more information about the license, please see the LICENSE file. """ __version__ = '0.1' def main(): print(f"3D Gaussian Splatting Converter: {__version__}") parser = argparse.ArgumentParser(description="Convert between standard 3D Gaussian Splat and Cloud Compare formats.") # Arguments for input and output parser.add_argument("--input", "-i", required=True, help="Path to the source point cloud file.") parser.add_argument("--output", "-o", required=True, help="Path to save the converted point cloud file.") parser.add_argument("--target_format", "-f", choices=["3dgs", "cc"], required=True, help="Target point cloud format.") parser.add_argument("--debug", "-d", action="store_true", help="Enable debug prints.") parser.add_argument('--about', action=AboutAction, help='Show copyright and license info') # Other flags parser.add_argument("--rgb", action="store_true", help="Add RGB values to the output file based on f_dc values (only applicable when converting to Cloud Compare format).") parser.add_argument("--bbox", nargs=6, type=float, metavar=('minX', 'minY', 'minZ', 'maxX', 'maxY', 'maxZ'), help="Specify the 3D bounding box to crop the point cloud.") parser.add_argument("--density_filter", nargs='*', action=DensityFilterAction, help="Filter the points to keep only regions with higher point density. Optionally provide 'voxel_size' and 'threshold_percentage' as two numbers (e.g., --density_filter 0.5 0.25). If no numbers are provided, defaults of 1.0 and 0.32 are used.") parser.add_argument("--remove_flyers", nargs='*', action=RemoveFlyersAction, help="Remove flyers based on k-nearest neighbors. Requires two numbers: 'k' (number of neighbors) and 'threshold_factor'.") args = parser.parse_args() config.DEBUG = args.debug # Check and append ".ply" extension if absent if not args.output.lower().endswith('.ply'): args.output += '.ply' # Now check if the file exists after potentially appending the extension if os.path.exists(args.output): user_response = input(f"File {args.output} already exists. Do you want to overwrite it? (y/N): ").lower() if user_response != 'y': print("Operation aborted by the user.") return # Detect the format of the input file if args.input.lower().endswith('.parquet'): source_format = 'parquet' else:
""" 3D Gaussian Splatting Converter Copyright (c) 2023 Francesco Fugazzi This software is released under the MIT License. For more information about the license, please see the LICENSE file. """ __version__ = '0.1' def main(): print(f"3D Gaussian Splatting Converter: {__version__}") parser = argparse.ArgumentParser(description="Convert between standard 3D Gaussian Splat and Cloud Compare formats.") # Arguments for input and output parser.add_argument("--input", "-i", required=True, help="Path to the source point cloud file.") parser.add_argument("--output", "-o", required=True, help="Path to save the converted point cloud file.") parser.add_argument("--target_format", "-f", choices=["3dgs", "cc"], required=True, help="Target point cloud format.") parser.add_argument("--debug", "-d", action="store_true", help="Enable debug prints.") parser.add_argument('--about', action=AboutAction, help='Show copyright and license info') # Other flags parser.add_argument("--rgb", action="store_true", help="Add RGB values to the output file based on f_dc values (only applicable when converting to Cloud Compare format).") parser.add_argument("--bbox", nargs=6, type=float, metavar=('minX', 'minY', 'minZ', 'maxX', 'maxY', 'maxZ'), help="Specify the 3D bounding box to crop the point cloud.") parser.add_argument("--density_filter", nargs='*', action=DensityFilterAction, help="Filter the points to keep only regions with higher point density. Optionally provide 'voxel_size' and 'threshold_percentage' as two numbers (e.g., --density_filter 0.5 0.25). If no numbers are provided, defaults of 1.0 and 0.32 are used.") parser.add_argument("--remove_flyers", nargs='*', action=RemoveFlyersAction, help="Remove flyers based on k-nearest neighbors. Requires two numbers: 'k' (number of neighbors) and 'threshold_factor'.") args = parser.parse_args() config.DEBUG = args.debug # Check and append ".ply" extension if absent if not args.output.lower().endswith('.ply'): args.output += '.ply' # Now check if the file exists after potentially appending the extension if os.path.exists(args.output): user_response = input(f"File {args.output} already exists. Do you want to overwrite it? (y/N): ").lower() if user_response != 'y': print("Operation aborted by the user.") return # Detect the format of the input file if args.input.lower().endswith('.parquet'): source_format = 'parquet' else:
source_format = Utility.text_based_detect_format(args.input)
0
2023-10-28 15:09:50+00:00
8k
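Note on the 3dgsconverter record above: its filtering step rests on voxel counting (`parallel_voxel_counting` / `apply_density_filter`) — bucket points into voxels of a given size and keep only points that fall in sufficiently populated voxels. The sketch below illustrates just that core idea with NumPy; `density_filter` and its defaults are hypothetical names chosen for illustration. The real converter goes further: its threshold is a percentage of the total vertex count, it clusters neighbouring dense voxels and keeps only the largest connected cluster, and it operates on structured PLY arrays rather than a plain (N, 3) array.

```python
import numpy as np

def density_filter(points, voxel_size=1.0, threshold=10):
    """points: (N, 3) array of x, y, z; keep points lying in voxels with >= threshold points."""
    voxels = np.floor(points / voxel_size).astype(np.int64)  # integer voxel index per point
    _, inverse, counts = np.unique(voxels, axis=0, return_inverse=True, return_counts=True)
    inverse = inverse.reshape(-1)                             # per-point index into `counts`
    return points[counts[inverse] >= threshold]

rng = np.random.default_rng(0)
cloud = np.concatenate([rng.normal(0.0, 0.2, size=(1000, 3)),    # one dense cluster
                        rng.uniform(-10.0, 10.0, size=(50, 3))])  # scattered stray points
filtered = density_filter(cloud, voxel_size=0.5, threshold=10)
print(f"kept {len(filtered)} of {len(cloud)} points")
```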
masked-spacetime-hashing/msth
nerfstudio/fields/semantic_nerf_field.py
[ { "identifier": "RaySamples", "path": "nerfstudio/cameras/rays.py", "snippet": "class RaySamples(TensorDataclass):\n \"\"\"Samples along a ray\"\"\"\n\n frustums: Frustums\n \"\"\"Frustums along ray.\"\"\"\n camera_indices: Optional[TensorType[\"bs\":..., 1]] = None\n \"\"\"Camera index.\"\"\"\n deltas: Optional[TensorType[\"bs\":..., 1]] = None\n \"\"\"\"width\" of each sample.\"\"\"\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None\n \"\"\"Start of normalized bin edges along ray [0,1], before warping is applied, ie. linear in disparity sampling.\"\"\"\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None\n \"\"\"Start of normalized bin edges along ray [0,1], before warping is applied, ie. linear in disparity sampling.\"\"\"\n spacing_to_euclidean_fn: Optional[Callable] = None\n \"\"\"Function to convert bins to euclidean distance.\"\"\"\n metadata: Optional[Dict[str, TensorType[\"bs\":..., \"latent_dims\"]]] = None\n \"\"\"additional information relevant to generating ray samples\"\"\"\n\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n\n def get_transmittance(self, densities: TensorType[..., \"num_samples\", 1]) -> TensorType[..., \"num_samples\", 1]:\n \"\"\"Return weights based on predicted densities\n\n Args:\n densities: Predicted densities for samples along ray\n\n Returns:\n Weights for each sample\n \"\"\"\n\n delta_density = self.deltas * densities\n alphas = 1 - torch.exp(-delta_density)\n\n transmittance = torch.cumsum(delta_density[..., :-1, :], dim=-2)\n transmittance = torch.cat(\n [torch.zeros((*transmittance.shape[:1], 1, 1), device=densities.device), transmittance], dim=-2\n )\n transmittance = torch.exp(-transmittance) # [..., \"num_samples\"]\n transmittance = torch.nan_to_num(transmittance)\n\n return transmittance\n\n def get_weights(self, densities: TensorType[..., \"num_samples\", 1]) -> TensorType[..., \"num_samples\", 1]:\n \"\"\"Return weights based on predicted densities\n\n Args:\n densities: Predicted densities for samples along ray\n\n Returns:\n Weights for each sample\n \"\"\"\n\n delta_density = self.deltas * densities\n alphas = 1 - torch.exp(-delta_density)\n\n transmittance = torch.cumsum(delta_density[..., :-1, :], dim=-2)\n transmittance = torch.cat(\n [torch.zeros((*transmittance.shape[:1], 1, 1), device=densities.device), transmittance], dim=-2\n )\n transmittance = torch.exp(-transmittance) # [..., \"num_samples\"]\n\n weights = alphas * transmittance # [..., \"num_samples\"]\n weights = torch.nan_to_num(weights)\n\n return weights" }, { "identifier": "Encoding", "path": "nerfstudio/field_components/encodings.py", "snippet": "class Encoding(FieldComponent):\n \"\"\"Encode an input tensor. 
Intended to be subclassed\n\n Args:\n in_dim: Input dimension of tensor\n \"\"\"\n\n def __init__(self, in_dim: int) -> None:\n if in_dim <= 0:\n raise ValueError(\"Input dimension should be greater than zero\")\n super().__init__(in_dim=in_dim)\n\n @abstractmethod\n def forward(self, in_tensor: TensorType[\"bs\":..., \"input_dim\"]) -> TensorType[\"bs\":..., \"output_dim\"]:\n \"\"\"Call forward and returns and processed tensor\n\n Args:\n in_tensor: the input tensor to process\n \"\"\"\n raise NotImplementedError" }, { "identifier": "Identity", "path": "nerfstudio/field_components/encodings.py", "snippet": "class Identity(Encoding):\n \"\"\"Identity encoding (Does not modify input)\"\"\"\n\n def get_out_dim(self) -> int:\n if self.in_dim is None:\n raise ValueError(\"Input dimension has not been set\")\n return self.in_dim\n\n def forward(self, in_tensor: TensorType[\"bs\":..., \"input_dim\"]) -> TensorType[\"bs\":..., \"output_dim\"]:\n return in_tensor" }, { "identifier": "DensityFieldHead", "path": "nerfstudio/field_components/field_heads.py", "snippet": "class DensityFieldHead(FieldHead):\n \"\"\"Density output\n\n Args:\n in_dim: input dimension. If not defined in constructor, it must be set later.\n activation: output head activation\n \"\"\"\n\n def __init__(self, in_dim: Optional[int] = None, activation: Optional[nn.Module] = nn.Softplus()) -> None:\n super().__init__(in_dim=in_dim, out_dim=1, field_head_name=FieldHeadNames.DENSITY, activation=activation)" }, { "identifier": "FieldHeadNames", "path": "nerfstudio/field_components/field_heads.py", "snippet": "class FieldHeadNames(Enum):\n \"\"\"Possible field outputs\"\"\"\n\n RGB = \"rgb\"\n SH = \"sh\"\n DENSITY = \"density\"\n NORMALS = \"normals\"\n PRED_NORMALS = \"pred_normals\"\n UNCERTAINTY = \"uncertainty\"\n TRANSIENT_RGB = \"transient_rgb\"\n TRANSIENT_DENSITY = \"transient_density\"\n SEMANTICS = \"semantics\"" }, { "identifier": "RGBFieldHead", "path": "nerfstudio/field_components/field_heads.py", "snippet": "class RGBFieldHead(FieldHead):\n \"\"\"RGB output\n\n Args:\n in_dim: input dimension. If not defined in constructor, it must be set later.\n activation: output head activation\n \"\"\"\n\n def __init__(self, in_dim: Optional[int] = None, activation: Optional[nn.Module] = nn.Sigmoid()) -> None:\n super().__init__(in_dim=in_dim, out_dim=3, field_head_name=FieldHeadNames.RGB, activation=activation)" }, { "identifier": "SemanticFieldHead", "path": "nerfstudio/field_components/field_heads.py", "snippet": "class SemanticFieldHead(FieldHead):\n \"\"\"Semantic output\n\n Args:\n num_classes: Number of semantic classes\n in_dim: input dimension. If not defined in constructor, it must be set later.\n activation: output head activation\n \"\"\"\n\n def __init__(self, num_classes: int, in_dim: Optional[int] = None) -> None:\n super().__init__(in_dim=in_dim, out_dim=num_classes, field_head_name=FieldHeadNames.SEMANTICS, activation=None)" }, { "identifier": "MLP", "path": "nerfstudio/field_components/mlp.py", "snippet": "class MLP(FieldComponent):\n \"\"\"Multilayer perceptron\n\n Args:\n in_dim: Input layer dimension\n num_layers: Number of network layers\n layer_width: Width of each MLP layer\n out_dim: Output layer dimension. 
Uses layer_width if None.\n activation: intermediate layer activation function.\n out_activation: output activation function.\n \"\"\"\n\n def __init__(\n self,\n in_dim: int,\n num_layers: int,\n layer_width: int,\n out_dim: Optional[int] = None,\n skip_connections: Optional[Tuple[int]] = None,\n activation: Optional[nn.Module] = nn.ReLU(),\n out_activation: Optional[nn.Module] = None,\n ) -> None:\n\n super().__init__()\n self.in_dim = in_dim\n assert self.in_dim > 0\n self.out_dim = out_dim if out_dim is not None else layer_width\n self.num_layers = num_layers\n self.layer_width = layer_width\n self.skip_connections = skip_connections\n self._skip_connections: Set[int] = set(skip_connections) if skip_connections else set()\n self.activation = activation\n self.out_activation = out_activation\n self.net = None\n self.build_nn_modules()\n\n def build_nn_modules(self) -> None:\n \"\"\"Initialize multi-layer perceptron.\"\"\"\n layers = []\n if self.num_layers == 1:\n layers.append(nn.Linear(self.in_dim, self.out_dim))\n else:\n for i in range(self.num_layers - 1):\n if i == 0:\n assert i not in self._skip_connections, \"Skip connection at layer 0 doesn't make sense.\"\n layers.append(nn.Linear(self.in_dim, self.layer_width))\n elif i in self._skip_connections:\n layers.append(nn.Linear(self.layer_width + self.in_dim, self.layer_width))\n else:\n layers.append(nn.Linear(self.layer_width, self.layer_width))\n layers.append(nn.Linear(self.layer_width, self.out_dim))\n self.layers = nn.ModuleList(layers)\n\n def forward(self, in_tensor: TensorType[\"bs\":..., \"in_dim\"]) -> TensorType[\"bs\":..., \"out_dim\"]:\n \"\"\"Process input with a multilayer perceptron.\n\n Args:\n in_tensor: Network input\n\n Returns:\n MLP network output\n \"\"\"\n x = in_tensor\n for i, layer in enumerate(self.layers):\n # as checked in `build_nn_modules`, 0 should not be in `_skip_connections`\n if i in self._skip_connections:\n x = torch.cat([in_tensor, x], -1)\n x = layer(x)\n if self.activation is not None and i < len(self.layers) - 1:\n x = self.activation(x)\n if self.out_activation is not None:\n x = self.out_activation(x)\n return x" }, { "identifier": "Field", "path": "nerfstudio/fields/base_field.py", "snippet": "class Field(nn.Module):\n \"\"\"Base class for fields.\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self._sample_locations = None\n self._density_before_activation = None\n\n def density_fn(self, positions: TensorType[\"bs\":..., 3]) -> TensorType[\"bs\":..., 1]:\n \"\"\"Returns only the density. Used primarily with the density grid.\n\n Args:\n positions: the origin of the samples/frustums\n \"\"\"\n # Need to figure out a better way to describe positions with a ray.\n ray_samples = RaySamples(\n frustums=Frustums(\n origins=positions,\n directions=torch.ones_like(positions),\n starts=torch.zeros_like(positions[..., :1]),\n ends=torch.zeros_like(positions[..., :1]),\n pixel_area=torch.ones_like(positions[..., :1]),\n )\n )\n density, _ = self.get_density(ray_samples)\n return density\n\n @abstractmethod\n def get_density(self, ray_samples: RaySamples) -> Tuple[TensorType[..., 1], TensorType[..., \"num_features\"]]:\n \"\"\"Computes and returns the densities. 
Returns a tensor of densities and a tensor of features.\n\n Args:\n ray_samples: Samples locations to compute density.\n \"\"\"\n\n def get_normals(self) -> TensorType[..., 3]:\n \"\"\"Computes and returns a tensor of normals.\n\n Args:\n density: Tensor of densities.\n \"\"\"\n assert self._sample_locations is not None, \"Sample locations must be set before calling get_normals.\"\n assert self._density_before_activation is not None, \"Density must be set before calling get_normals.\"\n assert (\n self._sample_locations.shape[:-1] == self._density_before_activation.shape[:-1]\n ), \"Sample locations and density must have the same shape besides the last dimension.\"\n\n normals = torch.autograd.grad(\n self._density_before_activation,\n self._sample_locations,\n grad_outputs=torch.ones_like(self._density_before_activation),\n retain_graph=True,\n )[0]\n\n normals = -torch.nn.functional.normalize(normals, dim=-1)\n\n return normals\n\n @abstractmethod\n def get_outputs(\n self, ray_samples: RaySamples, density_embedding: Optional[TensorType] = None\n ) -> Dict[FieldHeadNames, TensorType]:\n \"\"\"Computes and returns the colors. Returns output field values.\n\n Args:\n ray_samples: Samples locations to compute outputs.\n density_embedding: Density embeddings to condition on.\n \"\"\"\n\n def forward(self, ray_samples: RaySamples, compute_normals: bool = False) -> Dict[FieldHeadNames, TensorType]:\n \"\"\"Evaluates the field at points along the ray.\n\n Args:\n ray_samples: Samples to evaluate field on.\n \"\"\"\n if compute_normals:\n with torch.enable_grad():\n density, density_embedding = self.get_density(ray_samples)\n else:\n density, density_embedding = self.get_density(ray_samples)\n\n field_outputs = self.get_outputs(ray_samples, density_embedding=density_embedding)\n field_outputs[FieldHeadNames.DENSITY] = density # type: ignore\n\n if compute_normals:\n with torch.enable_grad():\n normals = self.get_normals()\n field_outputs[FieldHeadNames.NORMALS] = normals # type: ignore\n return field_outputs" } ]
from typing import Dict, Optional, Tuple from torch import nn from torchtyping import TensorType from nerfstudio.cameras.rays import RaySamples from nerfstudio.field_components.encodings import Encoding, Identity from nerfstudio.field_components.field_heads import ( DensityFieldHead, FieldHeadNames, RGBFieldHead, SemanticFieldHead, ) from nerfstudio.field_components.mlp import MLP from nerfstudio.fields.base_field import Field import torch
3,786
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Semantic NeRF field implementation. """ class SemanticNerfField(Field): """Semantic-NeRF field Args: num_semantic_classes: Number of distinct semantic classes. position_encoding: Position encoder. direction_encoding: Direction encoder. base_mlp_num_layers: Number of layers for base MLP. base_mlp_layer_width: Width of base MLP layers. head_mlp_num_layers: Number of layer for output head MLP. head_mlp_layer_width: Width of output head MLP layers. skip_connections: Where to add skip connection in base MLP. """ def __init__( self, num_semantic_classes: int, position_encoding: Encoding = Identity(in_dim=3), direction_encoding: Encoding = Identity(in_dim=3), base_mlp_num_layers: int = 8, base_mlp_layer_width: int = 256, head_mlp_num_layers: int = 2, head_mlp_layer_width: int = 128, skip_connections: Tuple[int] = (4,), ) -> None: super().__init__() self.num_semantic_classes = num_semantic_classes self.position_encoding = position_encoding self.direction_encoding = direction_encoding self.mlp_base = MLP( in_dim=self.position_encoding.get_out_dim(), num_layers=base_mlp_num_layers, layer_width=base_mlp_layer_width, skip_connections=skip_connections, out_activation=nn.ReLU(), ) self.mlp_head = MLP( in_dim=self.mlp_base.get_out_dim() + self.direction_encoding.get_out_dim(), num_layers=head_mlp_num_layers, layer_width=head_mlp_layer_width, out_activation=nn.ReLU(), ) self.mlp_semantic = MLP( in_dim=self.mlp_head.get_out_dim(), layer_width=self.mlp_head.layer_width // 2, num_layers=1, activation=nn.ReLU(), out_activation=nn.ReLU(), ) self.field_head_density = DensityFieldHead(in_dim=self.mlp_base.get_out_dim()) self.field_head_rgb = RGBFieldHead(in_dim=self.mlp_head.get_out_dim()) self.field_head_semantic = SemanticFieldHead( in_dim=self.mlp_semantic.get_out_dim(), num_classes=self.num_semantic_classes )
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Semantic NeRF field implementation. """ class SemanticNerfField(Field): """Semantic-NeRF field Args: num_semantic_classes: Number of distinct semantic classes. position_encoding: Position encoder. direction_encoding: Direction encoder. base_mlp_num_layers: Number of layers for base MLP. base_mlp_layer_width: Width of base MLP layers. head_mlp_num_layers: Number of layer for output head MLP. head_mlp_layer_width: Width of output head MLP layers. skip_connections: Where to add skip connection in base MLP. """ def __init__( self, num_semantic_classes: int, position_encoding: Encoding = Identity(in_dim=3), direction_encoding: Encoding = Identity(in_dim=3), base_mlp_num_layers: int = 8, base_mlp_layer_width: int = 256, head_mlp_num_layers: int = 2, head_mlp_layer_width: int = 128, skip_connections: Tuple[int] = (4,), ) -> None: super().__init__() self.num_semantic_classes = num_semantic_classes self.position_encoding = position_encoding self.direction_encoding = direction_encoding self.mlp_base = MLP( in_dim=self.position_encoding.get_out_dim(), num_layers=base_mlp_num_layers, layer_width=base_mlp_layer_width, skip_connections=skip_connections, out_activation=nn.ReLU(), ) self.mlp_head = MLP( in_dim=self.mlp_base.get_out_dim() + self.direction_encoding.get_out_dim(), num_layers=head_mlp_num_layers, layer_width=head_mlp_layer_width, out_activation=nn.ReLU(), ) self.mlp_semantic = MLP( in_dim=self.mlp_head.get_out_dim(), layer_width=self.mlp_head.layer_width // 2, num_layers=1, activation=nn.ReLU(), out_activation=nn.ReLU(), ) self.field_head_density = DensityFieldHead(in_dim=self.mlp_base.get_out_dim()) self.field_head_rgb = RGBFieldHead(in_dim=self.mlp_head.get_out_dim()) self.field_head_semantic = SemanticFieldHead( in_dim=self.mlp_semantic.get_out_dim(), num_classes=self.num_semantic_classes )
def get_density(self, ray_samples: RaySamples) -> Tuple[TensorType, TensorType]:
0
2023-10-26 04:39:15+00:00
8k
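Note on the semantic NeRF record above: the MLP component it imports re-injects the encoded input at the layers listed in `skip_connections` (layer 4 of 8 by default) by concatenating it onto the hidden activation. The following is a minimal sketch of that skip pattern in plain PyTorch, under assumed NeRF-like dimensions; `SkipMLP` is a hypothetical name and this is not the library implementation (the real class also adds a final output layer and handles activations separately).

```python
import torch
import torch.nn as nn

class SkipMLP(nn.Module):
    """Illustrative MLP that concatenates the original input back in at skip layers."""
    def __init__(self, in_dim=63, width=256, num_layers=8, skips=(4,)):
        super().__init__()
        self.skips = set(skips)
        layers = []
        for i in range(num_layers):
            # Skip layers take the hidden width plus the re-injected input width.
            layer_in = in_dim if i == 0 else width + (in_dim if i in self.skips else 0)
            layers.append(nn.Linear(layer_in, width))
        self.layers = nn.ModuleList(layers)

    def forward(self, x):
        h = x
        for i, layer in enumerate(self.layers):
            if i in self.skips and i != 0:
                h = torch.cat([x, h], dim=-1)  # re-inject the encoded input
            h = torch.relu(layer(h))
        return h

features = SkipMLP()(torch.randn(8, 63))
print(features.shape)  # torch.Size([8, 256])
```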
mikacuy/PL-NeRF
nerf_extract_mesh.py
[ { "identifier": "load_llff_data", "path": "load_llff.py", "snippet": "def load_llff_data(basedir, factor=8, recenter=True, bd_factor=.75, spherify=False, path_zflat=False):\n \n\n poses, bds, imgs = _load_data(basedir, factor=factor) # factor=8 downsamples original imgs by 8x\n print('Loaded', basedir, bds.min(), bds.max())\n \n # Correct rotation matrix ordering and move variable dim to axis 0\n poses = np.concatenate([poses[:, 1:2, :], -poses[:, 0:1, :], poses[:, 2:, :]], 1)\n poses = np.moveaxis(poses, -1, 0).astype(np.float32)\n imgs = np.moveaxis(imgs, -1, 0).astype(np.float32)\n images = imgs\n bds = np.moveaxis(bds, -1, 0).astype(np.float32)\n \n # Rescale if bd_factor is provided\n sc = 1. if bd_factor is None else 1./(bds.min() * bd_factor)\n poses[:,:3,3] *= sc\n bds *= sc\n \n if recenter:\n poses = recenter_poses(poses)\n \n if spherify:\n poses, render_poses, bds = spherify_poses(poses, bds)\n\n else:\n \n c2w = poses_avg(poses)\n print('recentered', c2w.shape)\n print(c2w[:3,:4])\n\n ## Get spiral\n # Get average pose\n up = normalize(poses[:, :3, 1].sum(0))\n\n # Find a reasonable \"focus depth\" for this dataset\n close_depth, inf_depth = bds.min()*.9, bds.max()*5.\n dt = .75\n mean_dz = 1./(((1.-dt)/close_depth + dt/inf_depth))\n focal = mean_dz\n\n # Get radii for spiral path\n shrink_factor = .8\n zdelta = close_depth * .2\n tt = poses[:,:3,3] # ptstocam(poses[:3,3,:].T, c2w).T\n rads = np.percentile(np.abs(tt), 90, 0)\n c2w_path = c2w\n N_views = 120\n N_rots = 2\n if path_zflat:\n# zloc = np.percentile(tt, 10, 0)[2]\n zloc = -close_depth * .1\n c2w_path[:3,3] = c2w_path[:3,3] + zloc * c2w_path[:3,2]\n rads[2] = 0.\n N_rots = 1\n N_views/=2\n\n # Generate poses for spiral path\n render_poses = render_path_spiral(c2w_path, up, rads, focal, zdelta, zrate=.5, rots=N_rots, N=N_views)\n \n \n render_poses = np.array(render_poses).astype(np.float32)\n\n c2w = poses_avg(poses)\n print('Data:')\n print(poses.shape, images.shape, bds.shape)\n \n dists = np.sum(np.square(c2w[:3,3] - poses[:,:3,3]), -1)\n i_test = np.argmin(dists)\n print('HOLDOUT view is', i_test)\n \n images = images.astype(np.float32)\n poses = poses.astype(np.float32)\n\n return images, poses, bds, render_poses, i_test" }, { "identifier": "load_blender_data", "path": "load_blender.py", "snippet": "def load_blender_data(basedir, half_res=False, testskip=1):\n splits = ['train', 'val', 'test']\n metas = {}\n for s in splits:\n with open(os.path.join(basedir, 'transforms_{}.json'.format(s)), 'r') as fp:\n metas[s] = json.load(fp)\n\n all_imgs = []\n all_poses = []\n counts = [0]\n for s in splits:\n meta = metas[s]\n imgs = []\n poses = []\n if s=='train' or testskip==0:\n skip = 1\n else:\n skip = testskip\n \n for frame in meta['frames'][::skip]:\n fname = os.path.join(basedir, frame['file_path'] + '.png')\n imgs.append(imageio.imread(fname))\n poses.append(np.array(frame['transform_matrix']))\n imgs = (np.array(imgs) / 255.).astype(np.float32) # keep all 4 channels (RGBA)\n poses = np.array(poses).astype(np.float32)\n counts.append(counts[-1] + imgs.shape[0])\n all_imgs.append(imgs)\n all_poses.append(poses)\n \n i_split = [np.arange(counts[i], counts[i+1]) for i in range(3)]\n \n imgs = np.concatenate(all_imgs, 0)\n poses = np.concatenate(all_poses, 0)\n \n H, W = imgs[0].shape[:2]\n camera_angle_x = float(meta['camera_angle_x'])\n focal = .5 * W / np.tan(.5 * camera_angle_x)\n \n render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)\n \n if 
half_res:\n H = H//2\n W = W//2\n focal = focal/2.\n\n imgs_half_res = np.zeros((imgs.shape[0], H, W, 4))\n for i, img in enumerate(imgs):\n imgs_half_res[i] = cv2.resize(img, (W, H), interpolation=cv2.INTER_AREA)\n imgs = imgs_half_res\n # imgs = tf.image.resize_area(imgs, [400, 400]).numpy()\n\n \n return imgs, poses, render_poses, [H, W, focal], i_split" }, { "identifier": "load_scene_blender_fixed_dist_new", "path": "load_blender.py", "snippet": "def load_scene_blender_fixed_dist_new(basedir, half_res=True, train_dist=1.0, test_dist=1.0, val_dist=1.0):\n splits = ['train', 'val', 'test']\n\n all_imgs = []\n\n all_poses = []\n all_intrinsics = []\n counts = [0]\n filenames = []\n\n for s in splits:\n\n if s == \"train\":\n folder = 'radius_{}_{}'.format(str(train_dist), s)\n transforms_file = 'transforms_radius{}_{}.json'.format(str(train_dist), s)\n elif s == \"val\":\n folder = 'radius_{}_{}'.format(str(val_dist), s)\n transforms_file = 'transforms_radius{}_{}.json'.format(str(val_dist), s) \n elif s == \"test\":\n folder = 'radius_{}_{}'.format(str(test_dist), s)\n transforms_file = 'transforms_radius{}_{}.json'.format(str(test_dist), s) \n else:\n ## dummy will return not exist\n transforms_file = \"blah\"\n\n if os.path.exists(os.path.join(basedir, transforms_file)):\n\n json_fname = os.path.join(basedir, transforms_file)\n\n with open(json_fname, 'r') as fp:\n meta = json.load(fp)\n\n # if 'train' in s:\n near = 2.\n far = 6.\n camera_angle_x = float(meta['camera_angle_x'])\n\n imgs = []\n poses = []\n intrinsics = []\n\n if s=='train':\n skip = 1\n elif s == \"val\":\n skip = 1\n elif s ==\"test\":\n skip = 4\n elif \"video\" in s:\n skip = 1\n \n for frame in meta['frames'][::skip]:\n if len(frame['file_path']) != 0 :\n if half_res :\n downsample = 2\n else:\n downsample = 1\n\n img = read_files(os.path.join(basedir, frame['file_path']+\".png\"), downsample_scale=downsample)\n\n filenames.append(frame['file_path'])\n imgs.append(img)\n\n # poses.append(np.array(frame['transform_matrix'])@ BLENDER2OPENCV)\n poses.append(np.array(frame['transform_matrix']))\n\n H, W = img.shape[:2]\n focal = .5 * W / np.tan(.5 * camera_angle_x) \n\n fx, fy, cx, cy = focal, focal, W/2.0, H/2.0\n intrinsics.append(np.array((fx, fy, cx, cy)))\n\n counts.append(counts[-1] + len(poses))\n if len(imgs) > 0:\n all_imgs.append(np.array(imgs))\n all_poses.append(np.array(poses).astype(np.float32))\n all_intrinsics.append(np.array(intrinsics).astype(np.float32))\n\n else:\n counts.append(counts[-1])\n\n render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)\n\n i_split = [np.arange(counts[i], counts[i+1]) for i in range(len(splits))]\n imgs = np.concatenate(all_imgs, 0)\n poses = np.concatenate(all_poses, 0)\n intrinsics = np.concatenate(all_intrinsics, 0)\n \n return imgs, poses, render_poses, [H, W, focal], i_split" }, { "identifier": "load_scene_blender2", "path": "load_blender.py", "snippet": "def load_scene_blender2(basedir, train_json = \"transforms_train.json\", half_res=True):\n splits = ['train', 'val', 'test']\n # splits = ['test']\n\n all_imgs = []\n\n all_poses = []\n all_intrinsics = []\n counts = [0]\n filenames = []\n for s in splits:\n if os.path.exists(os.path.join(basedir, '{}_transforms.json'.format(s))):\n\n json_fname = os.path.join(basedir, '{}_transforms.json'.format(s))\n\n with open(json_fname, 'r') as fp:\n meta = json.load(fp)\n\n if 'train' in s:\n near = 2.\n far = 6.\n camera_angle_x = float(meta['camera_angle_x'])\n\n 
imgs = []\n poses = []\n intrinsics = []\n\n if s=='train':\n skip = 1\n elif s ==\"test\":\n skip = 8\n elif \"video\" in s:\n skip = 1\n \n for frame in meta['frames'][::skip]:\n if len(frame['file_path']) != 0 :\n if half_res :\n downsample = 2\n else:\n downsample = 1\n\n img = read_files(os.path.join(basedir, frame['file_path']+\".png\"), downsample_scale=downsample)\n\n filenames.append(frame['file_path'])\n imgs.append(img)\n\n # poses.append(np.array(frame['transform_matrix'])@ BLENDER2OPENCV)\n poses.append(np.array(frame['transform_matrix']))\n\n H, W = img.shape[:2]\n focal = .5 * W / np.tan(.5 * camera_angle_x) \n\n fx, fy, cx, cy = focal, focal, W/2.0, H/2.0\n intrinsics.append(np.array((fx, fy, cx, cy)))\n\n counts.append(counts[-1] + len(poses))\n if len(imgs) > 0:\n all_imgs.append(np.array(imgs))\n all_poses.append(np.array(poses).astype(np.float32))\n all_intrinsics.append(np.array(intrinsics).astype(np.float32))\n\n else:\n counts.append(counts[-1])\n\n render_poses = torch.stack([pose_spherical(angle, -30.0, 4.0) for angle in np.linspace(-180,180,40+1)[:-1]], 0)\n\n i_split = [np.arange(counts[i], counts[i+1]) for i in range(len(splits))]\n imgs = np.concatenate(all_imgs, 0)\n poses = np.concatenate(all_poses, 0)\n intrinsics = np.concatenate(all_intrinsics, 0)\n \n return imgs, poses, render_poses, [H, W, focal], i_split" } ]
import os, sys import numpy as np import imageio import json import random import time import torch import torch.nn as nn import torch.nn.functional as F import torchvision import configargparse import datetime import math import cv2 import shutil import trimesh import mcubes import configargparse from tqdm import tqdm, trange from torch.utils.tensorboard import SummaryWriter from skimage.metrics import structural_similarity from lpips import LPIPS from run_nerf_helpers import * from load_llff import load_llff_data from load_deepvoxels import load_dv_data from load_blender import load_blender_data, load_scene_blender_fixed_dist_new, load_scene_blender2 from load_LINEMOD import load_LINEMOD_data from natsort import natsorted from argparse import Namespace from tqdm import tqdm, trange
6,404
parser.add_argument('--test_dist', default= 1.0, type=float) parser.add_argument("--eval_scene_id", type=str, default="chair_rgba_fixdist_nv100_dist0.25-1.0-4_depth_sfn", help='scene identifier for eval') parser.add_argument("--eval_data_dir", type=str, default="../nerf_synthetic/fixed_dist_new-rgba/", help='directory containing the scenes for eval') ### DTU flags parser.add_argument("--dtu_scene_id", type=int, default=21, help='scan id for DTU dataset to render') parser.add_argument("--num_train", type=int, default=40, help='number of training views to use (1 - 49)') parser.add_argument("--dtu_split", type=str, default=None, help='number of training views to use (1 - 49)') ################## return parser def train(): parser = config_parser() args = parser.parse_args() print(args.white_bkgd) tmp_task = args.task tmp_data_dir = args.data_dir tmp_scene_id = args.scene_id tmp_dataset = args.dataset tmp_test_dist = args.test_dist tmp_ckpt_dir = args.ckpt_dir tmp_set_near_plane = args.set_near_plane tmp_white_bkgd = args.white_bkgd tmp_eval_scene_id = args.eval_scene_id tmp_eval_data_dir = args.eval_data_dir # tmp_white_bkgd = False tmp_test_skip = args.testskip # tmp_mode = args.mode # tmp_N_samples = args.N_samples # tmp_N_importance = args.N_importance # load nerf parameters from training args_file = os.path.join(args.ckpt_dir, args.expname, 'args.json') with open(args_file, 'r') as af: args_dict = json.load(af) args = Namespace(**args_dict) # task and paths are not overwritten args.task = tmp_task args.data_dir = tmp_data_dir args.ckpt_dir = tmp_ckpt_dir # args.mode = tmp_mode args.train_jsonfile = 'transforms_train.json' args.set_near_plane = tmp_set_near_plane # args.N_samples = tmp_N_samples # args.N_importance = tmp_N_importance args.dataset = tmp_dataset args.test_dist = tmp_test_dist args.scene_id = tmp_scene_id args.white_bkgd = tmp_white_bkgd args.eval_scene_id = tmp_eval_scene_id args.eval_data_dir = tmp_eval_data_dir args.testskip = tmp_test_skip print('\n'.join(f'{k}={v}' for k, v in vars(args).items())) args.n_gpus = torch.cuda.device_count() print(f"Using {args.n_gpus} GPU(s).") # Load data scene_data_dir = os.path.join(args.data_dir, args.scene_id) K = None if args.dataset == 'llff': images, poses, bds, render_poses, i_test = load_llff_data(scene_data_dir, args.factor, recenter=True, bd_factor=.75, spherify=args.spherify) hwf = poses[0,:3,-1] poses = poses[:,:3,:4] print('Loaded llff', images.shape, render_poses.shape, hwf, scene_data_dir) if not isinstance(i_test, list): i_test = [i_test] if args.llffhold > 0: print('Auto LLFF holdout,', args.llffhold) i_test = np.arange(images.shape[0])[::args.llffhold] i_val = i_test i_train = np.array([i for i in np.arange(int(images.shape[0])) if (i not in i_test and i not in i_val)]) print('DEFINING BOUNDS') if args.no_ndc: near = np.ndarray.min(bds) * .9 far = np.ndarray.max(bds) * 1. else: near = 0. far = 1. print('NEAR FAR', near, far) elif args.dataset == 'blender': images, poses, render_poses, hwf, i_split = load_blender_data(scene_data_dir, args.half_res, args.testskip) print('Loaded blender', images.shape, render_poses.shape, hwf, scene_data_dir) i_train, i_val, i_test = i_split # near = 2. near = args.set_near_plane print("Set near plane to: " + str(near)) far = 6. if args.white_bkgd: images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:]) else: images = images[...,:3] elif args.dataset == "blender2":
''' Use a different learning rate for the coarse network Use constant aggregation for the first few iterations ''' # from load_dtu import load_dtu, load_dtu2 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") np.random.seed(0) torch.manual_seed(0) torch.cuda.manual_seed(0) DEBUG = False def build_json_for_dtu(splits, intrinsics, poses, near, far): out_dict = {} out_dict = {"near": near, "far": far} i_train, i_test = splits train_dicts = [] test_dicts = [] for i in i_train: train_dict = {} train_dict["extrinsic"] = poses[i].tolist() train_dict["intrinsic"] = intrinsics[i].tolist() train_dict["pose_id"] = int(i) train_dicts.append(train_dict) for i in i_test: test_dict = {} test_dict["extrinsic"] = poses[i].tolist() test_dict["intrinsic"] = intrinsics[i].tolist() test_dict["pose_id"] = int(i) test_dicts.append(test_dict) out_dict["train_frames"] = train_dicts out_dict["test_frames"] = test_dicts return out_dict def batchify(fn, chunk): """Constructs a version of 'fn' that applies to smaller batches. """ if chunk is None: return fn def ret(inputs): return torch.cat([fn(inputs[i:i+chunk]) for i in range(0, inputs.shape[0], chunk)], 0) return ret def run_network(inputs, viewdirs, fn, embed_fn, embeddirs_fn, netchunk=1024*64): """Prepares inputs and applies network 'fn'. """ inputs_flat = torch.reshape(inputs, [-1, inputs.shape[-1]]) embedded = embed_fn(inputs_flat) if viewdirs is not None: # input_dirs = viewdirs[:,None].expand(inputs.shape) # input_dirs_flat = torch.reshape(input_dirs, [-1, input_dirs.shape[-1]]) input_dirs_flat = viewdirs embedded_dirs = embeddirs_fn(input_dirs_flat) embedded = torch.cat([embedded, embedded_dirs], -1) outputs_flat = batchify(fn, netchunk)(embedded) outputs = torch.reshape(outputs_flat, list(inputs.shape[:-1]) + [outputs_flat.shape[-1]]) return outputs def batchify_rays(rays_flat, chunk=1024*32, **kwargs): """Render rays in smaller minibatches to avoid OOM. """ all_ret = {} for i in range(0, rays_flat.shape[0], chunk): ret = render_rays(rays_flat[i:i+chunk], **kwargs) for k in ret: if k not in all_ret: all_ret[k] = [] all_ret[k].append(ret[k]) all_ret = {k : torch.cat(all_ret[k], 0) for k in all_ret} return all_ret def render(H, W, K, chunk=1024*32, rays=None, c2w=None, ndc=True, near=0., far=1., use_viewdirs=False, c2w_staticcam=None, **kwargs): """Render rays Args: H: int. Height of image in pixels. W: int. Width of image in pixels. focal: float. Focal length of pinhole camera. chunk: int. Maximum number of rays to process simultaneously. Used to control maximum memory usage. Does not affect final results. rays: array of shape [2, batch_size, 3]. Ray origin and direction for each example in batch. c2w: array of shape [3, 4]. Camera-to-world transformation matrix. ndc: bool. If True, represent ray origin, direction in NDC coordinates. near: float or array of shape [batch_size]. Nearest distance for a ray. far: float or array of shape [batch_size]. Farthest distance for a ray. use_viewdirs: bool. If True, use viewing direction of a point in space in model. c2w_staticcam: array of shape [3, 4]. If not None, use this transformation matrix for camera while using other c2w argument for viewing directions. Returns: rgb_map: [batch_size, 3]. Predicted RGB values for rays. disp_map: [batch_size]. Disparity map. Inverse of depth. acc_map: [batch_size]. Accumulated opacity (alpha) along a ray. extras: dict with everything returned by render_rays(). 
""" if c2w is not None: # special case to render full image rays_o, rays_d = get_rays(H, W, K, c2w) else: # use provided ray batch rays_o, rays_d = rays if use_viewdirs: # provide ray directions as input viewdirs = rays_d if c2w_staticcam is not None: # special case to visualize effect of viewdirs rays_o, rays_d = get_rays(H, W, K, c2w_staticcam) viewdirs = viewdirs / torch.norm(viewdirs, dim=-1, keepdim=True) viewdirs = torch.reshape(viewdirs, [-1,3]).float() sh = rays_d.shape # [..., 3] if ndc: # for forward facing scenes rays_o, rays_d = ndc_rays(H, W, K[0][0], 1., rays_o, rays_d) # Create ray batch rays_o = torch.reshape(rays_o, [-1,3]).float() rays_d = torch.reshape(rays_d, [-1,3]).float() near, far = near * torch.ones_like(rays_d[...,:1]), far * torch.ones_like(rays_d[...,:1]) rays = torch.cat([rays_o, rays_d, near, far], -1) if use_viewdirs: rays = torch.cat([rays, viewdirs], -1) # Render and reshape all_ret = batchify_rays(rays, chunk, **kwargs) for k in all_ret: k_sh = list(sh[:-1]) + list(all_ret[k].shape[1:]) all_ret[k] = torch.reshape(all_ret[k], k_sh) k_extract = ['rgb_map', 'disp_map', 'acc_map'] ret_list = [all_ret[k] for k in k_extract] ret_dict = {k : all_ret[k] for k in all_ret if k not in k_extract} return ret_list + [ret_dict] def create_nerf(args): """Instantiate NeRF's MLP model. """ embed_fn, input_ch = get_embedder(args.multires, args.i_embed) input_ch_views = 0 embeddirs_fn = None if args.use_viewdirs: embeddirs_fn, input_ch_views = get_embedder(args.multires_views, args.i_embed) output_ch = 5 if args.N_importance > 0 else 4 skips = [4] model = NeRF(D=args.netdepth, W=args.netwidth, input_ch=input_ch, output_ch=output_ch, skips=skips, input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs).to(device) coarse_grad_vars = list(model.parameters()) model_fine = None if args.N_importance > 0: model_fine = NeRF(D=args.netdepth_fine, W=args.netwidth_fine, input_ch=input_ch, output_ch=output_ch, skips=skips, input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs).to(device) grad_vars = list(model_fine.parameters()) network_query_fn = lambda inputs, viewdirs, network_fn : run_network(inputs, viewdirs, network_fn, embed_fn=embed_fn, embeddirs_fn=embeddirs_fn, netchunk=args.netchunk) # Create optimizer optimizer = torch.optim.Adam(params=grad_vars, lr=args.lrate, betas=(0.9, 0.999)) optimizer_coarse = torch.optim.Adam(params=coarse_grad_vars, lr=args.lrate, betas=(0.9, 0.999)) start = 0 ########################## # Load checkpoints if args.ft_path is not None and args.ft_path!='None': ckpts = [args.ft_path] else: ckpts = [os.path.join(args.ckpt_dir, args.expname, f) for f in sorted(os.listdir(os.path.join(args.ckpt_dir, args.expname))) if 'tar' in f] print('Found ckpts', ckpts) if len(ckpts) > 0 and not args.no_reload: ckpt_path = ckpts[-1] print('Reloading from', ckpt_path) ckpt = torch.load(ckpt_path) start = ckpt['global_step'] # optimizer.load_state_dict(ckpt['optimizer_state_dict']) # Load model model.load_state_dict(ckpt['network_fn_state_dict']) if model_fine is not None: model_fine.load_state_dict(ckpt['network_fine_state_dict']) ########################## render_kwargs_train = { 'network_query_fn' : network_query_fn, 'perturb' : args.perturb, 'N_importance' : args.N_importance, 'network_fine' : model_fine, 'N_samples' : args.N_samples, 'network_fn' : model, 'use_viewdirs' : args.use_viewdirs, 'white_bkgd' : args.white_bkgd, 'raw_noise_std' : args.raw_noise_std, 'mode' : args.mode, 'color_mode': args.color_mode, 'farcolorfix': args.farcolorfix } # NDC 
only good for LLFF-style forward facing data if args.dataset != 'llff' or args.no_ndc: print('Not ndc!') render_kwargs_train['ndc'] = False render_kwargs_train['lindisp'] = args.lindisp render_kwargs_test = {k : render_kwargs_train[k] for k in render_kwargs_train} ### set to True for linear # render_kwargs_test['perturb'] = False render_kwargs_test['perturb'] = True render_kwargs_test['raw_noise_std'] = 0. return render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer, optimizer_coarse def compute_weights(raw, z_vals, rays_d, noise=0.): raw2alpha = lambda raw, dists, act_fn=F.relu: 1.-torch.exp(-act_fn(raw)*dists) dists = z_vals[...,1:] - z_vals[...,:-1] dists = torch.cat([dists, torch.full_like(dists[...,:1], 1e10, device=device)], -1) # [N_rays, N_samples] dists = dists * torch.norm(rays_d[...,None,:], dim=-1) alpha = raw2alpha(raw[...,3] + noise, dists) # [N_rays, N_samples] weights = alpha * torch.cumprod(torch.cat([torch.ones((alpha.shape[0], 1), device=device), 1.-alpha + 1e-10], -1), -1)[:, :-1] return weights ### Our reformulation to piecewise linear def compute_weights_piecewise_linear(raw, z_vals, near, far, rays_d, noise=0., return_tau=False): raw2expr = lambda raw, dists: torch.exp(-raw*dists) ### Concat z_vals = torch.cat([near, z_vals, far], -1) dists = z_vals[...,1:] - z_vals[...,:-1] ### Original code dists = dists * torch.norm(rays_d[...,None,:], dim=-1) tau = torch.cat([torch.ones((raw.shape[0], 1), device=device)*1e-10, raw[...,3] + noise, torch.ones((raw.shape[0], 1), device=device)*1e10], -1) ### tau(near) = 0, tau(far) = very big (will hit an opaque surface) tau = F.relu(tau) ## Make positive from proof of DS-NeRF interval_ave_tau = 0.5 * (tau[...,1:] + tau[...,:-1]) ''' Evaluating exp(-0.5 (tau_{i+1}+tau_i) (s_{i+1}-s_i) ) ''' expr = raw2expr(interval_ave_tau, dists) # [N_rays, N_samples+1] ### Transmittance until s_n T = torch.cumprod(torch.cat([torch.ones((expr.shape[0], 1), device=device), expr], -1), -1) # [N_rays, N_samples+2], T(near)=1, starts off at 1 ### Factor to multiply transmittance with factor = (1 - expr) weights = factor * T[:, :-1] # [N_rays, N_samples+1] if return_tau: return weights, tau, T else: return weights def raw2outputs(raw, z_vals, near, far, rays_d, mode, color_mode, raw_noise_std=0, pytest=False, white_bkgd=False, farcolorfix=False): """Transforms model's predictions to semantically meaningful values. Args: raw: [num_rays, num_samples along ray, 4]. Prediction from model. z_vals: [num_rays, num_samples along ray]. Integration time. rays_d: [num_rays, 3]. Direction of each ray. Returns: rgb_map: [num_rays, 3]. Estimated RGB color of a ray. disp_map: [num_rays]. Disparity map. Inverse of depth map. acc_map: [num_rays]. Sum of weights along each ray. weights: [num_rays, num_samples]. Weights assigned to each sampled color. depth_map: [num_rays]. Estimated distance to object. """ rgb = torch.sigmoid(raw[...,:3]) # [N_rays, N_samples, 3] noise = 0. 
if raw_noise_std > 0.: noise = torch.randn(raw[...,3].shape) * raw_noise_std # Overwrite randomly sampled data if pytest if pytest: np.random.seed(0) noise = np.random.rand(*list(raw[...,3].shape)) * raw_noise_std noise = torch.Tensor(noise) if mode == "linear": weights, tau, T = compute_weights_piecewise_linear(raw, z_vals, near, far, rays_d, noise, return_tau=True) if color_mode == "midpoint": if farcolorfix: rgb_concat = torch.cat([rgb[: ,0, :].unsqueeze(1), rgb, torch.zeros((rgb[:, -1].shape), device=device).unsqueeze(1)], 1) else: rgb_concat = torch.cat([rgb[: ,0, :].unsqueeze(1), rgb, rgb[: ,-1, :].unsqueeze(1)], 1) rgb_mid = .5 * (rgb_concat[:, 1:, :] + rgb_concat[:, :-1, :]) rgb_map = torch.sum(weights[...,None] * rgb_mid, -2) # [N_rays, 3] elif color_mode == "left": rgb_concat = torch.cat([rgb[: ,0, :].unsqueeze(1), rgb], 1) rgb_map = torch.sum(weights[...,None] * rgb_concat, -2) else: print("ERROR: Color mode unimplemented, please select left or midpoint.") ### Piecewise linear means take the midpoint z_vals = torch.cat([near, z_vals, far], -1) z_vals_mid = .5 * (z_vals[...,1:] + z_vals[...,:-1]) depth_map = torch.sum(weights * z_vals_mid, -1) elif mode == "constant": weights = compute_weights(raw, z_vals, rays_d, noise) rgb_map = torch.sum(weights[...,None] * rgb, -2) # [N_rays, 3] depth_map = torch.sum(weights * z_vals, -1) tau = None T = None disp_map = 1./torch.max(1e-10 * torch.ones_like(depth_map), depth_map / torch.sum(weights, -1)) acc_map = torch.sum(weights, -1) if white_bkgd: rgb_map = rgb_map + (1.-acc_map[...,None]) return rgb_map, disp_map, acc_map, weights, depth_map, tau, T def render_rays(ray_batch, network_fn, network_query_fn, N_samples, mode, color_mode, retraw=False, lindisp=False, perturb=0., N_importance=0, network_fine=None, white_bkgd=False, raw_noise_std=0., verbose=False, pytest=False, quad_solution_v2=False, zero_tol = 1e-4, epsilon = 1e-3, farcolorfix = False, constant_init = False): """Volumetric rendering. Args: ray_batch: array of shape [batch_size, ...]. All information necessary for sampling along a ray, including: ray origin, ray direction, min dist, max dist, and unit-magnitude viewing direction. network_fn: function. Model for predicting RGB and density at each point in space. network_query_fn: function used for passing queries to network_fn. N_samples: int. Number of different times to sample along each ray. retraw: bool. If True, include model's raw, unprocessed predictions. lindisp: bool. If True, sample linearly in inverse depth rather than in depth. perturb: float, 0 or 1. If non-zero, each ray is sampled at stratified random points in time. N_importance: int. Number of additional times to sample along each ray. These samples are only passed to network_fine. network_fine: "fine" network with same spec as network_fn. white_bkgd: bool. If True, assume a white background. raw_noise_std: ... verbose: bool. If True, print more debugging info. Returns: rgb_map: [num_rays, 3]. Estimated RGB color of a ray. Comes from fine model. disp_map: [num_rays]. Disparity map. 1 / depth. acc_map: [num_rays]. Accumulated opacity along each ray. Comes from fine model. raw: [num_rays, num_samples, 4]. Raw predictions from model. rgb0: See rgb_map. Output for coarse model. disp0: See disp_map. Output for coarse model. acc0: See acc_map. Output for coarse model. z_std: [num_rays]. Standard deviation of distances along ray for each sample. 
""" N_rays = ray_batch.shape[0] rays_o, rays_d = ray_batch[:,0:3], ray_batch[:,3:6] # [N_rays, 3] each viewdirs = ray_batch[:,-3:] if ray_batch.shape[-1] > 8 else None bounds = torch.reshape(ray_batch[...,6:8], [-1,1,2]) near, far = bounds[...,0], bounds[...,1] # [-1,1] t_vals = torch.linspace(0., 1., steps=N_samples) if not lindisp: z_vals = near * (1.-t_vals) + far * (t_vals) else: z_vals = 1./(1./near * (1.-t_vals) + 1./far * (t_vals)) z_vals = z_vals.expand([N_rays, N_samples]) if perturb > 0.: # get intervals between samples mids = .5 * (z_vals[...,1:] + z_vals[...,:-1]) upper = torch.cat([mids, z_vals[...,-1:]], -1) lower = torch.cat([z_vals[...,:1], mids], -1) # stratified samples in those intervals t_rand = torch.rand(z_vals.shape) # Pytest, overwrite u with numpy's fixed random numbers if pytest: np.random.seed(0) t_rand = np.random.rand(*list(z_vals.shape)) t_rand = torch.Tensor(t_rand) z_vals = lower + (upper - lower) * t_rand pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples, 3] ### If constant init then overwrite mode for coarse model to constant first if constant_init: # coarse_mode = "constant" mode = "constant" # else: # coarse_mode = mode # print(mode) # raw = run_network(pts) raw = network_query_fn(pts, viewdirs, network_fn) rgb_map, disp_map, acc_map, weights, depth_map, tau, T = raw2outputs(raw, z_vals, near, far, rays_d, mode, color_mode, raw_noise_std, pytest=pytest, white_bkgd=white_bkgd, farcolorfix=farcolorfix) if N_importance > 0: rgb_map_0, disp_map_0, acc_map_0, depth_map_0, z_vals_0, weights_0 = rgb_map, disp_map, acc_map, depth_map, z_vals, weights z_vals_mid = .5 * (z_vals[...,1:] + z_vals[...,:-1]) if mode == "linear": z_samples, _, _, _ = sample_pdf_reformulation(z_vals, weights, tau, T, near, far, N_importance, det=(perturb==0.), pytest=pytest, quad_solution_v2=quad_solution_v2, zero_threshold = zero_tol, epsilon_=epsilon) elif mode == "constant": z_samples = sample_pdf(z_vals_mid, weights[...,1:-1], N_importance, det=(perturb==0.), pytest=pytest) z_samples = z_samples.detach() ######## Clamping in quad solution should have fixed this z_samples = torch.clamp(z_samples, near, far) ######## z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1) pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples + N_importance, 3] run_fn = network_fn if network_fine is None else network_fine # raw = run_network(pts, fn=run_fn) raw = network_query_fn(pts, viewdirs, run_fn) rgb_map, disp_map, acc_map, weights, depth_map, tau, T = raw2outputs(raw, z_vals, near, far, rays_d, mode, color_mode, raw_noise_std, pytest=pytest, white_bkgd=white_bkgd, farcolorfix=farcolorfix) ret = {'rgb_map' : rgb_map, 'disp_map' : disp_map, 'acc_map' : acc_map, 'depth_map' : depth_map} if retraw: ret['raw'] = raw if N_importance > 0: ret['rgb0'] = rgb_map_0 ret['disp0'] = disp_map_0 ret['depth0'] = depth_map_0 ret['acc0'] = acc_map_0 ret['z_std'] = torch.std(z_samples, dim=-1, unbiased=False) # [N_rays] for k in ret: if (torch.isnan(ret[k]).any() or torch.isinf(ret[k]).any()) and DEBUG: print(f"! 
[Numerical Error] {k} contains nan or inf.") return ret #### For mesh extraction #### def extract_fields(bound_min, bound_max, resolution, query_func, model): N = 64 X = torch.linspace(bound_min[0], bound_max[0], resolution).split(N) Y = torch.linspace(bound_min[1], bound_max[1], resolution).split(N) Z = torch.linspace(bound_min[2], bound_max[2], resolution).split(N) u = np.zeros([resolution, resolution, resolution], dtype=np.float32) with torch.no_grad(): for xi, xs in enumerate(tqdm(X)): for yi, ys in enumerate(Y): for zi, zs in enumerate(Z): xx, yy, zz = torch.meshgrid(xs, ys, zs) pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) viewdirs = torch.zeros_like(pts, device=pts.device) # print(pts.shape) # print(viewdirs.shape) # print(query_func(pts, viewdirs, model).shape) val = query_func(pts, viewdirs, model).reshape(len(xs), len(ys), len(zs), -1) # print(val.shape) taus = F.relu(val[...,3]) # print(taus.shape) # print(torch.nonzero(taus)) # exit() u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = taus.detach().cpu().numpy() return u def extract_iso_level(density, threshold=25): # Density boundaries min_a, max_a, std_a = density.min(), density.max(), density.std() # Adaptive iso level iso_value = min(max(threshold, min_a + std_a), max_a - std_a) print(f"Min density {min_a}, Max density: {max_a}, Mean density {density.mean()}") print(f"Querying based on iso level: {iso_value}") return iso_value def extract_geometry(bound_min, bound_max, resolution, threshold, query_func, model, adaptive=False): print('threshold: {}'.format(threshold)) u = extract_fields(bound_min, bound_max, resolution, query_func, model) if not adaptive: vertices, triangles = mcubes.marching_cubes(u, threshold) else: vertices, triangles = mcubes.marching_cubes(u, extract_iso_level(u, threshold)) try: b_max_np = bound_max.detach().cpu().numpy() b_min_np = bound_min.detach().cpu().numpy() except: b_max_np = bound_max b_min_np = bound_min vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] return vertices, triangles ############################# def config_parser(): parser = configargparse.ArgumentParser() parser.add_argument('--task', default="train", type=str, help='one out of: "train", "test", "video"') parser.add_argument('--config', is_config_file=True, help='config file path') parser.add_argument("--expname", type=str, help='experiment name') parser.add_argument("--ckpt_dir", type=str, default="", help='checkpoint directory') parser.add_argument("--scene_id", type=str, default="lego", help='scene identifier') parser.add_argument("--data_dir", type=str, default="../nerf_synthetic", help='directory containing the scenes') parser.add_argument("--dataset", type=str, default="blender", help='dataset used -- selects which dataloader"') # training options parser.add_argument("--netdepth", type=int, default=8, help='layers in network') parser.add_argument("--netwidth", type=int, default=256, help='channels per layer') parser.add_argument("--netdepth_fine", type=int, default=8, help='layers in fine network') parser.add_argument("--netwidth_fine", type=int, default=256, help='channels per layer in fine network') parser.add_argument("--N_rand", type=int, default=32*32*4, help='batch size (number of random rays per gradient step)') parser.add_argument("--lrate", type=float, default=5e-4, help='learning rate') parser.add_argument("--coarse_lrate", type=float, default=1e-4, help='learning rate') 
parser.add_argument("--lrate_decay", type=int, default=250, help='exponential learning rate decay (in 1000 steps)') parser.add_argument("--chunk", type=int, default=1024*32, help='number of rays processed in parallel, decrease if running out of memory') parser.add_argument("--netchunk", type=int, default=1024*64, help='number of pts sent through network in parallel, decrease if running out of memory') parser.add_argument("--no_batching", action='store_true', help='only take random rays from 1 image at a time') parser.add_argument("--no_reload", action='store_true', help='do not reload weights from saved ckpt') parser.add_argument("--ft_path", type=str, default=None, help='specific weights npy file to reload for coarse network') # rendering options parser.add_argument("--N_samples", type=int, default=64, help='number of coarse samples per ray') parser.add_argument("--N_importance", type=int, default=128, help='number of additional fine samples per ray') parser.add_argument("--perturb", type=float, default=1., help='set to 0. for no jitter, 1. for jitter') parser.add_argument("--use_viewdirs", action='store_true', help='use full 5D input instead of 3D') parser.add_argument("--i_embed", type=int, default=0, help='set 0 for default positional encoding, -1 for none') parser.add_argument("--multires", type=int, default=10, help='log2 of max freq for positional encoding (3D location)') parser.add_argument("--multires_views", type=int, default=4, help='log2 of max freq for positional encoding (2D direction)') parser.add_argument("--raw_noise_std", type=float, default=0., help='std dev of noise added to regularize sigma_a output, 1e0 recommended') parser.add_argument("--render_only", action='store_true', help='do not optimize, reload weights and render out render_poses path') parser.add_argument("--render_test", action='store_true', help='render the test set instead of render_poses path') parser.add_argument("--render_factor", type=int, default=0, help='downsampling factor to speed up rendering, set 4 or 8 for fast preview') # training options parser.add_argument("--precrop_iters", type=int, default=0, help='number of steps to train on central crops') parser.add_argument("--precrop_frac", type=float, default=.5, help='fraction of img taken for central crops') # dataset options parser.add_argument("--testskip", type=int, default=1, help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels') ## blender flags parser.add_argument("--white_bkgd", action='store_true', help='set to render synthetic data on a white bkgd (always use for dvoxels)') # parser.add_argument('--white_bkgd', default= False, type=bool) parser.add_argument("--half_res", action='store_true', help='load blender synthetic data at 400x400 instead of 800x800') ## llff flags parser.add_argument("--factor", type=int, default=8, help='downsample factor for LLFF images') parser.add_argument("--no_ndc", action='store_true', help='do not use normalized device coordinates (set for non-forward facing scenes)') parser.add_argument("--lindisp", action='store_true', help='sampling linearly in disparity rather than depth') parser.add_argument("--spherify", action='store_true', help='set for spherical 360 scenes') parser.add_argument("--llffhold", type=int, default=8, help='will take every 1/N images as LLFF test set, paper uses 8') # logging/saving options parser.add_argument("--num_iterations", type=int, default=200000, help='number of iterations for training') parser.add_argument("--i_print", type=int, default=100, 
help='frequency of console printout and metric loggin') parser.add_argument("--i_img", type=int, default=600000, help='frequency of tensorboard image logging') parser.add_argument("--i_weights", type=int, default=100000, help='frequency of weight ckpt saving') parser.add_argument("--i_testset", type=int, default=500000, help='frequency of testset saving') parser.add_argument("--i_video", type=int, default=500000, help='frequency of render_poses video saving') ### For PWL ### parser.add_argument("--mode", type=str, default="constant", help='rendering opacity aggregation mode -- whether to use piecewise constant (vanilla) or piecewise linear (reformulation)."') parser.add_argument("--color_mode", type=str, default="midpoint", help='rendering color aggregation mode -- whether to use left bin or midpoint."') parser.add_argument('--quad_solution_v2', default= True, type=bool) ### Epsilon and zero tol in quadratic solution parser.add_argument("--zero_tol", type=float, default=1e-4, help='zero tol to revert to piecewise constant assumption') parser.add_argument("--epsilon", type=float, default=1e-3, help='epsilon value in the increasing and decreasing cases or max(x,epsilon)') parser.add_argument('--set_near_plane', default= 2., type=float) parser.add_argument('--farcolorfix', default= False, type=bool) parser.add_argument("--constant_init", type=int, default=1000, help='number of iterations to use constant aggregation') parser.add_argument("--coarse_weight", type=float, default=1.0, help='zero tol to revert to piecewise constant assumption') parser.add_argument('--test_dist', default= 1.0, type=float) parser.add_argument("--eval_scene_id", type=str, default="chair_rgba_fixdist_nv100_dist0.25-1.0-4_depth_sfn", help='scene identifier for eval') parser.add_argument("--eval_data_dir", type=str, default="../nerf_synthetic/fixed_dist_new-rgba/", help='directory containing the scenes for eval') ### DTU flags parser.add_argument("--dtu_scene_id", type=int, default=21, help='scan id for DTU dataset to render') parser.add_argument("--num_train", type=int, default=40, help='number of training views to use (1 - 49)') parser.add_argument("--dtu_split", type=str, default=None, help='number of training views to use (1 - 49)') ################## return parser def train(): parser = config_parser() args = parser.parse_args() print(args.white_bkgd) tmp_task = args.task tmp_data_dir = args.data_dir tmp_scene_id = args.scene_id tmp_dataset = args.dataset tmp_test_dist = args.test_dist tmp_ckpt_dir = args.ckpt_dir tmp_set_near_plane = args.set_near_plane tmp_white_bkgd = args.white_bkgd tmp_eval_scene_id = args.eval_scene_id tmp_eval_data_dir = args.eval_data_dir # tmp_white_bkgd = False tmp_test_skip = args.testskip # tmp_mode = args.mode # tmp_N_samples = args.N_samples # tmp_N_importance = args.N_importance # load nerf parameters from training args_file = os.path.join(args.ckpt_dir, args.expname, 'args.json') with open(args_file, 'r') as af: args_dict = json.load(af) args = Namespace(**args_dict) # task and paths are not overwritten args.task = tmp_task args.data_dir = tmp_data_dir args.ckpt_dir = tmp_ckpt_dir # args.mode = tmp_mode args.train_jsonfile = 'transforms_train.json' args.set_near_plane = tmp_set_near_plane # args.N_samples = tmp_N_samples # args.N_importance = tmp_N_importance args.dataset = tmp_dataset args.test_dist = tmp_test_dist args.scene_id = tmp_scene_id args.white_bkgd = tmp_white_bkgd args.eval_scene_id = tmp_eval_scene_id args.eval_data_dir = tmp_eval_data_dir args.testskip = tmp_test_skip 
print('\n'.join(f'{k}={v}' for k, v in vars(args).items())) args.n_gpus = torch.cuda.device_count() print(f"Using {args.n_gpus} GPU(s).") # Load data scene_data_dir = os.path.join(args.data_dir, args.scene_id) K = None if args.dataset == 'llff': images, poses, bds, render_poses, i_test = load_llff_data(scene_data_dir, args.factor, recenter=True, bd_factor=.75, spherify=args.spherify) hwf = poses[0,:3,-1] poses = poses[:,:3,:4] print('Loaded llff', images.shape, render_poses.shape, hwf, scene_data_dir) if not isinstance(i_test, list): i_test = [i_test] if args.llffhold > 0: print('Auto LLFF holdout,', args.llffhold) i_test = np.arange(images.shape[0])[::args.llffhold] i_val = i_test i_train = np.array([i for i in np.arange(int(images.shape[0])) if (i not in i_test and i not in i_val)]) print('DEFINING BOUNDS') if args.no_ndc: near = np.ndarray.min(bds) * .9 far = np.ndarray.max(bds) * 1. else: near = 0. far = 1. print('NEAR FAR', near, far) elif args.dataset == 'blender': images, poses, render_poses, hwf, i_split = load_blender_data(scene_data_dir, args.half_res, args.testskip) print('Loaded blender', images.shape, render_poses.shape, hwf, scene_data_dir) i_train, i_val, i_test = i_split # near = 2. near = args.set_near_plane print("Set near plane to: " + str(near)) far = 6. if args.white_bkgd: images = images[...,:3]*images[...,-1:] + (1.-images[...,-1:]) else: images = images[...,:3] elif args.dataset == "blender2":
images, poses, render_poses, hwf, i_split = load_scene_blender2(scene_data_dir, half_res=args.half_res)
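The blender branch just above composites RGBA frames onto a white background when args.white_bkgd is set. Below is a minimal sketch of that compositing step, with toy array sizes assumed purely for illustration.

import numpy as np

# Toy [N, H, W, 4] RGBA batch in [0, 1]; sizes are assumed, not taken from the dataset.
rgba = np.random.rand(2, 4, 4, 4).astype(np.float32)
alpha = rgba[..., -1:]

white_bkgd = rgba[..., :3] * alpha + (1. - alpha)   # scale RGB by alpha, fill the remainder with white
no_bkgd = rgba[..., :3]                             # white_bkgd=False path: the alpha channel is simply dropped

print(white_bkgd.shape, no_bkgd.shape)              # (2, 4, 4, 3) (2, 4, 4, 3)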
3
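render_rays in this record jitters its depth samples when perturb > 0 (stratified sampling: one uniform draw per bin between midpoints). A self-contained sketch of that step, with toy ray and sample counts assumed:

import torch

N_rays, N_samples = 2, 8
near, far = 2.0, 6.0
t_vals = torch.linspace(0., 1., steps=N_samples)
z_vals = (near * (1. - t_vals) + far * t_vals).expand(N_rays, N_samples)

# Bin boundaries are the midpoints between adjacent samples; each sample is redrawn uniformly in its bin.
mids = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
upper = torch.cat([mids, z_vals[..., -1:]], -1)
lower = torch.cat([z_vals[..., :1], mids], -1)
z_vals = lower + (upper - lower) * torch.rand(z_vals.shape)

print(z_vals.shape)                                        # torch.Size([2, 8])
print(bool((z_vals[..., 1:] >= z_vals[..., :-1]).all()))   # True: samples stay ordered along each ray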
2023-10-30 06:38:00+00:00
8k
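The distinctive piece of this record is compute_weights_piecewise_linear: instead of the piecewise-constant opacity of vanilla NeRF, tau is pinned to roughly zero at the near plane and to a very large value at the far plane, and each interval uses the interval-averaged tau inside the exponent. A minimal side-by-side sketch of both weightings, with toy tensors assumed:

import torch
import torch.nn.functional as F

def weights_constant(sigma, z_vals, rays_d):
    # Vanilla NeRF: alpha_i = 1 - exp(-relu(sigma_i) * delta_i), weighted by cumulative transmittance.
    dists = z_vals[..., 1:] - z_vals[..., :-1]
    dists = torch.cat([dists, torch.full_like(dists[..., :1], 1e10)], -1)
    dists = dists * torch.norm(rays_d[..., None, :], dim=-1)
    alpha = 1. - torch.exp(-F.relu(sigma) * dists)
    T = torch.cumprod(torch.cat([torch.ones_like(alpha[..., :1]), 1. - alpha + 1e-10], -1), -1)[..., :-1]
    return alpha * T

def weights_piecewise_linear(sigma, z_vals, near, far, rays_d):
    # Reformulation: tau(near) ~ 0, tau(far) huge; each interval uses exp(-0.5 * (tau_i + tau_{i+1}) * delta_i).
    z_vals = torch.cat([near, z_vals, far], -1)
    dists = (z_vals[..., 1:] - z_vals[..., :-1]) * torch.norm(rays_d[..., None, :], dim=-1)
    tau = torch.cat([torch.full_like(sigma[..., :1], 1e-10), F.relu(sigma), torch.full_like(sigma[..., :1], 1e10)], -1)
    expr = torch.exp(-0.5 * (tau[..., 1:] + tau[..., :-1]) * dists)
    T = torch.cumprod(torch.cat([torch.ones_like(expr[..., :1]), expr], -1), -1)
    return (1. - expr) * T[..., :-1]

# Toy batch: 2 rays, 8 samples each (shapes assumed for illustration).
rays_d = torch.randn(2, 3)
z_vals = torch.linspace(2., 6., 8).expand(2, 8)
near, far = torch.full((2, 1), 2.), torch.full((2, 1), 6.)
sigma = torch.rand(2, 8)
print(weights_constant(sigma, z_vals, rays_d).shape)                      # [2, 8]
print(weights_piecewise_linear(sigma, z_vals, near, far, rays_d).shape)   # [2, 9]: one extra interval from the near/far padding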
sehyunkwon/ICTC
step1/llava/model/language_model/mpt/modeling_mpt.py
[ { "identifier": "attn_bias_shape", "path": "step1/llava/model/language_model/mpt/attention.py", "snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n if (prefix_lm or not causal) or use_sequence_id:\n return (1, n_heads, seq_len, seq_len)\n return (1, n_heads, 1, seq_len)\n elif prefix_lm or use_sequence_id:\n return (1, 1, seq_len, seq_len)\n return None\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')" }, { "identifier": "build_attn_bias", "path": "step1/llava/model/language_model/mpt/attention.py", "snippet": "def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if alibi:\n (device, dtype) = (attn_bias.device, attn_bias.dtype)\n attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))\n return attn_bias\n else:\n raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')" }, { "identifier": "MPTBlock", "path": "step1/llava/model/language_model/mpt/blocks.py", "snippet": "class MPTBlock(nn.Module):\n\n def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):\n del kwargs\n super().__init__()\n norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]\n attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]\n self.norm_1 = norm_class(d_model, device=device)\n self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)\n self.norm_2 = norm_class(d_model, device=device)\n self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)\n self.resid_attn_dropout = nn.Dropout(resid_pdrop)\n self.resid_ffn_dropout = nn.Dropout(resid_pdrop)\n\n def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:\n a = self.norm_1(x)\n (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)\n x = x + self.resid_attn_dropout(b)\n m = self.norm_2(x)\n n = self.ffn(m)\n x = x + self.resid_ffn_dropout(n)\n return (x, attn_weights, past_key_value)" }, { "identifier": "SharedEmbedding", "path": "step1/llava/model/language_model/mpt/custom_embedding.py", "snippet": "class SharedEmbedding(nn.Embedding):\n\n def forward(self, input: Tensor, unembed: bool=False) -> Tensor:\n if unembed:\n return F.linear(input, self.weight)\n return super().forward(input)" }, { "identifier": "NORM_CLASS_REGISTRY", "path": "step1/llava/model/language_model/mpt/norm.py", "snippet": "NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 
'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}" }, { "identifier": "MPTConfig", "path": "step1/llava/model/language_model/mpt/configuration_mpt.py", "snippet": "class MPTConfig(PretrainedConfig):\n model_type = 'mpt'\n\n def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, verbose: int=0, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, **kwargs):\n \"\"\"The MPT configuration class.\n\n Args:\n d_model (int): The size of the embedding dimension of the model.\n n_heads (int): The number of attention heads.\n n_layers (int): The number of layers in the model.\n expansion_ratio (int): The ratio of the up/down scale in the MLP.\n max_seq_len (int): The maximum sequence length of the model.\n vocab_size (int): The size of the vocabulary.\n resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.\n emb_pdrop (float): The dropout probability for the embedding layer.\n learned_pos_emb (bool): Whether to use learned positional embeddings\n attn_config (Dict): A dictionary used to configure the model's attention module:\n attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention\n attn_pdrop (float): The dropout probability for the attention layers.\n attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'.\n qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer.\n clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to\n this value.\n softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None,\n use the default scale of ``1/sqrt(d_keys)``.\n prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an\n extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix\n can attend to one another bi-directionally. Tokens outside the prefix use causal attention.\n attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id.\n When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates\n which sub-sequence each token belongs to.\n Defaults to ``False`` meaning any provided `sequence_id` will be ignored.\n alibi (bool): Whether to use the alibi bias instead of position embeddings.\n alibi_bias_max (int): The maximum value of the alibi bias.\n init_device (str): The device to use for parameter initialization.\n logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.\n no_bias (bool): Whether to use bias in all layers.\n verbose (int): The verbosity level. 
0 is silent.\n embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.\n norm_type (str): choose type of norm to use\n multiquery_attention (bool): Whether to use multiquery attention implementation.\n use_cache (bool): Whether or not the model should return the last key/values attentions\n init_config (Dict): A dictionary used to configure the model initialization:\n init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_',\n 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or\n 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch.\n init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True.\n emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer.\n emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution\n used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``.\n init_std (float): The standard deviation of the normal distribution used to initialize the model,\n if using the baseline_ parameter initialization scheme.\n init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes.\n fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes.\n init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes.\n ---\n See llmfoundry.models.utils.param_init_fns.py for info on other param init config options\n \"\"\"\n self.d_model = d_model\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.expansion_ratio = expansion_ratio\n self.max_seq_len = max_seq_len\n self.vocab_size = vocab_size\n self.resid_pdrop = resid_pdrop\n self.emb_pdrop = emb_pdrop\n self.learned_pos_emb = learned_pos_emb\n self.attn_config = attn_config\n self.init_device = init_device\n self.logit_scale = logit_scale\n self.no_bias = no_bias\n self.verbose = verbose\n self.embedding_fraction = embedding_fraction\n self.norm_type = norm_type\n self.use_cache = use_cache\n self.init_config = init_config\n if 'name' in kwargs:\n del kwargs['name']\n if 'loss_fn' in kwargs:\n del kwargs['loss_fn']\n super().__init__(**kwargs)\n self._validate_config()\n\n def _set_config_defaults(self, config, config_defaults):\n for (k, v) in config_defaults.items():\n if k not in config:\n config[k] = v\n return config\n\n def _validate_config(self):\n self.attn_config = self._set_config_defaults(self.attn_config, attn_config_defaults)\n self.init_config = self._set_config_defaults(self.init_config, init_config_defaults)\n if self.d_model % self.n_heads != 0:\n raise ValueError('d_model must be divisible by n_heads')\n if any((prob < 0 or prob > 1 for prob in [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop])):\n raise ValueError(\"self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are probabilities and must be between 0 and 1\")\n if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:\n raise ValueError(f\"Unknown attn_impl={self.attn_config['attn_impl']}\")\n if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')\n if self.attn_config['alibi'] and self.attn_config['attn_impl'] 
not in ['torch', 'triton']:\n raise NotImplementedError('alibi only implemented with torch and triton attention.')\n if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:\n raise NotImplementedError('attn_uses_sequence_id only implemented with torch and triton attention.')\n if self.embedding_fraction > 1 or self.embedding_fraction <= 0:\n raise ValueError('model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!')\n if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':\n raise ValueError(f\"self.logit_scale={self.logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.\")\n if self.init_config.get('name', None) is None:\n raise ValueError(f\"self.init_config={self.init_config!r} 'name' needs to be set.\")\n if not self.learned_pos_emb and (not self.attn_config['alibi']):\n raise ValueError(f'Positional information must be provided to the model using either learned_pos_emb or alibi.')" }, { "identifier": "AutoTokenizerForMOD", "path": "step1/llava/model/language_model/mpt/adapt_tokenizer.py", "snippet": "class AutoTokenizerForMOD(AutoTokenizer):\n \"\"\"AutoTokenizer + Adaptation for MOD.\n\n A simple wrapper around AutoTokenizer to make instantiating\n an MOD-adapted tokenizer a bit easier.\n\n MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),\n a padding token, and a property to get the token ids of the\n sentinel tokens.\n \"\"\"\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n \"\"\"See `AutoTokenizer.from_pretrained` docstring.\"\"\"\n tokenizer = super().from_pretrained(*args, **kwargs)\n adapt_tokenizer_for_denoising(tokenizer)\n return tokenizer" }, { "identifier": "adapt_tokenizer_for_denoising", "path": "step1/llava/model/language_model/mpt/adapt_tokenizer.py", "snippet": "def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):\n \"\"\"Adds sentinel tokens and padding token (if missing).\n\n Expands the tokenizer vocabulary to include sentinel tokens\n used in mixture-of-denoiser tasks as well as a padding token.\n\n All added tokens are added as special tokens. 
No tokens are\n added if sentinel tokens and padding token already exist.\n \"\"\"\n sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]\n tokenizer.add_tokens(sentinels_to_add, special_tokens=True)\n if tokenizer.pad_token is None:\n tokenizer.add_tokens('<pad>', special_tokens=True)\n tokenizer.pad_token = '<pad>'\n assert tokenizer.pad_token_id is not None\n sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])\n _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids\n tokenizer.sentinel_token_ids = _sentinel_token_ids" }, { "identifier": "add_bidirectional_mask_if_missing", "path": "step1/llava/model/language_model/mpt/hf_prefixlm_converter.py", "snippet": "def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):\n \"\"\"Attempts to add bidirectional_mask to batch if missing.\n\n Raises:\n KeyError if bidirectional_mask is missing and can't be inferred\n \"\"\"\n if 'bidirectional_mask' not in batch:\n if batch.get('mode', None) == 'icl_task':\n batch['bidirectional_mask'] = batch['attention_mask'].clone()\n for (i, continuation_indices) in enumerate(batch['continuation_indices']):\n batch['bidirectional_mask'][i, continuation_indices] = 0\n elif 'labels' in batch and 'attention_mask' in batch:\n batch['bidirectional_mask'] = torch.logical_and(torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100)).type_as(batch['attention_mask'])\n else:\n raise KeyError('No bidirectional_mask in batch and not sure how to construct one.')" }, { "identifier": "convert_hf_causal_lm_to_prefix_lm", "path": "step1/llava/model/language_model/mpt/hf_prefixlm_converter.py", "snippet": "def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:\n \"\"\"Converts a HuggingFace Causal LM to a Prefix LM.\n\n Supported HuggingFace model classes:\n - `GPT2LMHeadModel`\n - `GPTNeoForCausalLM`\n - `GPTNeoXForCausalLM`\n - `GPTJForCausalLM`\n - `BloomForCausalLM`\n - `OPTForCausalLM`\n\n Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the\n `generate` method and/or select underlying methods depending on the model class.\n\n These changes preserve the model API, but add a new input to `forward`: \"bidirectional_mask\".\n\n Notes on training:\n To actually train the converted model as a Prefix LM, training batches will need to indicate\n the prefix/target structure by including `bidirectional_mask` as part of the batch inputs.\n\n **This is not a standard input and requires custom layers either within or after your dataloader.**\n\n In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels`\n such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`.\n That is, the prefix portion of the sequence should not generate any loss. Loss should only be\n generated by the target portion of the sequence.\n\n Notes on `GPTNeoForCausalLM`:\n To simplify the implementation, \"global\" and \"local\" attention layers are handled differently.\n For \"global\" layers, we handle conversion as described above. 
For \"local\" layers, which use a\n causal attention mask within a restricted local window, we do not alter the masking.\n\n Notes on `forward` method conversion:\n After conversion, the `forward` method will handle a new input, `bidirectional_mask`,\n which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions\n belonging to the prefix (prefix tokens can attend to one another bidirectionally), and\n 0 indicates token positions belonging to the target.\n\n The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing\n causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset\n the causal masks before returning the result.\n\n Notes on `generate` method conversion:\n After conversion, the `generate` method will have the same signature but will internally\n convert all causal masks to be purely bidirectional, call the original `generate` method, and\n (where appropriate) reset the causal masks before returning the result.\n\n This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token\n \"prompt\" passed to `generate` (which is treated as the prefix) and then sequentially generates\n each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one\n another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and\n previously-generated tokens (also as expected in a Prefix LM).\n\n To preserve the API, the original methods are renamed to `_original_forward` and\n `_original_generate`, and replaced with new `forward` and `generate` methods that wrap\n them, respectively. Although implementation details vary by model class.\n \"\"\"\n if isinstance(model, _SUPPORTED_GPT_MODELS):\n return _convert_gpt_causal_lm_to_prefix_lm(model)\n elif isinstance(model, BloomForCausalLM):\n return _convert_bloom_causal_lm_to_prefix_lm(model)\n elif isinstance(model, OPTForCausalLM):\n return _convert_opt_causal_lm_to_prefix_lm(model)\n else:\n raise TypeError(f'Cannot convert model to Prefix LM. ' + f'Model does not belong to set of supported HF models:' + f'\\n{_SUPPORTED_HF_MODELS}')" }, { "identifier": "init_empty_weights", "path": "step1/llava/model/language_model/mpt/meta_init_context.py", "snippet": "@contextmanager\ndef init_empty_weights(include_buffers: bool=False):\n \"\"\"Meta initialization context manager.\n\n A context manager under which models are initialized with all parameters\n on the meta device, therefore creating an empty model. Useful when just\n initializing the model would blow the available RAM.\n\n Args:\n include_buffers (`bool`, *optional*, defaults to `False`): Whether or\n not to also put all buffers on the meta device while initializing.\n\n Example:\n ```python\n import torch.nn as nn\n\n # Initialize a model with 100 billions parameters in no time and without using any RAM.\n with init_empty_weights():\n tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])\n ```\n\n <Tip warning={true}>\n\n Any model created under this context manager has no weights. As such you can't do something like\n `model.to(some_device)` with it. 
To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].\n\n </Tip>\n \"\"\"\n with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:\n yield f" }, { "identifier": "MODEL_INIT_REGISTRY", "path": "step1/llava/model/language_model/mpt/param_init_fns.py", "snippet": "MODEL_INIT_REGISTRY = {'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_}" }, { "identifier": "generic_param_init_fn_", "path": "step1/llava/model/language_model/mpt/param_init_fns.py", "snippet": "def generic_param_init_fn_(module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int]=None, init_div_is_residual: Union[int, float, str, bool]=True, emb_init_std: Optional[float]=None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]]=None, verbose: int=0, **kwargs):\n del kwargs\n if verbose > 1:\n warnings.warn(f'If model has bias parameters they are initialized to 0.')\n init_div_is_residual = init_div_is_residual\n if init_div_is_residual is False:\n div_is_residual = 1.0\n elif init_div_is_residual is True:\n div_is_residual = math.sqrt(2 * n_layers)\n elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):\n div_is_residual = init_div_is_residual\n elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():\n div_is_residual = float(init_div_is_residual)\n else:\n div_is_residual = 1.0\n raise ValueError(f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}')\n if init_div_is_residual is not False:\n if verbose > 1:\n warnings.warn(f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' + f'Set `init_div_is_residual: false` in init config to disable this.')\n if isinstance(module, nn.Linear):\n if hasattr(module, '_fused'):\n fused_init_helper_(module, init_fn_)\n else:\n init_fn_(module.weight)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n if init_div_is_residual is not False and getattr(module, '_is_residual', False):\n with torch.no_grad():\n module.weight.div_(div_is_residual)\n elif isinstance(module, nn.Embedding):\n if emb_init_std is not None:\n std = emb_init_std\n if std == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using normal distribution with mean=0 and std={std!r}.')\n elif emb_init_uniform_lim is not None:\n lim = emb_init_uniform_lim\n if isinstance(lim, Sequence):\n if len(lim) > 2:\n raise ValueError(f'Uniform init requires a min and a max limit. User input: {lim}.')\n if lim[0] == lim[1]:\n warnings.warn(f'Embedding layer initialized to {lim[0]}.')\n else:\n if lim == 0:\n warnings.warn(f'Embedding layer initialized to 0.')\n lim = [-lim, lim]\n (a, b) = lim\n emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)\n if verbose > 1:\n warnings.warn(f'Embedding layer initialized using uniform distribution in range {lim}.')\n else:\n emb_init_fn_ = init_fn_\n emb_init_fn_(module.weight)\n elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):\n if verbose > 1:\n warnings.warn(f'Norm weights are set to 1. 
If norm layer has a bias it is initialized to 0.')\n if hasattr(module, 'weight') and module.weight is not None:\n torch.nn.init.ones_(module.weight)\n if hasattr(module, 'bias') and module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.MultiheadAttention):\n if module._qkv_same_embed_dim:\n assert module.in_proj_weight is not None\n assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)\n assert d_model is not None\n _d = d_model\n splits = (0, _d, 2 * _d, 3 * _d)\n for (s, e) in zip(splits[:-1], splits[1:]):\n init_fn_(module.in_proj_weight[s:e])\n else:\n assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)\n assert module.in_proj_weight is None\n init_fn_(module.q_proj_weight)\n init_fn_(module.k_proj_weight)\n init_fn_(module.v_proj_weight)\n if module.in_proj_bias is not None:\n torch.nn.init.zeros_(module.in_proj_bias)\n if module.bias_k is not None:\n torch.nn.init.zeros_(module.bias_k)\n if module.bias_v is not None:\n torch.nn.init.zeros_(module.bias_v)\n init_fn_(module.out_proj.weight)\n if init_div_is_residual is not False and getattr(module.out_proj, '_is_residual', False):\n with torch.no_grad():\n module.out_proj.weight.div_(div_is_residual)\n if module.out_proj.bias is not None:\n torch.nn.init.zeros_(module.out_proj.bias)\n else:\n for _ in module.parameters(recurse=False):\n raise NotImplementedError(f'{module.__class__.__name__} parameters are not initialized by param_init_fn.')" } ]
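Of the context snippets above, attn_bias_shape is the piece that decides whether an attention-bias tensor must be materialized at all and, if so, at what size. A standalone copy of that decision with a small usage check (the example arguments are assumed, not taken from a real config):

def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):
    if attn_impl == 'flash':
        return None                                    # flash attention applies masking internally
    if attn_impl in ('torch', 'triton'):
        if alibi:
            if (prefix_lm or not causal) or use_sequence_id:
                return (1, n_heads, seq_len, seq_len)  # per-head bias that also varies per query position
            return (1, n_heads, 1, seq_len)            # causal ALiBi: one row per head, broadcast over queries
        if prefix_lm or use_sequence_id:
            return (1, 1, seq_len, seq_len)            # mask only, shared across heads
        return None
    raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')

print(attn_bias_shape('flash', 16, 2048, alibi=True, prefix_lm=False, causal=True, use_sequence_id=False))  # None
print(attn_bias_shape('torch', 16, 2048, alibi=True, prefix_lm=False, causal=True, use_sequence_id=False))  # (1, 16, 1, 2048)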
import math import warnings import torch import torch.nn as nn import torch.nn.functional as F from typing import List, Optional, Tuple, Union from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from .attention import attn_bias_shape, build_attn_bias from .blocks import MPTBlock from .custom_embedding import SharedEmbedding from .norm import NORM_CLASS_REGISTRY from .configuration_mpt import MPTConfig from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm from .meta_init_context import init_empty_weights from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_ from .flash_attn_triton import flash_attn_func
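The import block above also pulls in init_empty_weights. A minimal sketch of the meta-device idea behind it, assuming PyTorch >= 2.0 (where torch.device works as a context manager) and skipping the buffer handling of the real helper:

import torch
import torch.nn as nn

with torch.device('meta'):
    # Parameters get shapes and dtypes on the meta device, but no memory is allocated.
    model = nn.Sequential(*[nn.Linear(4096, 4096) for _ in range(8)])

print(next(model.parameters()).device)             # meta
print(sum(p.numel() for p in model.parameters()))  # ~134M parameters, none backed by RAM until real weights are loaded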
6,796
"""A simple, flexible implementation of a GPT model. Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py """ try: except: pass Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] class MPTPreTrainedModel(PreTrainedModel): config_class = MPTConfig base_model_prefix = 'model' _no_split_modules = ['MPTBlock'] class MPTModel(MPTPreTrainedModel): def __init__(self, config: MPTConfig): config._validate_config() super().__init__(config) self.attn_impl = config.attn_config['attn_impl'] self.prefix_lm = config.attn_config['prefix_lm'] self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id'] self.alibi = config.attn_config['alibi'] self.alibi_bias_max = config.attn_config['alibi_bias_max'] if config.init_device == 'mixed': if dist.get_local_rank() == 0: config.init_device = 'cpu' else: config.init_device = 'meta'
"""A simple, flexible implementation of a GPT model. Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py """ try: except: pass Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] class MPTPreTrainedModel(PreTrainedModel): config_class = MPTConfig base_model_prefix = 'model' _no_split_modules = ['MPTBlock'] class MPTModel(MPTPreTrainedModel): def __init__(self, config: MPTConfig): config._validate_config() super().__init__(config) self.attn_impl = config.attn_config['attn_impl'] self.prefix_lm = config.attn_config['prefix_lm'] self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id'] self.alibi = config.attn_config['alibi'] self.alibi_bias_max = config.attn_config['alibi_bias_max'] if config.init_device == 'mixed': if dist.get_local_rank() == 0: config.init_device = 'cpu' else: config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
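The next_line above validates the configured norm_type against NORM_CLASS_REGISTRY before anything is instantiated, mirroring how MPTBlock resolves norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]. A minimal sketch of that registry-lookup pattern; the registry is trimmed to stock PyTorch classes and the error message is a hypothetical stand-in, since LPLayerNorm/RMSNorm are not reproduced here:

import torch.nn as nn

# Trimmed stand-in for the registry quoted in the context above; the low-precision/RMS variants
# are mapped to nn.LayerNorm purely so this example runs.
NORM_CLASS_REGISTRY = {'layernorm': nn.LayerNorm, 'low_precision_layernorm': nn.LayerNorm}

def build_norm(norm_type: str, d_model: int) -> nn.Module:
    if norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
        norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
        raise NotImplementedError(f'Requested norm type ({norm_type}) is not implemented (options: {norm_options}).')
    return NORM_CLASS_REGISTRY[norm_type.lower()](d_model)

print(build_norm('LayerNorm', 2048))   # the lookup is case-insensitive, like the original check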
4
2023-10-27 05:00:14+00:00
8k
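The SharedEmbedding snippet in the record above ties input embedding and output projection to a single weight matrix: the same weights embed token ids on the way in and, via F.linear, unembed hidden states into vocabulary logits on the way out. A minimal sketch with toy sizes assumed:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SharedEmbedding(nn.Embedding):
    def forward(self, input: torch.Tensor, unembed: bool = False) -> torch.Tensor:
        if unembed:
            return F.linear(input, self.weight)   # [*, d_model] -> [*, vocab_size] logits
        return super().forward(input)             # [*] token ids -> [*, d_model] embeddings

wte = SharedEmbedding(num_embeddings=50368, embedding_dim=64)   # toy d_model; vocab size from the MPTConfig default
tokens = torch.randint(0, 50368, (2, 8))
h = wte(tokens)                 # [2, 8, 64]
logits = wte(h, unembed=True)   # [2, 8, 50368]
print(h.shape, logits.shape)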
Trustworthy-AI-Group/TransferAttack
transferattack/model_related/dhf.py
[ { "identifier": "dhf_inception_v3", "path": "transferattack/model_related/dhf_networks/inception.py", "snippet": "def dhf_inception_v3(mixup_weight_max: float, random_keep_prob: float, dhf_modules = None, weights: Optional[Inception_V3_Weights] = None, progress: bool = True, **kwargs: Any) -> Inception3:\n dhf_model = inception_v3(weights, progress, **kwargs)\n if dhf_modules is None:\n dhf_modules = Inception_V3_Default_DHF_Modules\n utils.convert_to_DHF_model_inplace_(dhf_model, mixup_weight_max=mixup_weight_max, random_keep_prob=random_keep_prob, dhf_modules=dhf_modules)\n return dhf_model" }, { "identifier": "dhf_inc_res_v2", "path": "transferattack/model_related/dhf_networks/inc_res_v2.py", "snippet": "def dhf_inc_res_v2(mixup_weight_max: float, random_keep_prob: float, dhf_modules = None, weights = None, progress: bool = True, **kwargs: Any):\n dhf_model = timm.create_model('inception_resnet_v2', pretrained=True)\n if dhf_modules is None:\n dhf_modules = Inc_Res_V2_Default_DHF_Modules\n utils.convert_to_DHF_model_inplace_(dhf_model, mixup_weight_max=mixup_weight_max, random_keep_prob=random_keep_prob, dhf_modules=dhf_modules)\n return dhf_model" }, { "identifier": "dhf_resnet18", "path": "transferattack/model_related/dhf_networks/resnet.py", "snippet": "def dhf_resnet18(mixup_weight_max: float, random_keep_prob: float, dhf_modules=None, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:\n dhf_model = resnet18(weights, progress, **kwargs)\n if dhf_modules is None:\n dhf_modules = ResNet18_Default_DHF_Modules\n utils.convert_to_DHF_model_inplace_(dhf_model, mixup_weight_max=mixup_weight_max, random_keep_prob=random_keep_prob, dhf_modules=dhf_modules)\n return dhf_model" }, { "identifier": "dhf_resnet50", "path": "transferattack/model_related/dhf_networks/resnet.py", "snippet": "def dhf_resnet50(mixup_weight_max: float, random_keep_prob: float, dhf_modules=None, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:\n dhf_model = resnet50(weights, progress, **kwargs)\n if dhf_modules is None:\n dhf_modules = ResNet50_Default_DHF_Modules\n utils.convert_to_DHF_model_inplace_(dhf_model, mixup_weight_max=mixup_weight_max, random_keep_prob=random_keep_prob, dhf_modules=dhf_modules)\n return dhf_model" }, { "identifier": "dhf_resnet101", "path": "transferattack/model_related/dhf_networks/resnet.py", "snippet": "def dhf_resnet101(mixup_weight_max: float, random_keep_prob: float, dhf_modules=None, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:\n dhf_model = resnet101(weights, progress, **kwargs)\n if dhf_modules is None:\n dhf_modules = ResNet101_Default_DHF_Modules\n utils.convert_to_DHF_model_inplace_(dhf_model, mixup_weight_max=mixup_weight_max, random_keep_prob=random_keep_prob, dhf_modules=dhf_modules)\n return dhf_model" }, { "identifier": "dhf_resnet152", "path": "transferattack/model_related/dhf_networks/resnet.py", "snippet": "def dhf_resnet152(mixup_weight_max: float, random_keep_prob: float, dhf_modules=None, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:\n dhf_model = resnet152(weights, progress, **kwargs)\n if dhf_modules is None:\n dhf_modules = ResNet152_Default_DHF_Modules\n utils.convert_to_DHF_model_inplace_(dhf_model, mixup_weight_max=mixup_weight_max, random_keep_prob=random_keep_prob, dhf_modules=dhf_modules)\n return dhf_model" }, { "identifier": "MIFGSM", "path": 
"transferattack/gradient/mifgsm.py", "snippet": "class MIFGSM(Attack):\n \"\"\"\n MI-FGSM Attack\n 'Boosting Adversarial Attacks with Momentum (CVPR 2018)'(https://arxiv.org/abs/1710.06081)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n\n Example script:\n python main.py --attack mifgsm --output_dir adv_data/mifgsm/resnet18\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='MI-FGSM', **kwargs):\n super().__init__(attack, model_name, epsilon, targeted, random_start, norm, loss, device)\n self.alpha = alpha\n self.epoch = epoch\n self.decay = decay" }, { "identifier": "NIFGSM", "path": "transferattack/gradient/nifgsm.py", "snippet": "class NIFGSM(MIFGSM):\n \"\"\"\n NI-FGSM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1.\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., targeted=False, random_start=False,\n norm='linfty', loss='crossentropy', device=None, attack='NI-FGSM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n\n def transform(self, x, momentum, **kwargs):\n \"\"\"\n look ahead for NI-FGSM\n \"\"\"\n return x + self.alpha*self.decay*momentum" }, { "identifier": "DIM", "path": "transferattack/input_transformation/dim.py", "snippet": "class DIM(MIFGSM):\n \"\"\"\n DIM Attack\n 'Improving Transferability of Adversarial Examples with Input Diversity (CVPR 2019)'(https://arxiv.org/abs/1803.06978)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n resize_rate (float): the relative size of the resized image\n diversity_prob (float): the probability for transforming the input image\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1, resize_rate=1.1, diversity_prob=0.5\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., resize_rate=1.1, diversity_prob=0.5, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='DIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n if resize_rate < 1:\n raise Exception(\"Error! The resize rate should be larger than 1.\")\n self.resize_rate = resize_rate\n self.diversity_prob = diversity_prob\n \n def transform(self, x, **kwargs):\n \"\"\"\n Random transform the input images\n \"\"\"\n # do not transform the input image\n if torch.rand(1) > self.diversity_prob:\n return x\n \n img_size = x.shape[-1]\n img_resize = int(img_size * self.resize_rate)\n\n # resize the input image to random size\n rnd = torch.randint(low=min(img_size, img_resize), high=max(img_size, img_resize), size=(1,), dtype=torch.int32)\n rescaled = F.interpolate(x, size=[rnd, rnd], mode='bilinear', align_corners=False)\n\n # randomly add padding\n h_rem = img_resize - rnd\n w_rem = img_resize - rnd\n pad_top = torch.randint(low=0, high=h_rem.item(), size=(1,), dtype=torch.int32)\n pad_bottom = h_rem - pad_top\n pad_left = torch.randint(low=0, high=w_rem.item(), size=(1,), dtype=torch.int32)\n pad_right = w_rem - pad_left\n\n padded = F.pad(rescaled, [pad_left.item(), pad_right.item(), pad_top.item(), pad_bottom.item()], value=0)\n\n # resize the image back to img_size\n return F.interpolate(padded, size=[img_size, img_size], mode='bilinear', align_corners=False)" }, { "identifier": "TIM", "path": "transferattack/input_transformation/tim.py", "snippet": "class TIM(MIFGSM):\n \"\"\"\n TIM Attack\n 'Evading Defenses to Transferable Adversarial Examples by Translation-Invariant Attacks (CVPR 2019)'(https://arxiv.org/abs/1904.02884)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n kernel_type (str): the type of kernel (gaussian/uniform/linear).\n kernel_size (int): the size of kernel.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n \n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15\n\n Example script:\n python main.py --attack tim --output_dir adv_data/tim/resnet18\n \"\"\"\n \n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., kernel_type='gaussian', kernel_size=15, targeted=False, \n random_start=False, norm='linfty', loss='crossentropy', device=None, attack='TIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.kernel = self.generate_kernel(kernel_type, kernel_size)\n\n def generate_kernel(self, kernel_type, kernel_size, nsig=3):\n \"\"\"\n Generate the gaussian/uniform/linear kernel\n\n Arguments:\n kernel_type (str): the method for initilizing the kernel\n kernel_size (int): the size of kernel\n \"\"\"\n if kernel_type.lower() == 'gaussian':\n x = np.linspace(-nsig, nsig, kernel_size)\n kern1d = st.norm.pdf(x)\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n elif kernel_type.lower() == 'uniform':\n kernel = np.ones((kernel_size, kernel_size)) / (kernel_size ** 2)\n elif kernel_type.lower() == 'linear':\n kern1d = 1 - np.abs(np.linspace((-kernel_size+1)//2, (kernel_size-1)//2, kernel_size)/(kernel_size**2))\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n else:\n raise Exception(\"Unspported kernel type {}\".format(kernel_type))\n \n stack_kernel = np.stack([kernel, kernel, kernel])\n stack_kernel = np.expand_dims(stack_kernel, 1)\n return torch.from_numpy(stack_kernel.astype(np.float32)).to(self.device)\n\n def get_grad(self, loss, delta, **kwargs):\n \"\"\"\n Overridden for TIM attack.\n \"\"\"\n grad = torch.autograd.grad(loss, delta, retain_graph=False, create_graph=False)[0]\n grad = F.conv2d(grad, self.kernel, stride=1, padding='same', groups=3)\n return grad" }, { "identifier": "SIM", "path": "transferattack/input_transformation/sim.py", "snippet": "class SIM(MIFGSM):\n \"\"\"\n SIM Attack\n 'Nesterov Accelerated Gradient and Scale Invariance for Adversarial Attacks (ICLR 2020)'(https://arxiv.org/abs/1908.06281)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='SIM', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n\n def transform(self, x, **kwargs):\n \"\"\"\n Scale the input for SIM\n \"\"\"\n return torch.cat([x / (2**i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale)) if self.targeted else self.loss(logits, label.repeat(self.num_scale))" }, { "identifier": "Admix", "path": "transferattack/input_transformation/admix.py", "snippet": "class Admix(MIFGSM):\n \"\"\"\n Admix Attack\n 'Admix: Enhancing the Transferability of Adversarial Attacks (ICCV 2021)'(https://arxiv.org/abs/2102.00436)\n\n Arguments:\n model_name (str): the name of surrogate model for attack.\n epsilon (float): the perturbation budget.\n alpha (float): the step size.\n epoch (int): the number of iterations.\n decay (float): the decay factor for momentum calculation.\n num_scale (int): the number of scaled copies in each iteration.\n num_admix (int): the number of admixed images in each iteration.\n admix_strength (float): the strength of admixed images.\n targeted (bool): targeted/untargeted attack.\n random_start (bool): whether using random initialization for delta.\n norm (str): the norm of perturbation, l2/linfty.\n loss (str): the loss function.\n device (torch.device): the device for data. 
If it is None, the device would be same as model\n\n Official arguments:\n epsilon=16/255, alpha=epsilon/epoch=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2\n \"\"\"\n\n def __init__(self, model_name, epsilon=16/255, alpha=1.6/255, epoch=10, decay=1., num_scale=5, num_admix=3, admix_strength=0.2, targeted=False, random_start=False, norm='linfty', loss='crossentropy', device=None, attack='Admix', **kwargs):\n super().__init__(model_name, epsilon, alpha, epoch, decay, targeted, random_start, norm, loss, device, attack)\n self.num_scale = num_scale\n self.num_admix = num_admix\n self.admix_strength = admix_strength\n\n def transform(self, x, **kwargs):\n \"\"\"\n Admix the input for Admix Attack\n \"\"\"\n admix_images = torch.concat([(x + self.admix_strength * x[torch.randperm(x.size(0))].detach()) for _ in range(self.num_admix)], dim=0)\n return torch.concat([admix_images / (2 ** i) for i in range(self.num_scale)])\n\n def get_loss(self, logits, label):\n \"\"\"\n Calculate the loss\n \"\"\"\n return -self.loss(logits, label.repeat(self.num_scale*self.num_admix)) if self.targeted else self.loss(logits, label.repeat(self.num_scale*self.num_admix))" }, { "identifier": "utils", "path": "transferattack/model_related/dhf_networks/utils.py", "snippet": "class DHFUnit(nn.Module):\n def __init__(self, mixup_weight_max=0.2, random_keep_prob=0.9) -> None:\n def set_dhf_params(self, if_dhf: bool, update_mf: bool, dhf_indicator: Tensor):\n def forward(self, x: Tensor):\n def _forward(self, x):\n def uniform_random_like(x, minval, maxval):\ndef get_layer(model, name):\ndef set_layer(model, name, layer):\ndef convert_to_DHF_model_inplace_(model, mixup_weight_max: float, random_keep_prob: float, dhf_modules):\ndef turn_on_dhf_update_mf_setting(model: nn.Module):\ndef trun_off_dhf_update_mf_setting(model: nn.Module):\ndef turn_on_dhf_attack_setting(model: nn.Module, dhf_indicator: Tensor):\ndef preview_model(model: nn.Module):" } ]
from torch import Tensor from ..utils import * from .dhf_networks.inception import dhf_inception_v3 from .dhf_networks.inc_res_v2 import dhf_inc_res_v2 from .dhf_networks.resnet import dhf_resnet18, dhf_resnet50, dhf_resnet101, dhf_resnet152 from ..gradient.mifgsm import MIFGSM from ..gradient.nifgsm import NIFGSM from ..input_transformation.dim import DIM from ..input_transformation.tim import TIM from ..input_transformation.sim import SIM from ..input_transformation.admix import Admix from .dhf_networks import utils
4,891
# example bash: python main.py --attack=mifgsm_dhf support_models = { "inc_v3": dhf_inception_v3, "inc_res": dhf_inc_res_v2, 'resnet18': dhf_resnet18, "resnet50": dhf_resnet50, "resnet101": dhf_resnet101,
# example bash: python main.py --attack=mifgsm_dhf support_models = { "inc_v3": dhf_inception_v3, "inc_res": dhf_inc_res_v2, 'resnet18': dhf_resnet18, "resnet50": dhf_resnet50, "resnet101": dhf_resnet101,
"resnet152": dhf_resnet152,
5
2023-10-31 03:43:26+00:00
8k
phineas-pta/comfy-trt-test
convert_unet.py
[ { "identifier": "export_onnx", "path": "comfy_trt/exporter.py", "snippet": "def export_onnx(onnx_path: str, modelobj: UNetModel, profile: ProfileSettings, opset: int = 17, disable_optimizations: bool = False):\n\tlogging.info(\"Exporting to ONNX...\")\n\tinputs = modelobj.get_sample_input(profile.bs_opt * 2, profile.h_opt // 8, profile.w_opt // 8, profile.t_opt)\n\tif not os.path.exists(onnx_path):\n\t\t_export_onnx(\n\t\t\tmodelobj.unet,\n\t\t\tinputs,\n\t\t\tonnx_path,\n\t\t\topset,\n\t\t\tmodelobj.get_input_names(),\n\t\t\tmodelobj.get_output_names(),\n\t\t\tmodelobj.get_dynamic_axes(),\n\t\t\tmodelobj.optimize if not disable_optimizations else None,\n\t\t)" }, { "identifier": "export_trt", "path": "comfy_trt/exporter.py", "snippet": "def export_trt(trt_path: str, onnx_path: str, timing_cache: str, profile: dict, use_fp16: bool) -> int:\n\tengine = Engine(trt_path)\n\ts = time.time()\n\tret = engine.build(onnx_path, use_fp16, enable_refit=True, timing_cache=timing_cache, input_profile=[profile])\n\te = time.time()\n\tlogging.info(f\"Time taken to build: {e-s}s\")\n\treturn ret" }, { "identifier": "UNetModel", "path": "comfy_trt/model_helper.py", "snippet": "class UNetModel:\n\tdef __init__(self, unet, version: str, unet_dim: int, embedding_dim: int, text_minlen: int = 77) -> None:\n\t\tsuper().__init__()\n\t\tself.unet = unet\n\t\tself.is_xl = version in [\"SDXL\", \"SDXLRefiner\", \"SSD1B\", \"Segmind_Vega\"]\n\n\t\tself.text_minlen = text_minlen\n\t\tself.embedding_dim = embedding_dim\n\t\tmatch version:\n\t\t\tcase \"SDXL\" | \"SSD1B\" | \"Segmind_Vega\":\n\t\t\t\tself.num_xl_classes = 2816\n\t\t\tcase \"SDXLRefiner\":\n\t\t\t\tself.num_xl_classes = 2560\n\t\t\tcase _:\n\t\t\t\tself.num_xl_classes = 0\n\t\tself.emb_chn = 1280\n\t\tself.in_channels = unet_dim\n\n\t\tself.dyn_axes = {\n\t\t\t\"sample\": {0: \"2B\", 2: \"H\", 3: \"W\"},\n\t\t\t\"encoder_hidden_states\": {0: \"2B\", 1: \"77N\"},\n\t\t\t\"timesteps\": {0: \"2B\"},\n\t\t\t\"latent\": {0: \"2B\", 2: \"H\", 3: \"W\"},\n\t\t\t\"y\": {0: \"2B\"},\n\t\t}\n\n\tdef get_input_names(self) -> list[str]:\n\t\tnames = [\"sample\", \"timesteps\", \"encoder_hidden_states\"]\n\t\tif self.is_xl:\n\t\t\tnames.append(\"y\")\n\t\treturn names\n\n\tdef get_output_names(self) -> list[str]:\n\t\treturn [\"latent\"]\n\n\tdef get_dynamic_axes(self) -> dict:\n\t\tio_names = self.get_input_names() + self.get_output_names()\n\t\treturn {name: self.dyn_axes[name] for name in io_names}\n\n\tdef get_sample_input(\n\t\tself,\n\t\tbatch_size: int,\n\t\tlatent_height: int,\n\t\tlatent_width: int,\n\t\ttext_len: int,\n\t\tdevice: str = \"cuda\",\n\t\tdtype: torch.dtype = torch.float32,\n\t) -> tuple[torch.Tensor]:\n\t\treturn (\n\t\t\ttorch.randn(batch_size, self.in_channels, latent_height, latent_width, dtype=dtype, device=device),\n\t\t\ttorch.randn(batch_size, dtype=dtype, device=device),\n\t\t\ttorch.randn(batch_size, text_len, self.embedding_dim, dtype=dtype, device=device),\n\t\t\ttorch.randn(batch_size, self.num_xl_classes, dtype=dtype, device=device) if self.is_xl else None,\n\t\t)\n\n\tdef get_input_profile(self, profile: ProfileSettings) -> dict:\n\t\tmin_batch, opt_batch, max_batch = profile.get_batch_dim()\n\t\t(\n\t\t\tmin_latent_height, latent_height, max_latent_height,\n\t\t\tmin_latent_width, latent_width, max_latent_width,\n\t\t) = profile.get_latent_dim()\n\n\t\tshape_dict = {\n\t\t\t\"sample\": [\n\t\t\t\t(min_batch, self.in_channels, min_latent_height, min_latent_width),\n\t\t\t\t(opt_batch, self.in_channels, latent_height, 
latent_width),\n\t\t\t\t(max_batch, self.in_channels, max_latent_height, max_latent_width),\n\t\t\t],\n\t\t\t\"timesteps\": [(min_batch,), (opt_batch,), (max_batch,)],\n\t\t\t\"encoder_hidden_states\": [\n\t\t\t\t(min_batch, profile.t_min, self.embedding_dim),\n\t\t\t\t(opt_batch, profile.t_opt, self.embedding_dim),\n\t\t\t\t(max_batch, profile.t_max, self.embedding_dim),\n\t\t\t],\n\t\t}\n\t\tif self.is_xl:\n\t\t\tshape_dict[\"y\"] = [\n\t\t\t\t(min_batch, self.num_xl_classes),\n\t\t\t\t(opt_batch, self.num_xl_classes),\n\t\t\t\t(max_batch, self.num_xl_classes),\n\t\t\t]\n\n\t\treturn shape_dict\n\n\t# Helper utility for weights map\n\tdef export_weights_map(self, onnx_opt_path: str, weights_map_path: dict):\n\t\tonnx_opt_dir = onnx_opt_path\n\t\tstate_dict = self.unet.state_dict()\n\t\tonnx_opt_model = onnx.load(onnx_opt_path)\n\n\t\t# Create initializer data hashes\n\t\tdef init_hash_map(onnx_opt_model):\n\t\t\tinitializer_hash_mapping = {}\n\t\t\tfor initializer in onnx_opt_model.graph.initializer:\n\t\t\t\tinitializer_data = onnx.numpy_helper.to_array(initializer, base_dir=onnx_opt_dir).astype(np.float16)\n\t\t\t\tinitializer_hash = hash(initializer_data.data.tobytes())\n\t\t\t\tinitializer_hash_mapping[initializer.name] = (initializer_hash, initializer_data.shape)\n\t\t\treturn initializer_hash_mapping\n\n\t\tinitializer_hash_mapping = init_hash_map(onnx_opt_model)\n\n\t\tweights_name_mapping, weights_shape_mapping = {}, {}\n\t\t# set to keep track of initializers already added to the name_mapping dict\n\t\tinitializers_mapped = set()\n\t\tfor wt_name, wt in state_dict.items():\n\t\t\t# get weight hash\n\t\t\twt = wt.cpu().detach().numpy().astype(np.float16)\n\t\t\twt_hash = hash(wt.data.tobytes())\n\t\t\twt_t_hash = hash(np.transpose(wt).data.tobytes())\n\n\t\t\tfor initializer_name, (initializer_hash, initializer_shape) in initializer_hash_mapping.items():\n\t\t\t\t# Due to constant folding, some weights are transposed during export\n\t\t\t\t# To account for the transpose op, we compare the initializer hash to the hash for the weight and its transpose\n\t\t\t\tif wt_hash == initializer_hash or wt_t_hash == initializer_hash:\n\t\t\t\t\t# The assert below ensures there is a 1:1 mapping between PyTorch and ONNX weight names.\n\t\t\t\t\t# It can be removed in cases where 1:many mapping is found and name_mapping[wt_name] = list()\n\t\t\t\t\tassert initializer_name not in initializers_mapped\n\t\t\t\t\tweights_name_mapping[wt_name] = initializer_name\n\t\t\t\t\tinitializers_mapped.add(initializer_name)\n\t\t\t\t\tis_transpose = wt_hash != initializer_hash\n\t\t\t\t\tweights_shape_mapping[wt_name] = (initializer_shape, is_transpose)\n\n\t\t\t# Sanity check: Were any weights not matched\n\t\t\tif wt_name not in weights_name_mapping:\n\t\t\t\tprint(f\"[I] PyTorch weight {wt_name} not matched with any ONNX initializer\")\n\t\tprint(f\"[I] UNet: {len(weights_name_mapping.keys())} PyTorch weights were matched with ONNX initializers\")\n\n\t\tassert weights_name_mapping.keys() == weights_shape_mapping.keys()\n\t\twith open(weights_map_path, \"w\") as fp:\n\t\t\tjson.dump([weights_name_mapping, weights_shape_mapping], fp, indent=\"\\t\")\n\n\t@staticmethod\n\tdef optimize(name, onnx_graph, verbose=False):\n\t\topt = Optimizer(onnx_graph, verbose=verbose)\n\t\topt.info(f\"{name}: original\")\n\t\topt.cleanup()\n\t\topt.info(f\"{name}: cleanup\")\n\t\topt.fold_constants()\n\t\topt.info(f\"{name}: fold constants\")\n\t\topt.infer_shapes()\n\t\topt.info(f\"{name}: shape 
inference\")\n\t\tonnx_opt_graph = opt.cleanup(return_onnx=True)\n\t\topt.info(f\"{name}: finished\")\n\t\treturn onnx_opt_graph" }, { "identifier": "modelmanager", "path": "comfy_trt/model_manager.py", "snippet": "BASE_PATH = os.path.dirname(os.path.realpath(__file__))\nONNX_MODEL_DIR = os.path.join(BASE_PATH, \"Unet-onnx\")\nTRT_MODEL_DIR = os.path.join(BASE_PATH, \"Unet-trt\")\nMODEL_FILE = os.path.join(TRT_MODEL_DIR, \"model.json\")\ndef get_cc() -> tuple[int]:\n\tdef __init__(self, model_file: str = MODEL_FILE):\n\tdef get_onnx_path(model_name: str) -> tuple[str]:\n\tdef get_trt_path(self, model_name: str, profile: dict, static_shape: bool) -> tuple[str]:\n\tdef get_weights_map_path(self, model_name: str):\n\tdef update(self) -> None:\n\tdef add_entry(\n\t\tself,\n\t\tmodel_name: str,\n\t\tprofile: dict,\n\t\tstatic_shapes: bool,\n\t\tfp32: bool,\n\t\tbaseline_model: str,\n\t\tprediction_type: str,\n\t\tinpaint: bool,\n\t\trefit: bool,\n\t\tunet_hidden_dim: int,\n\t\tlora: bool\n\t) -> None:\n\tdef add_lora_entry(\n\t\tself,\n\t\tbase_model: str,\n\t\tlora_name: str,\n\t\ttrt_lora_path: str,\n\t\tfp32: bool,\n\t\tbaseline_model: str,\n\t\tprediction_type: str,\n\t\tinpaint: bool,\n\t\tunet_hidden_dim: int\n\t) -> None:\n\tdef write_json(self) -> None:\n\tdef read_json(self, encode_config: bool = True) -> dict:\n\tdef available_models(self) -> dict:\n\tdef available_loras(self):\n\tdef get_timing_cache(self) -> str:\n\tdef get_valid_models_from_dict(self, base_model: str, feed_dict: dict) -> tuple[list[bool], list[float]]:\n\tdef get_valid_models(self, base_model: str, width: int, height: int, batch_size: int, max_embedding: int) -> tuple[list[bool], list[float]]:\nclass ModelManager:" }, { "identifier": "ProfileSettings", "path": "comfy_trt/datastructures.py", "snippet": "class ProfileSettings:\n\tbs_min: int # batch size\n\tbs_opt: int\n\tbs_max: int\n\th_min: int # height\n\th_opt: int\n\th_max: int\n\tw_min: int # width\n\tw_opt: int\n\tw_max: int\n\tt_min: int # token count\n\tt_opt: int\n\tt_max: int\n\tstatic_shapes: bool = False\n\n\tdef __str__(self) -> str:\n\t\treturn \"\\n\\t- \".join([\n\t\t\t\"[I] size & shape parameters:\",\n\t\t\tf\"batch size: min={self.bs_min}, opt={self.bs_opt}, max={self.bs_max}\",\n\t\t\tf\"height: min={self.h_min}, opt={self.h_opt}, max={self.h_max}\",\n\t\t\tf\"width: min={self.w_min}, opt={self.w_opt}, max={self.w_max}\",\n\t\t\tf\"token count: min={self.t_min}, opt={self.t_opt}, max={self.t_max}\",\n\t\t])\n\n\tdef out(self) -> tuple[int]:\n\t\treturn (\n\t\t\tself.bs_min, self.bs_opt, self.bs_max,\n\t\t\tself.h_min, self.h_opt, self.h_max,\n\t\t\tself.w_min, self.w_opt, self.w_max,\n\t\t\tself.t_min, self.t_opt, self.t_max,\n\t\t)\n\n\tdef token_to_dim(self) -> None:\n\t\tself.t_min = (self.t_min // 75) * 77\n\t\tself.t_opt = (self.t_opt // 75) * 77\n\t\tself.t_max = (self.t_max // 75) * 77\n\n\t\tif self.static_shapes:\n\t\t\tself.t_min = self.t_max = self.t_opt\n\t\t\tself.bs_min = self.bs_max = self.bs_opt\n\t\t\tself.h_min = self.h_max = self.h_opt\n\t\t\tself.w_min = self.w_max = self.w_opt\n\t\t\tself.static_shapes = True\n\n\tdef get_latent_dim(self) -> tuple[int]:\n\t\treturn (\n\t\t\tself.h_min // 8, self.h_opt // 8, self.h_max // 8,\n\t\t\tself.w_min // 8, self.w_opt // 8, self.w_max // 8,\n\t\t)\n\n\tdef get_batch_dim(self) -> tuple[int]:\n\t\tis_static_batch = self.bs_min == self.bs_max == self.bs_opt\n\t\tif self.t_max <= 77:\n\t\t\treturn (self.bs_min * 2, self.bs_opt * 2, self.bs_max * 2)\n\t\telif self.t_max > 77 and 
is_static_batch:\n\t\t\treturn (self.bs_opt, self.bs_opt, self.bs_opt)\n\t\telif self.t_max > 77 and not is_static_batch:\n\t\t\tif self.t_opt > 77:\n\t\t\t\treturn (self.bs_min, self.bs_opt, self.bs_max * 2)\n\t\t\telse:\n\t\t\t\treturn (self.bs_min, self.bs_opt * 2, self.bs_max * 2)\n\t\telse:\n\t\t\traise Exception(\"Uncovered case in get_batch_dim\")" } ]
import argparse import sys import os.path import gc import torch from comfy_trt.exporter import export_onnx, export_trt from comfy_trt.model_helper import UNetModel from comfy_trt.model_manager import modelmanager, cc_major from comfy_trt.datastructures import ProfileSettings from comfy.utils import load_torch_file, calculate_parameters from comfy.supported_models import models as LIST_MODELS from comfy.model_detection import detect_unet_config from comfy.model_management import unet_dtype as get_unet_dtype
4,667
# -*- coding: utf-8 -*- # modified from https://github.com/NVIDIA/Stable-Diffusion-WebUI-TensorRT/blob/main/ui_trt.py # CHANGE: remove lora, make script as CLI command # STATUS: ok i guess sys.path.append(os.path.join("..", "..")) def parseArgs(): parser = argparse.ArgumentParser(description="test: convert Stable Diffusion checkpoint to TensorRT engine") parser.add_argument("--ckpt_path", required=True) parser.add_argument("--output_name", help=".onnx & .trt file name, default to ckpt file name") parser.add_argument("--batch_min", type=int, default=1, help="default 1") parser.add_argument("--batch_opt", type=int, default=1, help="default 1") parser.add_argument("--batch_max", type=int, default=1, help="limit 16") parser.add_argument("--height_min", type=int, help="default 768 if sdxl else 512, limit 256") parser.add_argument("--height_opt", type=int, help="default 1024 if sdxl else 512") parser.add_argument("--height_max", type=int, help="default 1024 if sdxl else 768, limit 4096") parser.add_argument("--width_min", type=int, help="default 768 if sdxl else 512, limit 256") parser.add_argument("--width_opt", type=int, help="default 768 if sdxl else 512") parser.add_argument("--width_max", type=int, help="default 1024 if sdxl else 768, limit 4096") parser.add_argument("--token_count_min", type=int, default=75, help="default 75, cannot go lower") parser.add_argument("--token_count_opt", type=int, default=75, help="default 75") parser.add_argument("--token_count_max", type=int, default=150, help="default 150, limit 750") parser.add_argument("--force_export", action="store_true") parser.add_argument("--static_shapes", action="store_true", help="may cause weird error (?) if enable") parser.add_argument("--float32", action="store_true") return parser.parse_args() def get_config_from_checkpoint(ckpt_path: str) -> dict: """see comfy/sd.py >>> load_checkpoint_guess_config""" tmp0 = "model.diffusion_model." 
sd = load_torch_file(ckpt_path) parameters = calculate_parameters(sd, tmp0) unet_dtype = get_unet_dtype(model_params=parameters) unet_config = detect_unet_config(sd, tmp0, unet_dtype) for model_config in LIST_MODELS: if model_config.matches(unet_config): tmp1 = model_config(unet_config) model = tmp1.get_model(sd, tmp0, device="cuda") model.load_model_weights(sd, tmp0) return { "model": model.diffusion_model, "baseline_model": model_config.__qualname__, "prediction_type": str(model.model_type), "unet_hidden_dim": unet_config["in_channels"], "embedding_dim": unet_config["context_dim"], } if __name__ == "__main__": args = parseArgs() ckpt_config = get_config_from_checkpoint(args.ckpt_path) if cc_major < 7: args.float32 = True print("FP16 has been disabled because your GPU does not support it.") baseline_model = ckpt_config["baseline_model"] print(f"\ndetected baseline model version: {baseline_model}") is_sdxl = baseline_model in ["SDXL", "SDXLRefiner", "SSD1B", "Segmind_Vega"] # re-used later if is_sdxl: if args.height_min is None: args.height_min = 768 if args.height_opt is None: args.height_opt = 1024 if args.height_max is None: args.height_max = 1024 if args.width_min is None: args.width_min = 768 if args.width_opt is None: args.width_opt = 1024 if args.width_max is None: args.width_max = 1024 elif baseline_model in ["SD15", "SD20", "SD21UnclipL", "SD21UnclipH"]: if args.height_min is None: args.height_min = 512 if args.height_opt is None: args.height_opt = 512 if args.height_max is None: args.height_max = 768 if args.width_min is None: args.width_min = 512 if args.width_opt is None: args.width_opt = 512 if args.width_max is None: args.width_max = 768 else: # ["SVD_img2vid", "Stable_Zero123", "SD_X4Upscaler"] raise ValueError(f"{baseline_model} not yet supported") if args.height_min % 64 != 0 or args.height_opt % 64 != 0 or args.height_max % 64 != 0 or args.width_min % 64 != 0 or args.width_opt % 64 != 0 or args.width_max % 64 != 0: raise ValueError("height and width must be divisible by 64") if not (args.height_min <= args.height_opt <= args.height_max and args.width_min <= args.width_opt <= args.width_max): raise ValueError("need min ≤ opt ≤ max") if args.height_min < 256 or args.height_max > 4096 or args.width_min < 256 or args.width_max > 4096: raise ValueError("height and width out of limit") ckpt_file = os.path.basename(args.ckpt_path) if args.output_name is None: # default to ckpt file name args.output_name = os.path.splitext(ckpt_file)[0] onnx_filename, onnx_path = modelmanager.get_onnx_path(args.output_name) print(f"Exporting {ckpt_file} to TensorRT") timing_cache = modelmanager.get_timing_cache() profile_settings = ProfileSettings( args.batch_min, args.batch_opt, args.batch_max, args.height_min, args.height_opt, args.height_max, args.width_min, args.width_opt, args.width_max, args.token_count_min, args.token_count_opt, args.token_count_max, args.static_shapes ) print(profile_settings, end="\n\n") profile_settings.token_to_dim()
# -*- coding: utf-8 -*- # modified from https://github.com/NVIDIA/Stable-Diffusion-WebUI-TensorRT/blob/main/ui_trt.py # CHANGE: remove lora, make script as CLI command # STATUS: ok i guess sys.path.append(os.path.join("..", "..")) def parseArgs(): parser = argparse.ArgumentParser(description="test: convert Stable Diffusion checkpoint to TensorRT engine") parser.add_argument("--ckpt_path", required=True) parser.add_argument("--output_name", help=".onnx & .trt file name, default to ckpt file name") parser.add_argument("--batch_min", type=int, default=1, help="default 1") parser.add_argument("--batch_opt", type=int, default=1, help="default 1") parser.add_argument("--batch_max", type=int, default=1, help="limit 16") parser.add_argument("--height_min", type=int, help="default 768 if sdxl else 512, limit 256") parser.add_argument("--height_opt", type=int, help="default 1024 if sdxl else 512") parser.add_argument("--height_max", type=int, help="default 1024 if sdxl else 768, limit 4096") parser.add_argument("--width_min", type=int, help="default 768 if sdxl else 512, limit 256") parser.add_argument("--width_opt", type=int, help="default 768 if sdxl else 512") parser.add_argument("--width_max", type=int, help="default 1024 if sdxl else 768, limit 4096") parser.add_argument("--token_count_min", type=int, default=75, help="default 75, cannot go lower") parser.add_argument("--token_count_opt", type=int, default=75, help="default 75") parser.add_argument("--token_count_max", type=int, default=150, help="default 150, limit 750") parser.add_argument("--force_export", action="store_true") parser.add_argument("--static_shapes", action="store_true", help="may cause weird error (?) if enable") parser.add_argument("--float32", action="store_true") return parser.parse_args() def get_config_from_checkpoint(ckpt_path: str) -> dict: """see comfy/sd.py >>> load_checkpoint_guess_config""" tmp0 = "model.diffusion_model." 
sd = load_torch_file(ckpt_path) parameters = calculate_parameters(sd, tmp0) unet_dtype = get_unet_dtype(model_params=parameters) unet_config = detect_unet_config(sd, tmp0, unet_dtype) for model_config in LIST_MODELS: if model_config.matches(unet_config): tmp1 = model_config(unet_config) model = tmp1.get_model(sd, tmp0, device="cuda") model.load_model_weights(sd, tmp0) return { "model": model.diffusion_model, "baseline_model": model_config.__qualname__, "prediction_type": str(model.model_type), "unet_hidden_dim": unet_config["in_channels"], "embedding_dim": unet_config["context_dim"], } if __name__ == "__main__": args = parseArgs() ckpt_config = get_config_from_checkpoint(args.ckpt_path) if cc_major < 7: args.float32 = True print("FP16 has been disabled because your GPU does not support it.") baseline_model = ckpt_config["baseline_model"] print(f"\ndetected baseline model version: {baseline_model}") is_sdxl = baseline_model in ["SDXL", "SDXLRefiner", "SSD1B", "Segmind_Vega"] # re-used later if is_sdxl: if args.height_min is None: args.height_min = 768 if args.height_opt is None: args.height_opt = 1024 if args.height_max is None: args.height_max = 1024 if args.width_min is None: args.width_min = 768 if args.width_opt is None: args.width_opt = 1024 if args.width_max is None: args.width_max = 1024 elif baseline_model in ["SD15", "SD20", "SD21UnclipL", "SD21UnclipH"]: if args.height_min is None: args.height_min = 512 if args.height_opt is None: args.height_opt = 512 if args.height_max is None: args.height_max = 768 if args.width_min is None: args.width_min = 512 if args.width_opt is None: args.width_opt = 512 if args.width_max is None: args.width_max = 768 else: # ["SVD_img2vid", "Stable_Zero123", "SD_X4Upscaler"] raise ValueError(f"{baseline_model} not yet supported") if args.height_min % 64 != 0 or args.height_opt % 64 != 0 or args.height_max % 64 != 0 or args.width_min % 64 != 0 or args.width_opt % 64 != 0 or args.width_max % 64 != 0: raise ValueError("height and width must be divisible by 64") if not (args.height_min <= args.height_opt <= args.height_max and args.width_min <= args.width_opt <= args.width_max): raise ValueError("need min ≤ opt ≤ max") if args.height_min < 256 or args.height_max > 4096 or args.width_min < 256 or args.width_max > 4096: raise ValueError("height and width out of limit") ckpt_file = os.path.basename(args.ckpt_path) if args.output_name is None: # default to ckpt file name args.output_name = os.path.splitext(ckpt_file)[0] onnx_filename, onnx_path = modelmanager.get_onnx_path(args.output_name) print(f"Exporting {ckpt_file} to TensorRT") timing_cache = modelmanager.get_timing_cache() profile_settings = ProfileSettings( args.batch_min, args.batch_opt, args.batch_max, args.height_min, args.height_opt, args.height_max, args.width_min, args.width_opt, args.width_max, args.token_count_min, args.token_count_opt, args.token_count_max, args.static_shapes ) print(profile_settings, end="\n\n") profile_settings.token_to_dim()
modelobj = UNetModel(
2
2023-10-25 23:58:12+00:00
8k
hydrogram/hydrogram
hydrogram/dispatcher.py
[ { "identifier": "utils", "path": "hydrogram/utils.py", "snippet": "async def ainput(prompt: str = \"\", *, hide: bool = False):\ndef get_input_media_from_file_id(\n file_id: str, expected_file_type: FileType = None, ttl_seconds: Optional[int] = None\n) -> Union[\"raw.types.InputMediaPhoto\", \"raw.types.InputMediaDocument\"]:\nasync def parse_messages(\n client, messages: \"raw.types.messages.Messages\", replies: int = 1\n) -> list[\"types.Message\"]:\ndef parse_deleted_messages(client, update) -> list[\"types.Message\"]:\ndef pack_inline_message_id(msg_id: \"raw.base.InputBotInlineMessageID\"):\ndef unpack_inline_message_id(inline_message_id: str) -> \"raw.base.InputBotInlineMessageID\":\ndef get_raw_peer_id(peer: raw.base.Peer) -> Optional[int]:\ndef get_peer_id(peer: raw.base.Peer) -> int:\ndef get_peer_type(peer_id: int) -> str:\ndef get_channel_id(peer_id: int) -> int:\ndef btoi(b: bytes) -> int:\ndef itob(i: int) -> bytes:\ndef sha256(data: bytes) -> bytes:\ndef xor(a: bytes, b: bytes) -> bytes:\ndef compute_password_hash(\n algo: raw.types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,\n password: str,\n) -> bytes:\ndef compute_password_check(\n r: raw.types.account.Password, password: str\n) -> raw.types.InputCheckPasswordSRP:\nasync def parse_text_entities(\n client: \"hydrogram.Client\",\n text: str,\n parse_mode: enums.ParseMode,\n entities: list[\"types.MessageEntity\"],\n) -> dict[str, Union[str, list[raw.base.MessageEntity]]]:\ndef zero_datetime() -> datetime:\ndef timestamp_to_datetime(ts: Optional[int]) -> Optional[datetime]:\ndef datetime_to_timestamp(dt: Optional[datetime]) -> Optional[int]:\ndef get_reply_head_fm(\n message_thread_id: int, reply_to_message_id: int\n) -> raw.types.InputReplyToMessage:\nMIN_CHANNEL_ID = -1002147483647\nMAX_CHANNEL_ID = -1000000000000\nMIN_CHAT_ID = -2147483647\nMAX_USER_ID_OLD = 2147483647\nMAX_USER_ID = 999999999999\n B = btoi(B_bytes)\n A = pow(g, a, p)\n S = pow(g_b, a_ux, p)" }, { "identifier": "CallbackQueryHandler", "path": "hydrogram/handlers/callback_query_handler.py", "snippet": "class CallbackQueryHandler(Handler):\n \"\"\"The CallbackQuery handler class. Used to handle callback queries coming from inline buttons.\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_callback_query` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new CallbackQuery arrives. 
It takes *(client, callback_query)*\n as positional arguments (look at the section below for a detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of callback queries to be passed\n in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the message handler.\n\n callback_query (:obj:`~hydrogram.types.CallbackQuery`):\n The received callback query.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n self.original_callback = callback\n super().__init__(self.resolve_future_or_callback, filters)\n\n def compose_data_identifier(self, query: CallbackQuery) -> Identifier:\n \"\"\"\n Composes an Identifier object from a CallbackQuery object.\n\n Parameters:\n query (:obj:`~hydrogram.types.CallbackQuery`):\n The CallbackQuery object to compose of.\n\n Returns:\n :obj:`~hydrogram.types.Identifier`: An Identifier object.\n \"\"\"\n from_user = query.from_user\n from_user_id = from_user.id if from_user else None\n from_user_username = from_user.username if from_user else None\n\n chat_id = None\n message_id = None\n\n if query.message:\n message_id = getattr(query.message, \"id\", getattr(query.message, \"message_id\", None))\n\n if query.message.chat:\n chat_id = [query.message.chat.id, query.message.chat.username]\n\n return Identifier(\n message_id=message_id,\n chat_id=chat_id,\n from_user_id=[from_user_id, from_user_username],\n inline_message_id=query.inline_message_id,\n )\n\n async def check_if_has_matching_listener(\n self, client: \"hydrogram.Client\", query: CallbackQuery\n ) -> tuple[bool, Optional[Listener]]:\n \"\"\"\n Checks if the CallbackQuery object has a matching listener.\n\n Parameters:\n client (:obj:`~hydrogram.Client`):\n The Client object to check with.\n\n query (:obj:`~hydrogram.types.CallbackQuery`):\n The CallbackQuery object to check with.\n\n Returns:\n A tuple of whether the CallbackQuery object has a matching listener and its filters does match with the\n CallbackQuery and the matching listener;\n \"\"\"\n data = self.compose_data_identifier(query)\n\n listener = client.get_listener_matching_with_data(data, ListenerTypes.CALLBACK_QUERY)\n\n listener_does_match = False\n\n if listener:\n filters = listener.filters\n if callable(filters):\n if iscoroutinefunction(filters.__call__):\n listener_does_match = await filters(client, query)\n else:\n listener_does_match = await client.loop.run_in_executor(\n None, filters, client, query\n )\n else:\n listener_does_match = True\n\n return listener_does_match, listener\n\n async def check(self, client: \"hydrogram.Client\", query: CallbackQuery) -> bool:\n \"\"\"\n Checks if the CallbackQuery object has a matching listener or handler.\n\n Parameters:\n client (:obj:`~hydrogram.Client`):\n The Client object to check with.\n\n query (:obj:`~hydrogram.types.CallbackQuery`):\n The CallbackQuery object to check with.\n\n Returns:\n ``bool``: A boolean indicating whether the CallbackQuery object has a matching listener or the handler filter matches.\n \"\"\"\n listener_does_match, listener = await self.check_if_has_matching_listener(client, query)\n\n if callable(self.filters):\n if iscoroutinefunction(self.filters.__call__):\n handler_does_match = await self.filters(client, query)\n else:\n handler_does_match = await client.loop.run_in_executor(\n None, self.filters, client, query\n )\n else:\n handler_does_match = True\n\n data = 
self.compose_data_identifier(query)\n\n if PyromodConfig.unallowed_click_alert:\n # matches with the current query but from any user\n permissive_identifier = Identifier(\n chat_id=data.chat_id,\n message_id=data.message_id,\n inline_message_id=data.inline_message_id,\n from_user_id=None,\n )\n\n matches = permissive_identifier.matches(data)\n\n if (\n listener\n and (matches and not listener_does_match)\n and listener.unallowed_click_alert\n ):\n alert = (\n listener.unallowed_click_alert\n if isinstance(listener.unallowed_click_alert, str)\n else PyromodConfig.unallowed_click_alert_text\n )\n await query.answer(alert)\n return False\n\n # let handler get the chance to handle if listener\n # exists but its filters doesn't match\n return listener_does_match or handler_does_match\n\n async def resolve_future_or_callback(\n self, client: \"hydrogram.Client\", query: CallbackQuery, *args\n ) -> None:\n \"\"\"\n Resolves the future or calls the callback of the listener. Will call the original handler if no listener.\n\n Parameters:\n client (:obj:`~hydrogram.Client`):\n The Client object to resolve or call with.\n\n query (:obj:`~hydrogram.types.CallbackQuery`):\n The CallbackQuery object to resolve or call with.\n\n args:\n The arguments to call the callback with.\n\n Returns:\n ``None``\n \"\"\"\n listener_does_match, listener = await self.check_if_has_matching_listener(client, query)\n\n if listener and listener_does_match:\n client.remove_listener(listener)\n\n if listener.future and not listener.future.done():\n listener.future.set_result(query)\n\n raise hydrogram.StopPropagation\n if listener.callback:\n if iscoroutinefunction(listener.callback):\n await listener.callback(client, query, *args)\n else:\n listener.callback(client, query, *args)\n\n raise hydrogram.StopPropagation\n\n raise ValueError(\"Listener must have either a future or a callback\")\n\n await self.original_callback(client, query, *args)" }, { "identifier": "ChatJoinRequestHandler", "path": "hydrogram/handlers/chat_join_request_handler.py", "snippet": "class ChatJoinRequestHandler(Handler):\n \"\"\"The ChatJoinRequest handler class. Used to handle join chat requests.\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`.\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_chat_join_request` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new ChatJoinRequest event arrives. It takes\n *(client, chat_join_request)* as positional arguments (look at the section below for a detailed\n description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of updates to be passed in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the handler.\n\n chat_join_request (:obj:`~hydrogram.types.ChatJoinRequest`):\n The received chat join request.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n super().__init__(callback, filters)" }, { "identifier": "ChatMemberUpdatedHandler", "path": "hydrogram/handlers/chat_member_updated_handler.py", "snippet": "class ChatMemberUpdatedHandler(Handler):\n \"\"\"The ChatMemberUpdated handler class. 
Used to handle changes in the status of a chat member.\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`.\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_chat_member_updated` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new ChatMemberUpdated event arrives. It takes\n *(client, chat_member_updated)* as positional arguments (look at the section below for a detailed\n description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of updates to be passed in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the handler.\n\n chat_member_updated (:obj:`~hydrogram.types.ChatMemberUpdated`):\n The received chat member update.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n super().__init__(callback, filters)" }, { "identifier": "ChosenInlineResultHandler", "path": "hydrogram/handlers/chosen_inline_result_handler.py", "snippet": "class ChosenInlineResultHandler(Handler):\n \"\"\"The ChosenInlineResultHandler handler class. Used to handle chosen inline results coming from inline queries.\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_chosen_inline_result` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new chosen inline result arrives.\n It takes *(client, chosen_inline_result)* as positional arguments (look at the section below for a\n detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of chosen inline results to be passed\n in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the message handler.\n\n chosen_inline_result (:obj:`~hydrogram.types.ChosenInlineResult`):\n The received chosen inline result.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n super().__init__(callback, filters)" }, { "identifier": "DeletedMessagesHandler", "path": "hydrogram/handlers/deleted_messages_handler.py", "snippet": "class DeletedMessagesHandler(Handler):\n \"\"\"The deleted messages handler class. Used to handle deleted messages coming from any chat\n (private, group, channel). 
It is intended to be used with :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_deleted_messages` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when one or more messages have been deleted.\n It takes *(client, messages)* as positional arguments (look at the section below for a detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of messages to be passed\n in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the message handler.\n\n messages (List of :obj:`~hydrogram.types.Message`):\n The deleted messages, as list.\n \"\"\"\n\n def __init__(self, callback: Callable, filters: Filter = None):\n super().__init__(callback, filters)\n\n async def check(self, client: \"hydrogram.Client\", messages: list[Message]):\n # Every message should be checked, if at least one matches the filter True is returned\n # otherwise, or if the list is empty, False is returned\n for message in messages:\n if await super().check(client, message):\n return True\n return False" }, { "identifier": "EditedMessageHandler", "path": "hydrogram/handlers/edited_message_handler.py", "snippet": "class EditedMessageHandler(Handler):\n \"\"\"The EditedMessage handler class. Used to handle edited messages.\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_edited_message` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new edited message arrives. It takes *(client, message)*\n as positional arguments (look at the section below for a detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of messages to be passed\n in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the message handler.\n\n edited_message (:obj:`~hydrogram.types.Message`):\n The received edited message.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n super().__init__(callback, filters)" }, { "identifier": "InlineQueryHandler", "path": "hydrogram/handlers/inline_query_handler.py", "snippet": "class InlineQueryHandler(Handler):\n \"\"\"The InlineQuery handler class. Used to handle inline queries.\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_inline_query` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new InlineQuery arrives. 
It takes *(client, inline_query)*\n as positional arguments (look at the section below for a detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of inline queries to be passed\n in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the inline query handler.\n\n inline_query (:obj:`~hydrogram.types.InlineQuery`):\n The received inline query.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n super().__init__(callback, filters)" }, { "identifier": "MessageHandler", "path": "hydrogram/handlers/message_handler.py", "snippet": "class MessageHandler(Handler):\n \"\"\"The Message handler class. Used to handle new messages.\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_message` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new Message arrives. It takes *(client, message)*\n as positional arguments (look at the section below for a detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of messages to be passed\n in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the message handler.\n\n message (:obj:`~hydrogram.types.Message`):\n The received message.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n self.original_callback = callback\n super().__init__(self.resolve_future_or_callback, filters)\n\n async def check_if_has_matching_listener(\n self, client: \"hydrogram.Client\", message: Message\n ) -> tuple[bool, Optional[Listener]]:\n \"\"\"\n Checks if the message has a matching listener.\n\n Parameters:\n client (:obj:`~hydrogram.Client`):\n The Client object to check with.\n\n message (:obj:`~hydrogram.types.Message`):\n The Message object to check with.\n\n Returns:\n ``tuple``: A tuple of two elements, the first one is whether the message has a matching listener or not,\n the second one is the matching listener if exists.\n \"\"\"\n from_user = message.from_user\n from_user_id = from_user.id if from_user else None\n from_user_username = from_user.username if from_user else None\n\n message_id = getattr(message, \"id\", getattr(message, \"message_id\", None))\n\n data = Identifier(\n message_id=message_id,\n chat_id=[message.chat.id, message.chat.username],\n from_user_id=[from_user_id, from_user_username],\n )\n\n listener = client.get_listener_matching_with_data(data, ListenerTypes.MESSAGE)\n\n listener_does_match = False\n\n if listener:\n filters = listener.filters\n if callable(filters):\n if iscoroutinefunction(filters.__call__):\n listener_does_match = await filters(client, message)\n else:\n listener_does_match = await client.loop.run_in_executor(\n None, filters, client, message\n )\n else:\n listener_does_match = True\n\n return listener_does_match, listener\n\n async def check(self, client: \"hydrogram.Client\", message: Message) -> bool:\n \"\"\"\n Checks if the message has a matching listener or handler and its filters does match with the Message.\n\n Parameters:\n client (:obj:`~hydrogram.Client`):\n The Client object to check with.\n\n message (:obj:`~hydrogram.types.Message`):\n The Message object to check with.\n\n Returns:\n ``bool``: Whether the message has a 
matching listener or handler and its filters does match with the Message.\n \"\"\"\n listener_does_match = (await self.check_if_has_matching_listener(client, message))[0]\n\n if callable(self.filters):\n if iscoroutinefunction(self.filters.__call__):\n handler_does_match = await self.filters(client, message)\n else:\n handler_does_match = await client.loop.run_in_executor(\n None, self.filters, client, message\n )\n else:\n handler_does_match = True\n\n # let handler get the chance to handle if listener\n # exists but its filters doesn't match\n return listener_does_match or handler_does_match\n\n async def resolve_future_or_callback(\n self, client: \"hydrogram.Client\", message: Message, *args\n ):\n \"\"\"\n Resolves the future or calls the callback of the listener if the message has a matching listener.\n\n Parameters:\n client (:obj:`~hydrogram.Client`):\n The Client object to resolve or call with.\n\n message (:obj:`~hydrogram.types.Message`):\n The Message object to resolve or call with.\n\n args (``tuple``):\n Arguments to call the callback with.\n \"\"\"\n listener_does_match, listener = await self.check_if_has_matching_listener(client, message)\n\n if listener and listener_does_match:\n client.remove_listener(listener)\n\n if listener.future and not listener.future.done():\n listener.future.set_result(message)\n\n raise hydrogram.StopPropagation\n if listener.callback:\n if iscoroutinefunction(listener.callback):\n await listener.callback(client, message, *args)\n else:\n listener.callback(client, message, *args)\n\n raise hydrogram.StopPropagation\n\n raise ValueError(\"Listener must have either a future or a callback\")\n\n await self.original_callback(client, message, *args)" }, { "identifier": "PollHandler", "path": "hydrogram/handlers/poll_handler.py", "snippet": "class PollHandler(Handler):\n \"\"\"The Poll handler class. Used to handle polls updates.\n\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_poll` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new poll update arrives. It takes *(client, poll)*\n as positional arguments (look at the section below for a detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of polls to be passed\n in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the poll handler.\n\n poll (:obj:`~hydrogram.types.Poll`):\n The received poll.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n super().__init__(callback, filters)" }, { "identifier": "RawUpdateHandler", "path": "hydrogram/handlers/raw_update_handler.py", "snippet": "class RawUpdateHandler(Handler):\n \"\"\"The Raw Update handler class. Used to handle raw updates. It is intended to be used with\n :meth:`~hydrogram.Client.add_handler`\n\n For a nicer way to register this handler, have a look at the\n :meth:`~hydrogram.Client.on_raw_update` decorator.\n\n Parameters:\n callback (``Callable``):\n A function that will be called when a new update is received from the server. 
It takes\n *(client, update, users, chats)* as positional arguments (look at the section below for\n a detailed description).\n\n Other Parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the update handler.\n\n update (``Update``):\n The received update, which can be one of the many single Updates listed in the\n :obj:`~hydrogram.raw.base.Update` base type.\n\n users (``dict``):\n Dictionary of all :obj:`~hydrogram.types.User` mentioned in the update.\n You can access extra info about the user (such as *first_name*, *last_name*, etc...) by using\n the IDs you find in the *update* argument (e.g.: *users[1768841572]*).\n\n chats (``dict``):\n Dictionary of all :obj:`~hydrogram.types.Chat` and\n :obj:`~hydrogram.raw.types.Channel` mentioned in the update.\n You can access extra info about the chat (such as *title*, *participants_count*, etc...)\n by using the IDs you find in the *update* argument (e.g.: *chats[1701277281]*).\n\n Note:\n The following Empty or Forbidden types may exist inside the *users* and *chats* dictionaries.\n They mean you have been blocked by the user or banned from the group/channel.\n\n - :obj:`~hydrogram.raw.types.UserEmpty`\n - :obj:`~hydrogram.raw.types.ChatEmpty`\n - :obj:`~hydrogram.raw.types.ChatForbidden`\n - :obj:`~hydrogram.raw.types.ChannelForbidden`\n \"\"\"\n\n def __init__(self, callback: Callable):\n super().__init__(callback)" }, { "identifier": "UserStatusHandler", "path": "hydrogram/handlers/user_status_handler.py", "snippet": "class UserStatusHandler(Handler):\n \"\"\"The UserStatus handler class. Used to handle user status updates (user going online or offline).\n It is intended to be used with :meth:`~hydrogram.Client.add_handler`.\n\n For a nicer way to register this handler, have a look at the :meth:`~hydrogram.Client.on_user_status` decorator.\n\n Parameters:\n callback (``Callable``):\n Pass a function that will be called when a new user status update arrives. It takes *(client, user)*\n as positional arguments (look at the section below for a detailed description).\n\n filters (:obj:`Filters`):\n Pass one or more filters to allow only a subset of users to be passed in your callback function.\n\n Other parameters:\n client (:obj:`~hydrogram.Client`):\n The Client itself, useful when you want to call other API methods inside the user status handler.\n\n user (:obj:`~hydrogram.types.User`):\n The user containing the updated status.\n \"\"\"\n\n def __init__(self, callback: Callable, filters=None):\n super().__init__(callback, filters)" } ]
import asyncio import inspect import logging import hydrogram from collections import OrderedDict from hydrogram import utils from hydrogram.handlers import ( CallbackQueryHandler, ChatJoinRequestHandler, ChatMemberUpdatedHandler, ChosenInlineResultHandler, DeletedMessagesHandler, EditedMessageHandler, InlineQueryHandler, MessageHandler, PollHandler, RawUpdateHandler, UserStatusHandler, ) from hydrogram.raw.types import ( UpdateBotCallbackQuery, UpdateBotChatInviteRequester, UpdateBotInlineQuery, UpdateBotInlineSend, UpdateChannelParticipant, UpdateChatParticipant, UpdateDeleteChannelMessages, UpdateDeleteMessages, UpdateEditChannelMessage, UpdateEditMessage, UpdateInlineBotCallbackQuery, UpdateMessagePoll, UpdateNewChannelMessage, UpdateNewMessage, UpdateNewScheduledMessage, UpdateUserStatus, )
6,938
# Hydrogram - Telegram MTProto API Client Library for Python # Copyright (C) 2017-2023 Dan <https://github.com/delivrance> # Copyright (C) 2023-present Hydrogram <https://hydrogram.org> # # This file is part of Hydrogram. # # Hydrogram is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Hydrogram is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Hydrogram. If not, see <http://www.gnu.org/licenses/>. log = logging.getLogger(__name__) class Dispatcher: NEW_MESSAGE_UPDATES = ( UpdateNewMessage, UpdateNewChannelMessage, UpdateNewScheduledMessage, ) EDIT_MESSAGE_UPDATES = (UpdateEditMessage, UpdateEditChannelMessage) DELETE_MESSAGES_UPDATES = (UpdateDeleteMessages, UpdateDeleteChannelMessages) CALLBACK_QUERY_UPDATES = (UpdateBotCallbackQuery, UpdateInlineBotCallbackQuery) CHAT_MEMBER_UPDATES = (UpdateChatParticipant, UpdateChannelParticipant) USER_STATUS_UPDATES = (UpdateUserStatus,) BOT_INLINE_QUERY_UPDATES = (UpdateBotInlineQuery,) POLL_UPDATES = (UpdateMessagePoll,) CHOSEN_INLINE_RESULT_UPDATES = (UpdateBotInlineSend,) CHAT_JOIN_REQUEST_UPDATES = (UpdateBotChatInviteRequester,) def __init__(self, client: "hydrogram.Client"): self.client = client self.loop = asyncio.get_event_loop() self.handler_worker_tasks = [] self.locks_list = [] self.updates_queue = asyncio.Queue() self.groups = OrderedDict() async def message_parser(update, users, chats): return ( await hydrogram.types.Message._parse( client=self.client, message=update.message, users=users, chats=chats, is_scheduled=isinstance(update, UpdateNewScheduledMessage), ), MessageHandler, ) async def edited_message_parser(update, users, chats): # Edited messages are parsed the same way as new messages, but the handler is different parsed, _ = await message_parser(update, users, chats) return (parsed, EditedMessageHandler) async def deleted_messages_parser(update, users, chats): return (
# Hydrogram - Telegram MTProto API Client Library for Python # Copyright (C) 2017-2023 Dan <https://github.com/delivrance> # Copyright (C) 2023-present Hydrogram <https://hydrogram.org> # # This file is part of Hydrogram. # # Hydrogram is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Hydrogram is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Hydrogram. If not, see <http://www.gnu.org/licenses/>. log = logging.getLogger(__name__) class Dispatcher: NEW_MESSAGE_UPDATES = ( UpdateNewMessage, UpdateNewChannelMessage, UpdateNewScheduledMessage, ) EDIT_MESSAGE_UPDATES = (UpdateEditMessage, UpdateEditChannelMessage) DELETE_MESSAGES_UPDATES = (UpdateDeleteMessages, UpdateDeleteChannelMessages) CALLBACK_QUERY_UPDATES = (UpdateBotCallbackQuery, UpdateInlineBotCallbackQuery) CHAT_MEMBER_UPDATES = (UpdateChatParticipant, UpdateChannelParticipant) USER_STATUS_UPDATES = (UpdateUserStatus,) BOT_INLINE_QUERY_UPDATES = (UpdateBotInlineQuery,) POLL_UPDATES = (UpdateMessagePoll,) CHOSEN_INLINE_RESULT_UPDATES = (UpdateBotInlineSend,) CHAT_JOIN_REQUEST_UPDATES = (UpdateBotChatInviteRequester,) def __init__(self, client: "hydrogram.Client"): self.client = client self.loop = asyncio.get_event_loop() self.handler_worker_tasks = [] self.locks_list = [] self.updates_queue = asyncio.Queue() self.groups = OrderedDict() async def message_parser(update, users, chats): return ( await hydrogram.types.Message._parse( client=self.client, message=update.message, users=users, chats=chats, is_scheduled=isinstance(update, UpdateNewScheduledMessage), ), MessageHandler, ) async def edited_message_parser(update, users, chats): # Edited messages are parsed the same way as new messages, but the handler is different parsed, _ = await message_parser(update, users, chats) return (parsed, EditedMessageHandler) async def deleted_messages_parser(update, users, chats): return (
utils.parse_deleted_messages(self.client, update),
0
2023-10-29 16:16:37+00:00
8k
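The Dispatcher snippet in the record above follows a simple pattern: each raw update type is paired with a small parser coroutine, and a worker drains updates from an asyncio queue and routes them to the matching parser. A minimal, self-contained sketch of that pattern follows; the class and function names here are invented for illustration and are not hydrogram's API.

import asyncio
from collections import OrderedDict

# Hypothetical stand-ins for raw update classes (not hydrogram types).
class NewMessage: ...
class DeleteMessages: ...

async def parse_new_message(update):
    return "message", update

async def parse_deleted(update):
    return "deleted", update

class MiniDispatcher:
    def __init__(self):
        # Map update types to parser coroutines, as the record above does.
        self.parsers = OrderedDict([
            (NewMessage, parse_new_message),
            (DeleteMessages, parse_deleted),
        ])
        self.updates_queue = asyncio.Queue()

    async def handler_worker(self):
        while True:
            update = await self.updates_queue.get()
            if update is None:  # sentinel used to stop the worker
                break
            parser = self.parsers.get(type(update))
            if parser is not None:
                kind, payload = await parser(update)
                print(f"dispatched {kind}: {payload!r}")

async def main():
    dispatcher = MiniDispatcher()
    worker = asyncio.create_task(dispatcher.handler_worker())
    await dispatcher.updates_queue.put(NewMessage())
    await dispatcher.updates_queue.put(None)
    await worker

if __name__ == "__main__":
    asyncio.run(main())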
iwatake2222/rotop
src/rotop/gui_main.py
[ { "identifier": "DataContainer", "path": "src/rotop/data_container.py", "snippet": "class DataContainer:\n MAX_ROW_CSV = 600\n MAX_NUM_HISTORY = 100\n\n def __init__(self, write_csv=False):\n now = datetime.datetime.now()\n if write_csv:\n self.csv_dir_name = now.strftime('./rotop_%Y%m%d_%H%M%S')\n os.mkdir(self.csv_dir_name)\n else:\n self.csv_dir_name = None\n self.csv_index = 0\n self.df_cpu = pd.DataFrame()\n self.df_mem = pd.DataFrame()\n self.df_cpu_history = pd.DataFrame()\n self.df_mem_history = pd.DataFrame()\n\n def run(self, top_runner: TopRunner, lines: list[str], num_process: int):\n if top_runner.col_range_command and top_runner.col_range_command[0] > 0:\n df_cpu_current, df_mem_current = self.create_df_from_top(top_runner, lines, num_process)\n self.df_cpu = pd.concat([self.df_cpu, df_cpu_current], axis=0)\n self.df_mem = pd.concat([self.df_mem, df_mem_current], axis=0)\n self.df_cpu_history = pd.concat([self.df_cpu_history, df_cpu_current], axis=0, ignore_index=True)\n self.df_mem_history = pd.concat([self.df_mem_history, df_mem_current], axis=0, ignore_index=True)\n if self.csv_dir_name:\n self.df_cpu.to_csv(os.path.join(self.csv_dir_name, f'cpu_{self.csv_index:03d}.csv'), index=False)\n self.df_mem.to_csv(os.path.join(self.csv_dir_name, f'mem_{self.csv_index:03d}.csv'), index=False)\n if len(self.df_cpu) >= self.MAX_ROW_CSV:\n self.df_cpu = pd.DataFrame()\n self.df_mem = pd.DataFrame()\n self.csv_index += 1\n if len(self.df_cpu_history) >= self.MAX_NUM_HISTORY:\n self.df_cpu_history = self.df_cpu_history[1:]\n self.df_mem_history = self.df_mem_history[1:]\n\n self.df_cpu_history = self.sort_df_in_column(self.df_cpu_history)\n self.df_mem_history = self.sort_df_in_column(self.df_mem_history)\n\n return self.df_cpu_history, self.df_mem_history\n\n\n def reset_history(self):\n self.df_cpu_history = pd.DataFrame()\n self.df_mem_history = pd.DataFrame()\n\n\n @staticmethod\n def sort_df_in_column(df: pd.DataFrame):\n df = df.sort_values(by=len(df)-1, axis=1, ascending=False)\n return df\n\n\n @staticmethod\n def create_df_from_top(top_runner: TopRunner, lines: list[str], num_process: int):\n # now = datetime.datetime.now()\n now = int(time.time())\n for i, line in enumerate(lines):\n if 'PID' in line:\n lines = lines[i + 1:]\n break\n\n process_list = []\n cpu_list = []\n mem_list = []\n for i, line in enumerate(lines):\n if i >= num_process:\n break\n pid = line[top_runner.col_range_pid[0]:top_runner.col_range_pid[1]].strip()\n command = line[top_runner.col_range_command[0]:].strip()\n process_name = str(f'{command} ({pid})')\n process_list.append(process_name)\n cpu = float(line[top_runner.col_range_CPU[0]:top_runner.col_range_CPU[1]].strip())\n cpu_list.append(cpu)\n mem = float(line[top_runner.col_range_MEM[0]:top_runner.col_range_MEM[1]].strip())\n mem_list.append(mem)\n\n df_cpu_current = pd.DataFrame([[now] + cpu_list], columns=['datetime'] + process_list)\n df_mem_current = pd.DataFrame([[now] + mem_list], columns=['datetime'] + process_list)\n\n return df_cpu_current, df_mem_current" }, { "identifier": "TopRunner", "path": "src/rotop/top_runner.py", "snippet": "class TopRunner:\n def __init__(self, interval, filter):\n self.child = pexpect.spawn(f'top -cb -d {interval} -o %CPU -w 512')\n self.filter_re = self.create_filter_re(filter)\n self.ros_re = self.create_filter_re('--ros-arg|/opt/ros')\n self.col_range_list_to_display = None\n self.col_range_pid = None\n self.col_range_CPU = None\n self.col_range_MEM = None\n self.col_range_command = None\n self.next_after = 
''\n\n\n def __del__(self):\n signal.signal(signal.SIGINT, signal.SIG_IGN) # ignore ctrl-c while closing\n self.child.close()\n\n\n def run(self, max_num_process, show_all=False, only_ros=False):\n # get the result string of top command\n self.child.expect(r'top - .*load average:')\n before = self.child.before\n previous_after = self.next_after\n self.next_after = self.child.after\n if before == '' or previous_after == '' or self.next_after == '':\n return None, None\n top_str = (previous_after + before).decode('utf-8')\n orgial_lines = top_str.splitlines()\n\n result_lines = []\n result_show_all_lines = []\n row_process_info = 0\n\n # System Information\n for line in orgial_lines:\n result_lines.append(line)\n result_show_all_lines.append(line)\n if 'PID' in line:\n break\n\n # get layout information from process header line\n row_process_info = len(result_lines)\n process_header_org = result_lines[-1]\n self.analyze_cols(process_header_org, show_all)\n\n process_header = ''\n for range in self.col_range_list_to_display:\n process_header += process_header_org[range[0]:range[1]]\n result_lines[-1] = process_header\n\n # Process Information\n for line in orgial_lines[row_process_info:]:\n if self.col_range_command and self.col_range_command[0] > 0 and len(line) > self.col_range_command[0]:\n process_info_org = line[:self.col_range_command[0]]\n process_info = ''\n for range in self.col_range_list_to_display:\n process_info += process_info_org[range[0]:range[1]]\n command_str = line[self.col_range_command[0]:]\n if not self.filter_re.match(command_str):\n continue\n if only_ros and not self.ros_re.match(command_str):\n continue\n command_str = self.parse_command_str(command_str)\n\n line = process_info + command_str\n show_all_line = process_info_org + command_str\n\n result_lines.append(line)\n result_show_all_lines.append(show_all_line)\n if len(result_lines) >= row_process_info + max_num_process:\n break\n\n return result_lines, result_show_all_lines\n\n\n def analyze_cols(self, process_header: str, show_all: bool):\n if self.col_range_command is None or self.col_range_command[0] == -1:\n self.col_range_list_to_display = self.get_col_range_list_to_display(process_header, show_all)\n self.col_range_pid = TopRunner.get_col_range_PID(process_header)\n self.col_range_CPU = TopRunner.get_col_range_CPU(process_header)\n self.col_range_MEM = TopRunner.get_col_range_MEM(process_header)\n self.col_range_command = TopRunner.get_col_range_command(process_header)\n return\n\n\n\n @staticmethod\n def create_filter_re(filter_str):\n if '.*' not in filter_str:\n filter_str = '.*' + filter_str + '.*'\n filter_re = re.compile(filter_str)\n return filter_re\n\n\n @staticmethod\n def get_row_start_list(lines: list[str])->list[int]:\n row_list = []\n for i, line in enumerate(lines):\n if 'top' in line and 'load average' in line:\n row_list.append(i)\n return row_list\n\n\n @staticmethod\n def get_col_range_command(process_info_header_line: str):\n start_col = process_info_header_line.find('COMMAND')\n end_col = len(process_info_header_line) - 1\n return (start_col, end_col)\n\n\n @staticmethod\n def get_col_range_PID(process_info_header_line: str):\n start_col = 0\n end_col = process_info_header_line.find('PID') + len('PID')\n return (start_col, end_col)\n\n\n @staticmethod\n def get_col_range_CPU(process_info_header_line: str):\n start_col = process_info_header_line.find('SHR S') + len('SHR S')\n end_col = process_info_header_line.find('%CPU') + len('%CPU')\n return (start_col, end_col)\n\n\n 
@staticmethod\n def get_col_range_MEM(process_info_header_line: str):\n start_col = process_info_header_line.find('%CPU') + len('%CPU')\n end_col = process_info_header_line.find('%MEM') + len('%MEM')\n return (start_col, end_col)\n\n\n @staticmethod\n def get_col_range_list_to_display(process_info_header_line: str, show_all=False):\n range_list = []\n\n if show_all:\n range_list.append((0, len(process_info_header_line)))\n else:\n start_col = 0\n end_col = process_info_header_line.find('PID') + len('PID')\n range_list.append((start_col, end_col))\n\n start_col = process_info_header_line.find('NI') + len('NI')\n end_col = process_info_header_line.find('%MEM') + len('%MEM')\n range_list.append((start_col, end_col))\n\n start_col = process_info_header_line.find('COMMAND') - 1\n end_col = len(process_info_header_line)\n range_list.append((start_col, end_col))\n\n return range_list\n\n\n @staticmethod\n def parse_component_container_command(command):\n cmd = command.split()[0].split('/')[-1]\n idx_node = command.find('__node')\n if idx_node > 0:\n node = command[idx_node:].split()[0].split('=')[-1]\n cmd = node\n idx_ns = command.find('__ns')\n if idx_ns > 0:\n ns = command[idx_ns:].split()[0].split('=')[-1]\n # cmd = cmd + ', ' + node + ', ' + ns\n cmd += ', ' + ns\n return cmd\n\n\n @staticmethod\n def parse_python_command(command):\n cmd_list = command.split()\n cmd = cmd_list[0].split('/')[-1]\n if len(cmd_list) > 1:\n if cmd_list[1][0] == '-':\n python_file = cmd_list[-1]\n else:\n python_file = cmd_list[1]\n python_file = python_file.split('/')[-1]\n\n ros2_option = ''\n if 'ros2' == python_file:\n ros2_option = ' '.join(cmd_list[2:5])\n\n cmd = cmd + ' ' + python_file + ' ' + ros2_option\n return cmd\n\n\n @staticmethod\n def parse_command_str(command):\n param_for_ros2 = ['__node', '__ns']\n if '[' == command[0]:\n # kernel process\n command = command\n elif any(item in command for item in param_for_ros2):\n command = TopRunner.parse_component_container_command(command)\n elif 'python' in command:\n command = TopRunner.parse_python_command(command)\n else:\n # normal process\n command = command.split()[0].split('/')[-1]\n return command" }, { "identifier": "create_logger", "path": "src/rotop/utility.py", "snippet": "def create_logger(name, level: int=logging.DEBUG, log_filename: str=None) -> logging.Logger:\n handler_format = logging.Formatter('[%(asctime)s][%(levelname)-7s][%(filename)s:%(lineno)s] %(message)s')\n # stream_handler = logging .StreamHandler()\n # stream_handler.setLevel(level)\n # stream_handler.setFormatter(handler_format)\n logger = logging.getLogger(name)\n logger.propagate = False\n logger.setLevel(level)\n # logger.addHandler(stream_handler)\n # if log_filename:\n # file_handler = logging.FileHandler(log_filename)\n # file_handler.setLevel(level)\n # file_handler.setFormatter(handler_format)\n # logger.addHandler(file_handler)\n return logger" } ]
import pandas as pd import threading import time import dearpygui.dearpygui as dpg from .data_container import DataContainer from .top_runner import TopRunner from .utility import create_logger
4,806
def exit(self): self.is_exit = True def start_dpg(self): dpg.create_context() dpg.create_viewport(title='rotop', width=800, height=600) dpg.setup_dearpygui() with dpg.window(label='window', no_collapse=True, no_title_bar=True, no_move=True, no_resize=True) as self.dpg_window_id: with dpg.group(horizontal=True): self.dpg_button_cpumem = dpg.add_button(label='CPU/MEM', callback=self.cb_button_cpumem) self.dpg_button_reset = dpg.add_button(label='RESET', callback=self.cb_button_reset) self.dpg_button_pause = dpg.add_button(label='PAUSE', callback=self.cb_button_pause) dpg.add_text('Help(?)') with dpg.tooltip(dpg.last_item()): dpg.add_text('- CLick "Reset" to clear graph and history.') with dpg.plot(label=self.get_plot_title(), use_local_time=True, no_title=True) as self.dpg_plot_id: self.dpg_plot_axis_x_id = dpg.add_plot_axis(dpg.mvXAxis, label='datetime', time=True) self.dpg_text = dpg.add_text() dpg.set_viewport_resize_callback(self.cb_resize) self.cb_resize(None, [None, None, dpg.get_viewport_width(), dpg.get_viewport_height()]) dpg.show_viewport() # Manually control FPS (10fps), otherwise FPS becomes very high, which causes high CPU load # dpg.start_dearpygui() while dpg.is_dearpygui_running() and not self.is_exit: time.sleep(0.1) dpg.render_dearpygui_frame() dpg.destroy_context() def get_plot_title(self): return 'CPU [%]' if self.plot_is_cpu else 'MEM [%]' def cb_button_cpumem(self, sender, app_data, user_data): self.plot_is_cpu = not self.plot_is_cpu dpg.set_item_label(self.dpg_plot_id, self.get_plot_title()) def cb_button_reset(self, sender, app_data, user_data): global g_reset_history_df g_reset_history_df = True self.color_dict = {} self.theme_dict = {} def cb_button_pause(self, sender, app_data, user_data): self.pause = not self.pause def cb_resize(self, sender, app_data): window_width = app_data[2] window_height = app_data[3] dpg.set_item_width(self.dpg_window_id, window_width) dpg.set_item_height(self.dpg_window_id, window_height) dpg.set_item_width(self.dpg_plot_id, window_width) dpg.set_item_height(self.dpg_plot_id, window_height / 2) def update_gui(self, result_lines:list[str], df_cpu_history:pd.DataFrame, df_mem_history:pd.DataFrame): if self.pause: return if self.dpg_plot_axis_y_id: dpg.delete_item(self.dpg_plot_axis_y_id) self.dpg_plot_axis_y_id = dpg.add_plot_axis(dpg.mvYAxis, label=self.get_plot_title(), lock_min=True, parent=self.dpg_plot_id) df = df_cpu_history if self.plot_is_cpu else df_mem_history col_x = df.columns[0] cols_y = df.columns[1:] x = df[col_x].to_list() for col_y in cols_y: y = df[col_y].to_list() line_series = dpg.add_line_series(x, y, label=col_y[:min(40, len(col_y))].ljust(40), parent=self.dpg_plot_axis_y_id) theme = self.get_theme(col_y) dpg.bind_item_theme(line_series, theme) if self.plot_is_cpu: dpg.add_line_series([x[0]], [110], label='', parent=self.dpg_plot_axis_y_id) # dummy for ymax>=100 dpg.add_plot_legend(parent=self.dpg_plot_id, outside=True, location=dpg.mvPlot_Location_NorthEast) dpg.fit_axis_data(self.dpg_plot_axis_x_id) dpg.fit_axis_data(self.dpg_plot_axis_y_id) dpg.set_value(self.dpg_text, '\n'.join(result_lines)) def get_color(self, process_name)->tuple[int]: # return (0, 0, 0) if process_name in self.color_dict: return self.color_dict[process_name] else: color = COLOR_MAP[len(self.color_dict)%len(COLOR_MAP)] self.color_dict[process_name] = color return color def get_theme(self, process_name): if process_name in self.theme_dict: return self.theme_dict[process_name] else: with dpg.theme() as theme: with 
dpg.theme_component(dpg.mvLineSeries): dpg.add_theme_color(dpg.mvPlotCol_Line, self.get_color(process_name), category=dpg.mvThemeCat_Plots) self.theme_dict[process_name] = theme return theme def gui_loop(view: GuiView): view.start_dpg() def gui_main(args): global g_reset_history_df top_runner = TopRunner(args.interval, args.filter)
# Copyright 2023 iwatake2222 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations logger = create_logger(__name__, log_filename='rotop.log') g_reset_history_df = False # todo: add lock COLOR_MAP = ( # matplotlib.cm.tab20 (31, 119, 180), (174, 199, 232), (256, 127, 14), (256, 187, 120), (44, 160, 44), (152, 223, 138), (214, 39, 40), (256, 152, 150), (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148), (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199), (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229), # matplotlib.cm.tab20b (57, 59, 121), (82, 84, 163), (107, 110, 207), (156, 158, 222), (99, 121, 57), (140, 162, 82), (181, 207, 107), (206, 219, 156), (140, 109, 49), (189, 158, 57), (231, 186, 82), (231, 203, 148), (132, 60, 57), (173, 73, 74), (214, 97, 107), (231, 150, 156), (123, 65, 115), (165, 81, 148), (206, 109, 189), (222, 158, 214), ) class GuiView: def __init__(self): self.is_exit = False self.pause = False # todo: add lock self.plot_is_cpu = True self.dpg_plot_axis_x_id = None self.dpg_plot_axis_y_id = None self.color_dict = {} self.theme_dict = {} def exit(self): self.is_exit = True def start_dpg(self): dpg.create_context() dpg.create_viewport(title='rotop', width=800, height=600) dpg.setup_dearpygui() with dpg.window(label='window', no_collapse=True, no_title_bar=True, no_move=True, no_resize=True) as self.dpg_window_id: with dpg.group(horizontal=True): self.dpg_button_cpumem = dpg.add_button(label='CPU/MEM', callback=self.cb_button_cpumem) self.dpg_button_reset = dpg.add_button(label='RESET', callback=self.cb_button_reset) self.dpg_button_pause = dpg.add_button(label='PAUSE', callback=self.cb_button_pause) dpg.add_text('Help(?)') with dpg.tooltip(dpg.last_item()): dpg.add_text('- CLick "Reset" to clear graph and history.') with dpg.plot(label=self.get_plot_title(), use_local_time=True, no_title=True) as self.dpg_plot_id: self.dpg_plot_axis_x_id = dpg.add_plot_axis(dpg.mvXAxis, label='datetime', time=True) self.dpg_text = dpg.add_text() dpg.set_viewport_resize_callback(self.cb_resize) self.cb_resize(None, [None, None, dpg.get_viewport_width(), dpg.get_viewport_height()]) dpg.show_viewport() # Manually control FPS (10fps), otherwise FPS becomes very high, which causes high CPU load # dpg.start_dearpygui() while dpg.is_dearpygui_running() and not self.is_exit: time.sleep(0.1) dpg.render_dearpygui_frame() dpg.destroy_context() def get_plot_title(self): return 'CPU [%]' if self.plot_is_cpu else 'MEM [%]' def cb_button_cpumem(self, sender, app_data, user_data): self.plot_is_cpu = not self.plot_is_cpu dpg.set_item_label(self.dpg_plot_id, self.get_plot_title()) def cb_button_reset(self, sender, app_data, user_data): global g_reset_history_df g_reset_history_df = True self.color_dict = {} self.theme_dict = {} def cb_button_pause(self, sender, app_data, user_data): self.pause = not self.pause def cb_resize(self, sender, app_data): window_width = app_data[2] window_height = app_data[3] dpg.set_item_width(self.dpg_window_id, 
window_width) dpg.set_item_height(self.dpg_window_id, window_height) dpg.set_item_width(self.dpg_plot_id, window_width) dpg.set_item_height(self.dpg_plot_id, window_height / 2) def update_gui(self, result_lines:list[str], df_cpu_history:pd.DataFrame, df_mem_history:pd.DataFrame): if self.pause: return if self.dpg_plot_axis_y_id: dpg.delete_item(self.dpg_plot_axis_y_id) self.dpg_plot_axis_y_id = dpg.add_plot_axis(dpg.mvYAxis, label=self.get_plot_title(), lock_min=True, parent=self.dpg_plot_id) df = df_cpu_history if self.plot_is_cpu else df_mem_history col_x = df.columns[0] cols_y = df.columns[1:] x = df[col_x].to_list() for col_y in cols_y: y = df[col_y].to_list() line_series = dpg.add_line_series(x, y, label=col_y[:min(40, len(col_y))].ljust(40), parent=self.dpg_plot_axis_y_id) theme = self.get_theme(col_y) dpg.bind_item_theme(line_series, theme) if self.plot_is_cpu: dpg.add_line_series([x[0]], [110], label='', parent=self.dpg_plot_axis_y_id) # dummy for ymax>=100 dpg.add_plot_legend(parent=self.dpg_plot_id, outside=True, location=dpg.mvPlot_Location_NorthEast) dpg.fit_axis_data(self.dpg_plot_axis_x_id) dpg.fit_axis_data(self.dpg_plot_axis_y_id) dpg.set_value(self.dpg_text, '\n'.join(result_lines)) def get_color(self, process_name)->tuple[int]: # return (0, 0, 0) if process_name in self.color_dict: return self.color_dict[process_name] else: color = COLOR_MAP[len(self.color_dict)%len(COLOR_MAP)] self.color_dict[process_name] = color return color def get_theme(self, process_name): if process_name in self.theme_dict: return self.theme_dict[process_name] else: with dpg.theme() as theme: with dpg.theme_component(dpg.mvLineSeries): dpg.add_theme_color(dpg.mvPlotCol_Line, self.get_color(process_name), category=dpg.mvThemeCat_Plots) self.theme_dict[process_name] = theme return theme def gui_loop(view: GuiView): view.start_dpg() def gui_main(args): global g_reset_history_df top_runner = TopRunner(args.interval, args.filter)
data_container = DataContainer(args.csv)
0
2023-10-30 22:21:05+00:00
8k
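The TopRunner class quoted in the record above trims noisy command lines before display; for ROS 2 component containers it extracts the node name and namespace from the __node/__ns remappings. A rough standalone sketch of that idea follows (the sample command line is made up, and this is not the packaged rotop code).

def shorten_ros2_command(command: str) -> str:
    # Fall back to the executable name, then prefer "node, namespace"
    # if __node/__ns remappings are present on the command line.
    cmd = command.split()[0].split("/")[-1]
    idx_node = command.find("__node")
    if idx_node >= 0:
        cmd = command[idx_node:].split()[0].split("=")[-1]
    idx_ns = command.find("__ns")
    if idx_ns >= 0:
        cmd += ", " + command[idx_ns:].split()[0].split("=")[-1]
    return cmd

example = ("/opt/ros/humble/lib/rclcpp_components/component_container "
           "--ros-args -r __node:=talker -r __ns:=/demo")
print(shorten_ros2_command(example))  # -> talker, /demo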
chenruduan/OAReactDiff
oa_reactdiff/tests/model/test_equiv.py
[ { "identifier": "EGNN", "path": "oa_reactdiff/model/egnn.py", "snippet": "class EGNN(nn.Module):\n def __init__(\n self,\n in_node_nf: int = 8,\n in_edge_nf: int = 2,\n hidden_nf: int = 256,\n edge_hidden_nf: int = 32,\n act_fn: str = \"swish\",\n n_layers: int = 3,\n attention: int = False,\n out_node_nf: Optional[int] = None,\n tanh: bool = False,\n coords_range: float = 15.0,\n norm_constant: float = 1.0,\n inv_sublayers: int = 2,\n sin_embedding: bool = False,\n normalization_factor: float = 100.0,\n aggregation_method: str = \"sum\",\n reflect_equiv: bool = True,\n ):\n r\"\"\"_summary_\n\n Args:\n in_node_nf (int): number of input node feature. Defaults to 8.\n in_edge_nf (int): number of input edge feature. Defaults to 2.\n hidden_nf (int): number of hidden units. Defaults to 256.\n act_fn (str): activation function. Defaults to \"swish\".\n n_layers (int): number of equivariant update block. Defaults to 3.\n attention (int): whether to use self attention. Defaults to False.\n out_node_nf (Optional[int]): number of output node features.\n Defaults to None to set the same as in_node_nf\n coords_range (float): range factor, only used in tanh = True.\n Defaults to 15.0.\n norm_constant (float): distance normalizating factor. Defaults to 1.0.\n inv_sublayers (int): number of GCL in an equivariant update block.\n Defaults to 2.\n sin_embedding (Optional[nn.Module]): whether to use edge distance embedding.\n Defaults to None.\n normalization_factor (float): distance normalization used in coord2diff.\n Defaults to 1.0.\n aggregation_method (str): aggregation options in scattering.\n Defaults to \"sum\".\n reflect_equiv (bool): whether to ignore reflection.\n Defaults to True.\n \"\"\"\n super().__init__()\n if out_node_nf is None:\n out_node_nf = in_node_nf\n self.hidden_nf = hidden_nf\n self.edge_hidden_nf = edge_hidden_nf\n self.n_layers = n_layers\n self.coords_range_layer = float(coords_range / n_layers)\n self.normalization_factor = normalization_factor\n self.aggregation_method = aggregation_method\n self.reflect_equiv = reflect_equiv\n\n edge_feat_nf = in_edge_nf\n if sin_embedding:\n self.sin_embedding = SinusoidsEmbeddingNew()\n self.dist_dim = self.sin_embedding.dim\n else:\n self.sin_embedding = None\n self.dist_dim = 1\n\n self.edge_feat_nf = edge_feat_nf + self.dist_dim\n\n self.embedding = nn.Linear(in_node_nf, self.hidden_nf)\n self.embedding_out = nn.Linear(self.hidden_nf, out_node_nf)\n\n self.edge_embedding = nn.Linear(\n self.edge_feat_nf, self.hidden_nf - self.dist_dim\n )\n self.edge_embedding_out = nn.Linear(\n self.hidden_nf - self.dist_dim, self.edge_feat_nf\n )\n for i in range(0, n_layers):\n self.add_module(\n \"e_block_%d\" % i,\n EquivariantBlock(\n hidden_nf,\n edge_feat_nf=edge_feat_nf,\n act_fn=act_fn,\n n_layers=inv_sublayers,\n attention=attention,\n tanh=tanh,\n coords_range=coords_range,\n norm_constant=norm_constant,\n sin_embedding=self.sin_embedding,\n normalization_factor=normalization_factor,\n aggregation_method=aggregation_method,\n reflect_equiv=reflect_equiv,\n ),\n )\n\n def forward(\n self,\n h: Tensor,\n pos: Tensor,\n edge_index: Tensor,\n edge_attr: Optional[Tensor] = None,\n node_mask: Optional[Tensor] = None,\n edge_mask: Optional[Tensor] = None,\n update_coords_mask: Optional[Tensor] = None,\n subgraph_mask: Optional[Tensor] = None,\n ) -> Tuple[Tensor, Tensor, Tensor]:\n r\"\"\"\n\n Args:\n h (Tensor): [n_nodes, n_hidden], node features.\n pos (Tensor): [n_nodes, n_dim (3 in 3D space)], position tensor.\n edge_index (Tensor): [2, 
n_edge], edge index {ij}\n edge_attr (Optional[Tensor]): [n_edge, edge_feature_dim]. edge attributes.\n Defaults to None.\n node_mask (Optional[Tensor]): [n_node, 1], mask for node updates.\n Defaults to None.\n edge_mask (Optional[Tensor]): [n_edge, 1], mask for edge updates.\n Defaults to None.\n update_coords_mask (Optional[Tensor]): [n_node, 1], mask for position updates.\n Defaults to None.\n subgraph_mask (Optional[Tensor]): n_edge, 1], mask for positions aggregations.\n The idea is keep subgraph (i.e., fragment) level equivariance.\n Defaults to None.\n\n Returns:\n Tuple[Tensor, Tensor, Tensor]: updated h, pos, edge_attr\n \"\"\"\n # Edit Emiel: Remove velocity as input\n distances, _ = coord2diff(pos, edge_index)\n if subgraph_mask is not None:\n distances = distances * subgraph_mask\n\n if self.sin_embedding is not None:\n distances = self.sin_embedding(distances)\n if edge_attr is None or edge_attr.size(-1) == 0:\n edge_attr = distances\n else:\n edge_attr = torch.concat([distances, edge_attr], dim=-1)\n edge_attr = self.edge_embedding(edge_attr)\n h = self.embedding(h)\n # edge_index_ji = get_ji_bond_index(edge_index)\n # edge_attr = symmetrize_edge(edge_attr, edge_index_ji)\n\n for i in range(0, self.n_layers):\n h, pos, edge_attr = self._modules[\"e_block_%d\" % i](\n h,\n pos,\n edge_index,\n edge_attr=edge_attr,\n node_mask=node_mask,\n edge_mask=edge_mask,\n update_coords_mask=update_coords_mask,\n subgraph_mask=subgraph_mask,\n )\n\n # edge_attr = symmetrize_edge(edge_attr, edge_index_ji)\n\n # Important, the bias of the last linear might be non-zero\n h = self.embedding_out(h)\n edge_attr = self.edge_embedding_out(edge_attr)\n\n if node_mask is not None:\n h = h * node_mask\n if edge_mask is not None:\n edge_attr = edge_attr * edge_mask\n return h, pos, edge_attr" }, { "identifier": "LEFTNet", "path": "oa_reactdiff/model/leftnet.py", "snippet": "class LEFTNet(torch.nn.Module):\n r\"\"\"\n LEFTNet\n\n Args:\n pos_require_grad (bool, optional): If set to :obj:`True`, will require to take derivative of model output with respect to the atomic positions. (default: :obj:`False`)\n cutoff (float, optional): Cutoff distance for interatomic interactions. (default: :obj:`5.0`)\n num_layers (int, optional): Number of building blocks. (default: :obj:`4`)\n hidden_channels (int, optional): Hidden embedding size. (default: :obj:`128`)\n num_radial (int, optional): Number of radial basis functions. (default: :obj:`96`)\n y_mean (float, optional): Mean value of the labels of training data. (default: :obj:`0`)\n y_std (float, optional): Standard deviation of the labels of training data. 
(default: :obj:`1`)\n\n \"\"\"\n\n def __init__(\n self,\n pos_require_grad=False,\n cutoff=10.0,\n num_layers=4,\n hidden_channels=128,\n num_radial=96,\n in_hidden_channels: int = 8,\n reflect_equiv: bool = True,\n legacy: bool = True,\n update: bool = True,\n pos_grad: bool = False,\n single_layer_output: bool = True,\n for_conf: bool = False,\n ff: bool = False,\n object_aware: bool = True,\n **kwargs,\n ):\n super(LEFTNet, self).__init__()\n self.num_layers = num_layers\n self.hidden_channels = hidden_channels\n self.cutoff = cutoff\n self.pos_require_grad = pos_require_grad\n self.reflect_equiv = reflect_equiv\n self.legacy = legacy\n self.update = update\n self.pos_grad = pos_grad\n self.for_conf = for_conf\n self.ff = ff\n self.object_aware = object_aware\n\n self.embedding = nn.Linear(in_hidden_channels, hidden_channels)\n self.embedding_out = nn.Linear(hidden_channels, in_hidden_channels)\n self.radial_emb = RBFEmb(num_radial, self.cutoff)\n self.neighbor_emb = NeighborEmb(hidden_channels, in_hidden_channels)\n self.s2v = CFConvS2V(hidden_channels)\n\n self.radial_lin = nn.Sequential(\n nn.Linear(num_radial, hidden_channels),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels, hidden_channels),\n )\n\n self.lin3 = nn.Sequential(\n nn.Linear(3, hidden_channels // 4),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels // 4, 1),\n )\n self.pos_expansion = MLP(\n in_dim=3,\n out_dims=[hidden_channels // 2, hidden_channels],\n activation=\"swish\",\n last_layer_no_activation=True,\n bias=False,\n )\n if self.legacy:\n self.distance_embedding = MLP(\n in_dim=num_radial,\n out_dims=[hidden_channels // 2, hidden_channels],\n activation=\"swish\",\n bias=False,\n )\n if self.pos_grad:\n self.dynamic_mlp_modules = nn.Sequential(\n nn.Linear(hidden_channels, hidden_channels // 2),\n nn.SiLU(inplace=True),\n nn.Linear(hidden_channels // 2, 3),\n )\n\n self.gcl_layers = nn.ModuleList()\n self.message_layers = nn.ModuleList()\n self.update_layers = nn.ModuleList()\n\n for _ in range(num_layers):\n self.gcl_layers.append(\n GCLMessage(hidden_channels, num_radial, legacy=legacy)\n )\n self.message_layers.append(\n EquiMessage(hidden_channels, num_radial, reflect_equiv).jittable()\n )\n self.update_layers.append(EquiUpdate(hidden_channels, reflect_equiv))\n\n self.last_layer = nn.Linear(hidden_channels, 1)\n\n self.inv_sqrt_2 = 1 / math.sqrt(2.0)\n self.out_pos = EquiOutput(\n hidden_channels,\n out_channels=1,\n single_layer_output=single_layer_output,\n )\n\n # for node-wise frame\n self.vec = vector()\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.radial_emb.reset_parameters()\n\n def scalarization(self, pos, edge_index):\n i, j = edge_index\n dist = (pos[i] - pos[j]).pow(2).sum(dim=-1).sqrt()\n coord_diff = pos[i] - pos[j]\n radial = torch.sum((coord_diff) ** 2, 1).unsqueeze(1)\n coord_cross = torch.cross(pos[i], pos[j])\n norm = torch.sqrt(radial) + EPS\n coord_diff = coord_diff / norm\n cross_norm = (torch.sqrt(torch.sum((coord_cross) ** 2, 1).unsqueeze(1))) + EPS\n coord_cross = coord_cross / cross_norm\n coord_vertical = torch.cross(coord_diff, coord_cross)\n\n return dist, coord_diff, coord_cross, coord_vertical\n\n @staticmethod\n def assemble_nodemask(edge_index: Tensor, pos: Tensor):\n node_mask = torch.zeros(pos.size(0), device=pos.device)\n node_mask[:] = -1\n _i, _j = edge_index\n _ind = 0\n for center in range(pos.size(0)):\n if node_mask[center] > -1:\n continue\n _connected = _j[torch.where(_i == center)]\n _connected = torch.concat(\n [_connected, 
torch.tensor([center], device=pos.device)]\n )\n node_mask[_connected] = _ind\n _ind += 1\n return node_mask\n\n def forward(\n self,\n h: Tensor,\n pos: Tensor,\n edge_index: Tensor,\n edge_attr: Optional[Tensor] = None,\n node_mask: Optional[Tensor] = None,\n edge_mask: Optional[Tensor] = None,\n update_coords_mask: Optional[Tensor] = None,\n subgraph_mask: Optional[Tensor] = None,\n ):\n # if self.pos_require_grad:\n # pos.requires_grad_()\n\n if not self.object_aware:\n subgraph_mask = None\n\n i, j = edge_index\n\n # embed z, assuming last column is atom number\n z_emb = self.embedding(h)\n\n i, j = edge_index\n dist = (pos[i] - pos[j]).pow(2).sum(dim=-1).sqrt()\n inner_subgraph_mask = torch.zeros(edge_index.size(1), 1, device=dist.device)\n inner_subgraph_mask[torch.where(dist < self.cutoff)[0]] = 1\n\n all_edge_masks = inner_subgraph_mask\n if subgraph_mask is not None:\n all_edge_masks = all_edge_masks * subgraph_mask\n\n edge_index_w_cutoff = edge_index.T[torch.where(all_edge_masks > 0)[0]].T\n node_mask_w_cutoff = self.assemble_nodemask(\n edge_index=edge_index_w_cutoff, pos=pos\n )\n\n pos_frame = pos.clone()\n pos_frame = remove_mean_batch(pos_frame, node_mask_w_cutoff.long())\n\n # bulid edge-wise frame and scalarization vector features for edge update\n dist, coord_diff, coord_cross, coord_vertical = self.scalarization(\n pos_frame, edge_index\n )\n\n dist = dist * all_edge_masks.squeeze(-1)\n coord_diff = coord_diff * all_edge_masks\n coord_cross = coord_cross * all_edge_masks\n coord_vertical = coord_vertical * all_edge_masks\n\n frame = torch.cat(\n (\n coord_diff.unsqueeze(-1),\n coord_cross.unsqueeze(-1),\n coord_vertical.unsqueeze(-1),\n ),\n dim=-1,\n )\n radial_emb = self.radial_emb(dist)\n radial_emb = radial_emb * all_edge_masks\n\n f = self.radial_lin(radial_emb)\n rbounds = 0.5 * (torch.cos(dist * pi / self.cutoff) + 1.0)\n f = rbounds.unsqueeze(-1) * f\n\n # init node features\n s = self.neighbor_emb(h, z_emb, edge_index, f)\n\n NE1 = self.s2v(s, coord_diff.unsqueeze(-1), edge_index, f)\n scalrization1 = torch.sum(NE1[i].unsqueeze(2) * frame.unsqueeze(-1), dim=1)\n scalrization2 = torch.sum(NE1[j].unsqueeze(2) * frame.unsqueeze(-1), dim=1)\n if self.reflect_equiv:\n scalrization1[:, 1, :] = torch.abs(scalrization1[:, 1, :].clone())\n scalrization2[:, 1, :] = torch.abs(scalrization2[:, 1, :].clone())\n\n scalar3 = (\n self.lin3(torch.permute(scalrization1, (0, 2, 1)))\n + torch.permute(scalrization1, (0, 2, 1))[:, :, 0].unsqueeze(2)\n ).squeeze(-1)\n scalar4 = (\n self.lin3(torch.permute(scalrization2, (0, 2, 1)))\n + torch.permute(scalrization2, (0, 2, 1))[:, :, 0].unsqueeze(2)\n ).squeeze(-1)\n edgeweight = torch.cat((scalar3, scalar4), dim=-1) * rbounds.unsqueeze(-1)\n edgeweight = torch.cat((edgeweight, f), dim=-1)\n # add distance embedding\n edgeweight = torch.cat((edgeweight, radial_emb), dim=-1)\n\n # bulid node-wise frame for node-update\n a = pos_frame\n if self.legacy:\n b = self.vec(pos_frame, edge_index)\n else:\n # Added by Chenru: for new implementation of constructing node frame.\n eff_edge_ij = torch.where(all_edge_masks.squeeze(-1) == 1)[0]\n eff_edge_index = edge_index[:, eff_edge_ij]\n eff_dist = dist[eff_edge_ij]\n b = nn_vector(eff_dist, eff_edge_index, pos_frame)\n # assert_rot_equiv(nn_vector, dist_pad, edge_index, pos) # for debugging\n\n x1 = (a - b) / ((torch.sqrt(torch.sum((a - b) ** 2, 1).unsqueeze(1))) + EPS)\n y1 = torch.cross(a, b)\n normy = (torch.sqrt(torch.sum(y1**2, 1).unsqueeze(1))) + EPS\n y1 = y1 / normy\n # assert 
torch.trace(torch.matmul(x1, torch.transpose(y1, 0, 1))) < EPS # for debugging\n\n z1 = torch.cross(x1, y1)\n nodeframe = torch.cat(\n (x1.unsqueeze(-1), y1.unsqueeze(-1), z1.unsqueeze(-1)), dim=-1\n )\n\n pos_prjt = torch.sum(pos_frame.unsqueeze(-1) * nodeframe, dim=1)\n\n vec = torch.zeros(s.size(0), 3, s.size(1), device=s.device)\n gradient = torch.zeros(s.size(0), 3, device=s.device)\n for i in range(self.num_layers):\n # Added by Chenru: for letting multiple objects message passing.\n if self.legacy or i == 0:\n s = s + self.pos_expansion(pos_prjt)\n s, edgeweight = self.gcl_layers[i](\n s,\n edge_index,\n edgeweight,\n )\n\n dx, dvec = self.message_layers[i](\n s,\n vec,\n edge_index,\n radial_emb,\n edgeweight,\n coord_diff,\n coord_cross,\n )\n s = s + dx\n vec = vec + dvec\n s = s * self.inv_sqrt_2\n\n if self.update:\n dx, dvec = self.update_layers[i](s, vec, nodeframe)\n s = s + dx\n vec = vec + dvec\n\n if self.pos_grad:\n dynamic_coff = self.dynamic_mlp_modules(s) # (node, 3)\n basis_mix = (\n dynamic_coff[:, :1] * x1\n + dynamic_coff[:, 1:2] * y1\n + dynamic_coff[:, 2:3] * z1\n )\n gradient = gradient + basis_mix / self.num_layers\n\n if self.for_conf:\n return s\n\n _, dpos = self.out_pos(s, vec)\n\n if update_coords_mask is not None:\n dpos = update_coords_mask * dpos\n pos = pos + dpos + gradient\n\n if self.ff:\n return s, dpos\n\n h = self.embedding_out(s)\n if node_mask is not None:\n h = h * node_mask\n edge_attr = None\n return h, pos, edge_attr" }, { "identifier": "tensor_relative_diff", "path": "oa_reactdiff/tests/model/utils.py", "snippet": "def tensor_relative_diff(x1, x2):\ndef init_weights(m):\ndef generate_full_eij(n_atom: int):\ndef get_cut_graph_mask(edge_index, n_cut):" } ]
import unittest import torch import numpy as np from pytorch_lightning import seed_everything from oa_reactdiff.model import EGNN, LEFTNet from .utils import tensor_relative_diff, egnn_config, init_weights, left_config
5,581
"""Test model forward pass and equivariance.""" default_float = torch.float64 torch.set_default_dtype(default_float) EPS = 1e-8 TIGHT_EPS = 1e-8 theta = 0.4 alpha = 0.9 seed_everything(42, workers=True) def com(x): return x - torch.mean(x, dim=0) class TestModel(unittest.TestCase): @classmethod def setUpClass(cls) -> None: cls.egnn = EGNN(**egnn_config) cls.leftnet = LEFTNet(**left_config) cls.edge_index = torch.tensor( [[0, 1, 1, 2, 3, 0], [1, 0, 2, 1, 0, 3]], dtype=torch.long ) cls.h = torch.rand(4, egnn_config["in_node_nf"]) cls.pos = torch.rand(4, 3) cls.edge_attr = torch.rand(cls.edge_index.size(1), egnn_config["in_edge_nf"]) egnn_config.update({"reflect_equiv": False}) cls.egnn_no_reflect_equiv = EGNN(**egnn_config) egnn_config.update({"reflect_equiv": True}) left_config.update({"reflect_equiv": False}) cls.leftnet_no_reflect_equiv = LEFTNet(**left_config) left_config.update({"reflect_equiv": True}) egnn_config["in_edge_nf"] = 0 cls.egnn_no_edge_attr = EGNN(**egnn_config) cls.edge_attr_zeros = torch.rand( cls.edge_index.size(1), egnn_config["in_edge_nf"] ) cls.edge_attr_null = None rot_x = torch.tensor( [ [1, 0, 0], [0, np.cos(theta), -np.sin(theta)], [0, np.sin(theta), np.cos(theta)], ], dtype=default_float, ) rot_y = torch.tensor( [ [np.cos(alpha), 0, np.sin(alpha)], [0, 1, 0], [-np.sin(alpha), 0, np.cos(alpha)], ], dtype=default_float, ) cls.rot = torch.matmul(rot_y, rot_x).double() cls.trans = torch.rand(3) * 100 cls.egnn.apply(init_weights) cls.egnn_no_edge_attr.apply(init_weights) cls.leftnet.apply(init_weights) cls.models = [cls.egnn, cls.leftnet] cls.models_no_edge_attr = [cls.egnn_no_edge_attr, cls.leftnet] cls.models_no_reflect_equiv = [ cls.egnn_no_reflect_equiv, cls.leftnet_no_reflect_equiv, ] def test_rotation(self): for model in self.models: _h, _pos, _edge_attr = model.forward( self.h, self.pos, self.edge_index, self.edge_attr ) _h_rot, _pos_rot, _edge_attr_rot = model.forward( self.h, torch.matmul(self.pos, self.rot).double(), self.edge_index, self.edge_attr, )
"""Test model forward pass and equivariance.""" default_float = torch.float64 torch.set_default_dtype(default_float) EPS = 1e-8 TIGHT_EPS = 1e-8 theta = 0.4 alpha = 0.9 seed_everything(42, workers=True) def com(x): return x - torch.mean(x, dim=0) class TestModel(unittest.TestCase): @classmethod def setUpClass(cls) -> None: cls.egnn = EGNN(**egnn_config) cls.leftnet = LEFTNet(**left_config) cls.edge_index = torch.tensor( [[0, 1, 1, 2, 3, 0], [1, 0, 2, 1, 0, 3]], dtype=torch.long ) cls.h = torch.rand(4, egnn_config["in_node_nf"]) cls.pos = torch.rand(4, 3) cls.edge_attr = torch.rand(cls.edge_index.size(1), egnn_config["in_edge_nf"]) egnn_config.update({"reflect_equiv": False}) cls.egnn_no_reflect_equiv = EGNN(**egnn_config) egnn_config.update({"reflect_equiv": True}) left_config.update({"reflect_equiv": False}) cls.leftnet_no_reflect_equiv = LEFTNet(**left_config) left_config.update({"reflect_equiv": True}) egnn_config["in_edge_nf"] = 0 cls.egnn_no_edge_attr = EGNN(**egnn_config) cls.edge_attr_zeros = torch.rand( cls.edge_index.size(1), egnn_config["in_edge_nf"] ) cls.edge_attr_null = None rot_x = torch.tensor( [ [1, 0, 0], [0, np.cos(theta), -np.sin(theta)], [0, np.sin(theta), np.cos(theta)], ], dtype=default_float, ) rot_y = torch.tensor( [ [np.cos(alpha), 0, np.sin(alpha)], [0, 1, 0], [-np.sin(alpha), 0, np.cos(alpha)], ], dtype=default_float, ) cls.rot = torch.matmul(rot_y, rot_x).double() cls.trans = torch.rand(3) * 100 cls.egnn.apply(init_weights) cls.egnn_no_edge_attr.apply(init_weights) cls.leftnet.apply(init_weights) cls.models = [cls.egnn, cls.leftnet] cls.models_no_edge_attr = [cls.egnn_no_edge_attr, cls.leftnet] cls.models_no_reflect_equiv = [ cls.egnn_no_reflect_equiv, cls.leftnet_no_reflect_equiv, ] def test_rotation(self): for model in self.models: _h, _pos, _edge_attr = model.forward( self.h, self.pos, self.edge_index, self.edge_attr ) _h_rot, _pos_rot, _edge_attr_rot = model.forward( self.h, torch.matmul(self.pos, self.rot).double(), self.edge_index, self.edge_attr, )
print(tensor_relative_diff(_h, _h_rot))
2
2023-10-30 02:53:38+00:00
8k
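The test module in the record above checks rotational equivariance by rotating the input positions and comparing the new model output against the original output. The same check can be demonstrated numerically with a toy update rule; the sketch below uses NumPy and a hand-rolled equivariant function instead of EGNN/LEFTNet, so it only illustrates the test pattern, not the models themselves.

import numpy as np

def toy_equivariant_update(pos: np.ndarray) -> np.ndarray:
    # Pull every point toward the centroid; the update depends only on
    # relative geometry, so it commutes with a global rotation.
    return pos + 0.1 * (pos.mean(axis=0, keepdims=True) - pos)

theta, alpha = 0.4, 0.9
rot_x = np.array([
    [1, 0, 0],
    [0, np.cos(theta), -np.sin(theta)],
    [0, np.sin(theta), np.cos(theta)],
])
rot_y = np.array([
    [np.cos(alpha), 0, np.sin(alpha)],
    [0, 1, 0],
    [-np.sin(alpha), 0, np.cos(alpha)],
])
rot = rot_y @ rot_x

pos = np.random.default_rng(42).random((4, 3))
rotate_after = toy_equivariant_update(pos) @ rot   # f(x) R
rotate_before = toy_equivariant_update(pos @ rot)  # f(x R)
assert np.allclose(rotate_after, rotate_before)
print("max deviation:", np.abs(rotate_after - rotate_before).max())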
Weitheskmt/WeiDMD
build/lib/weidmd/hodmd.py
[ { "identifier": "HankelDMD", "path": "build/lib/weidmd/hankeldmd.py", "snippet": "class HankelDMD(DMDBase):\n \"\"\"\n Hankel Dynamic Mode Decomposition\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param int tlsq_rank: rank truncation computing Total Least Square. Default\n is 0, that means no truncation.\n :param bool exact: flag to compute either exact DMD or projected DMD.\n Default is False.\n :param opt: argument to control the computation of DMD modes amplitudes.\n See :class:`DMDBase`. Default is False.\n :type opt: bool or int\n :param rescale_mode: Scale Atilde as shown in\n 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its\n eigendecomposition. None means no rescaling, 'auto' means automatic\n rescaling using singular values, otherwise the scaling factors.\n :type rescale_mode: {'auto'} or None or numpy.ndarray\n :param bool forward_backward: If True, the low-rank operator is computed\n like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is\n False.\n :param int d: the new order for spatial dimension of the input snapshots.\n Default is 1.\n :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by\n magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary\n part to break ties) if `sorted_eigs='real'`. Default: False.\n :type sorted_eigs: {'real', 'abs'} or False\n :param reconstruction_method: Method used to reconstruct the snapshots of\n the dynamical system from the multiple versions available due to how\n HankelDMD is conceived. If `'first'` (default) the first version\n available is selected (i.e. the nearest to the 0-th row in the\n augmented matrix). If `'mean'` we compute the element-wise mean. 
If\n `reconstruction_method` is an array of float values we compute the\n weighted average (for each snapshots) using the given values as weights\n (the number of weights must be equal to `d`).\n :type reconstruction_method: {'first', 'mean'} or array-like\n \"\"\"\n\n def __init__(\n self,\n svd_rank=0,\n tlsq_rank=0,\n exact=False,\n opt=False,\n rescale_mode=None,\n forward_backward=False,\n d=1,\n sorted_eigs=False,\n reconstruction_method=\"first\",\n tikhonov_regularization=None,\n ):\n super().__init__(\n svd_rank=svd_rank,\n tlsq_rank=tlsq_rank,\n exact=exact,\n opt=opt,\n rescale_mode=rescale_mode,\n sorted_eigs=sorted_eigs,\n )\n self._d = d\n\n if isinstance(reconstruction_method, list):\n if len(reconstruction_method) != d:\n raise ValueError(\n \"The length of the array of weights must be equal to d\"\n )\n elif isinstance(reconstruction_method, np.ndarray):\n if (\n reconstruction_method.ndim > 1\n or reconstruction_method.shape[0] != d\n ):\n raise ValueError(\n \"The length of the array of weights must be equal to d\"\n )\n self._reconstruction_method = reconstruction_method\n\n self._sub_dmd = DMD(\n svd_rank=svd_rank,\n tlsq_rank=tlsq_rank,\n exact=exact,\n opt=opt,\n rescale_mode=rescale_mode,\n forward_backward=forward_backward,\n sorted_eigs=sorted_eigs,\n tikhonov_regularization=tikhonov_regularization,\n )\n\n @property\n def d(self):\n \"\"\"The new order for spatial dimension of the input snapshots.\"\"\"\n return self._d\n\n def _hankel_first_occurrence(self, time):\n r\"\"\"\n For a given `t` such that there is :math:`k \\in \\mathbb{N}` such that\n :math:`t = t_0 + k dt`, return the index of the first column in Hankel\n pseudo matrix (see also :func:`_pseudo_hankel_matrix`) which contains\n the snapshot corresponding to `t`.\n\n :param time: The time corresponding to the requested snapshot.\n :return: The index of the first appeareance of `time` in the columns of\n Hankel pseudo matrix.\n :rtype: int\n \"\"\"\n assert isinstance(time, Number) or np.asarray(time).ndim == 1\n return max(\n 0,\n (time - self.original_time[\"t0\"]) // self.dmd_time[\"dt\"]\n - (self.original_time[\"t0\"] + self.d - 1),\n )\n\n def _update_sub_dmd_time(self):\n \"\"\"\n Update the time dictionaries (`dmd_time` and `original_time`) of\n the auxiliary DMD instance `HankelDMD._sub_dmd` after an update of the\n time dictionaries of the time dictionaries of this instance of the\n higher level instance of `HankelDMD`.\n \"\"\"\n self._sub_dmd.dmd_time[\"t0\"] = self._hankel_first_occurrence(\n self.dmd_time[\"t0\"]\n )\n self._sub_dmd.dmd_time[\"tend\"] = self._hankel_first_occurrence(\n self.dmd_time[\"tend\"]\n )\n\n def reconstructions_of_timeindex(self, timeindex=None):\n \"\"\"\n Build a collection of all the available versions of the given\n `timeindex`. The indexing of time instants is the same used for\n :func:`reconstructed_data`. For each time instant there are at least\n one and at most `d` versions. 
If `timeindex` is `None` the function\n returns the whole collection, for all the time instants.\n\n :param int timeindex: The index of the time snapshot.\n :return: a collection of all the available versions for the given\n time snapshot, or for all the time snapshots if `timeindex` is\n `None` (in the second case, time varies along the first dimension\n of the array returned).\n :rtype: numpy.ndarray or list\n \"\"\"\n self._update_sub_dmd_time()\n\n rec = self._sub_dmd.reconstructed_data\n space_dim = rec.shape[0] // self.d\n time_instants = rec.shape[1] + self.d - 1\n\n # for each time instance, we collect all its appearences. each\n # snapshot appears at most d times (for instance, the first appears\n # only once).\n reconstructed_snapshots = np.full(\n (time_instants, self.d, space_dim), np.nan, dtype=rec.dtype\n )\n\n c_idxes = (\n np.array(range(self.d))[:, None]\n .repeat(2, axis=1)[None, :]\n .repeat(rec.shape[1], axis=0)\n )\n c_idxes[:, :, 0] += np.array(range(rec.shape[1]))[:, None]\n\n reconstructed_snapshots[c_idxes[:, :, 0], c_idxes[:, :, 1]] = np.array(\n np.swapaxes(np.split(rec.T, self.d, axis=1), 0, 1)\n )\n\n if timeindex is None:\n return reconstructed_snapshots\n\n return reconstructed_snapshots[timeindex]\n\n def _first_reconstructions(self, reconstructions):\n \"\"\"Return the first occurrence of each snapshot available in the given\n matrix (which must be the result of `self._sub_dmd.reconstructed_data`,\n or have the same shape).\n\n :param reconstructions: A matrix of (higher-order) snapshots having\n shape `(space*self.d, time_instants)`\n :type reconstructions: np.ndarray\n :return: The first snapshot that occurs in `reconstructions` for each\n available time instant.\n :rtype: np.ndarray\n \"\"\"\n first_nonmasked_idx = np.repeat(\n np.array(range(reconstructions.shape[0]))[:, None], 2, axis=1\n )\n first_nonmasked_idx[self.d - 1 :, 1] = self.d - 1\n\n return reconstructions[\n first_nonmasked_idx[:, 0], first_nonmasked_idx[:, 1]\n ].T\n\n @property\n def reconstructed_data(self):\n self._update_sub_dmd_time()\n\n rec = self.reconstructions_of_timeindex()\n rec = np.ma.array(rec, mask=np.isnan(rec))\n\n if self._reconstruction_method == \"first\":\n result = self._first_reconstructions(rec)\n elif self._reconstruction_method == \"mean\":\n result = np.mean(rec, axis=1).T\n elif isinstance(self._reconstruction_method, (np.ndarray, list)):\n result = np.average(\n rec, axis=1, weights=self._reconstruction_method\n ).T\n else:\n raise ValueError(\n \"The reconstruction method wasn't recognized: {}\".format(\n self._reconstruction_method\n )\n )\n\n # we want to return only the requested timesteps\n time_index = min(\n self.d - 1,\n int(\n (self.dmd_time[\"t0\"] - self.original_time[\"t0\"])\n // self.dmd_time[\"dt\"]\n ),\n )\n result = result[:, time_index : time_index + len(self.dmd_timesteps)]\n\n return result.filled(fill_value=0)\n\n def _pseudo_hankel_matrix(self, X):\n \"\"\"\n Arrange the snapshot in the matrix `X` into the (pseudo) Hankel\n matrix. 
The attribute `d` controls the number of snapshot from `X` in\n each snapshot of the Hankel matrix.\n\n :Example:\n\n >>> from pydmd import HankelDMD\n >>> import numpy as np\n\n >>> dmd = HankelDMD(d=2)\n >>> a = np.array([[1, 2, 3, 4, 5]])\n >>> dmd._pseudo_hankel_matrix(a)\n array([[1, 2, 3, 4],\n [2, 3, 4, 5]])\n >>> dmd = HankelDMD(d=4)\n >>> dmd._pseudo_hankel_matrix(a)\n array([[1, 2],\n [2, 3],\n [3, 4],\n [4, 5]])\n\n >>> dmd = HankelDMD(d=2)\n >>> a = np.array([1,2,3,4,5,6]).reshape(2,3)\n >>> a\n array([[1, 2, 3],\n [4, 5, 6]])\n >>> dmd._pseudo_hankel_matrix(a)\n array([[1, 2],\n [4, 5],\n [2, 3],\n [5, 6]])\n \"\"\"\n return (\n swv(X.T, (self.d, X.shape[0]))[:, 0]\n .reshape(X.shape[1] - self.d + 1, -1)\n .T\n )\n\n @property\n def modes(self):\n return self._sub_dmd.modes\n\n @property\n def eigs(self):\n return self._sub_dmd.eigs\n\n @property\n def amplitudes(self):\n return self._sub_dmd.amplitudes\n\n @property\n def operator(self):\n return self._sub_dmd.operator\n\n @property\n def svd_rank(self):\n return self._sub_dmd.svd_rank\n\n @property\n def modes_activation_bitmask(self):\n return self._sub_dmd.modes_activation_bitmask\n\n @modes_activation_bitmask.setter\n def modes_activation_bitmask(self, value):\n self._sub_dmd.modes_activation_bitmask = value\n\n # due to how we implemented HankelDMD we need an alternative implementation\n # of __getitem__\n def __getitem__(self, key):\n \"\"\"\n Restrict the DMD modes used by this instance to a subset of indexes\n specified by keys. The value returned is a shallow copy of this DMD\n instance, with a different value in :func:`modes_activation_bitmask`.\n Therefore assignments to attributes are not reflected into the original\n instance.\n\n However the DMD instance returned should not be used for low-level\n manipulations on DMD modes, since the underlying DMD operator is shared\n with the original instance. 
For this reasons modifications to NumPy\n arrays may result in unwanted and unspecified situations which should\n be avoided in principle.\n\n :param key: An index (integer), slice or list of indexes.\n :type key: int or slice or list or np.ndarray\n :return: A shallow copy of this DMD instance having only a subset of\n DMD modes which are those indexed by `key`.\n :rtype: HankelDMD\n \"\"\"\n\n sub_dmd_copy = copy(self._sub_dmd)\n sub_dmd_copy._allocate_modes_bitmask_proxy()\n\n shallow_copy = copy(self)\n shallow_copy._sub_dmd = sub_dmd_copy\n return DMDBase.__getitem__(shallow_copy, key)\n\n def fit(self, X):\n \"\"\"\n Compute the Dynamic Modes Decomposition to the input data.\n\n :param X: the input snapshots.\n :type X: numpy.ndarray or iterable\n \"\"\"\n self._reset()\n\n self._snapshots_holder = Snapshots(X)\n\n n_samples = self.snapshots.shape[-1]\n if n_samples < self._d:\n msg = \"\"\"The number of snapshots provided is not enough for d={}.\nExpected at least d.\"\"\"\n raise ValueError(msg.format(self._d))\n\n ho_snapshots = Snapshots(\n self._pseudo_hankel_matrix(self.snapshots)\n ).snapshots\n self._sub_dmd.fit(ho_snapshots)\n\n # Default timesteps\n self._set_initial_time_dictionary(\n {\"t0\": 0, \"tend\": n_samples - 1, \"dt\": 1}\n )\n\n return self" }, { "identifier": "compute_svd", "path": "build/lib/weidmd/utils.py", "snippet": "def compute_svd(X, svd_rank=0):\n \"\"\"\n Truncated Singular Value Decomposition.\n\n :param numpy.ndarray X: the matrix to decompose.\n :param svd_rank: the rank for the truncation; If 0, the method computes\n the optimal rank and uses it for truncation; if positive interger,\n the method uses the argument for the truncation; if float between 0\n and 1, the rank is the number of the biggest singular values that\n are needed to reach the 'energy' specified by `svd_rank`; if -1,\n the method does not compute truncation. Default is 0.\n :type svd_rank: int or float\n :return: the truncated left-singular vectors matrix, the truncated\n singular values array, the truncated right-singular vectors matrix.\n :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray\n\n References:\n Gavish, Matan, and David L. Donoho, The optimal hard threshold for\n singular values is, IEEE Transactions on Information Theory 60.8\n (2014): 5040-5053.\n \"\"\"\n U, s, V = np.linalg.svd(X, full_matrices=False)\n V = V.conj().T\n\n def omega(x):\n return 0.56 * x**3 - 0.95 * x**2 + 1.82 * x + 1.43\n\n if svd_rank == 0:\n beta = np.divide(*sorted(X.shape))\n tau = np.median(s) * omega(beta)\n rank = np.sum(s > tau)\n if rank == 0:\n warnings.warn(\n \"SVD optimal rank is 0. The largest singular values are \"\n \"indistinguishable from noise. 
Setting rank truncation to 1.\",\n RuntimeWarning,\n )\n rank = 1\n elif 0 < svd_rank < 1:\n cumulative_energy = np.cumsum(s**2 / (s**2).sum())\n rank = np.searchsorted(cumulative_energy, svd_rank) + 1\n elif svd_rank >= 1 and isinstance(svd_rank, int):\n rank = min(svd_rank, U.shape[1])\n else:\n rank = X.shape[1]\n\n U = U[:, :rank]\n V = V[:, :rank]\n s = s[:rank]\n\n return U, s, V" }, { "identifier": "Snapshots", "path": "build/lib/weidmd/snapshots.py", "snippet": "class Snapshots:\n \"\"\"\n Utility class to preprocess snapshots shape for DMD.\n\n This class expects the time to be the last dimensions of the array.\n If a Python list is passed to the constructor, each element in the\n list is assumed to be a snapshot in time.\n\n Space dimensions are flattened (C-order) such that the\n matrix becomes 2D (time changes along the last axis).\n\n :param numpy.array | list(numpy.array) X: Training snapshots.\n \"\"\"\n\n def __init__(self, X):\n (\n self._snapshots,\n self._snapshots_shape,\n ) = Snapshots._unroll_space_dimensions(X)\n\n if self._snapshots.shape[-1] == 1:\n raise ValueError(\"Received only one time snapshot.\")\n\n Snapshots._check_condition_number(self._snapshots)\n\n logging.info(\n \"Snapshots: %s, snapshot shape: %s\",\n self._snapshots.shape,\n self._snapshots_shape,\n )\n\n @staticmethod\n def _unroll_space_dimensions(X):\n if hasattr(X, \"ndim\"):\n if X.ndim == 1:\n raise ValueError(\n \"Expected at least a 2D matrix (space x time).\"\n )\n snapshots = X.reshape((-1, X.shape[-1]))\n shapes = set((X.shape[:-1],))\n else:\n shapes, arrays = zip(\n *[(xarr.shape, xarr.flatten()) for xarr in map(np.asarray, X)]\n )\n\n shapes = set(shapes)\n if len(shapes) != 1:\n raise ValueError(\n f\"Snapshots must have the same size, found {len(shapes)}.\"\n )\n if len(next(iter(shapes))) == 0:\n raise ValueError(\"Expected at least a 2D matrix\")\n\n # move the time to the last axis\n snapshots = np.moveaxis(np.stack(arrays), 0, -1)\n\n return snapshots, shapes.pop()\n\n @staticmethod\n def _check_condition_number(X):\n cond_number = np.linalg.cond(X)\n if cond_number > 10e4:\n warnings.warn(\n f\"Input data condition number {cond_number}. \"\n \"\"\"Consider preprocessing data, passing in augmented data\nmatrix, or regularization methods.\"\"\"\n )\n\n @property\n def snapshots(self):\n \"\"\"\n Snapshots of the system (space flattened).\n \"\"\"\n return self._snapshots\n\n @property\n def snapshots_shape(self):\n \"\"\"\n Original (i.e. non-flattened) snapshot shape (time is ignored).\n \"\"\"\n return self._snapshots_shape" } ]
import warnings import numpy as np from .hankeldmd import HankelDMD from .utils import compute_svd from .snapshots import Snapshots
6,230
:param int tlsq_rank: rank truncation computing Total Least Square. Default is 0, that means no truncation. :param bool exact: flag to compute either exact DMD or projected DMD. Default is False. :param opt: argument to control the computation of DMD modes amplitudes. See :class:`DMDBase`. Default is False. :type opt: bool or int :param rescale_mode: Scale Atilde as shown in 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its eigendecomposition. None means no rescaling, 'auto' means automatic rescaling using singular values, otherwise the scaling factors. :type rescale_mode: {'auto'} or None or numpy.ndarray :param bool forward_backward: If True, the low-rank operator is computed like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is False. :param int d: the new order for spatial dimension of the input snapshots. Default is 1. :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary part to break ties) if `sorted_eigs='real'`. Default: False. :type sorted_eigs: {'real', 'abs'} or False :param reconstruction_method: Method used to reconstruct the snapshots of the dynamical system from the multiple versions available due to how HODMD is conceived. If `'first'` (default) the first version available is selected (i.e. the nearest to the 0-th row in the augmented matrix). If `'mean'` we compute the element-wise mean. If `reconstruction_method` is an array of float values we compute the weighted average (for each snapshots) using the given values as weights (the number of weights must be equal to `d`). :param svd_rank_extra: the rank for the initial reduction of the input data, performed before the rearrangement of the input data to the (pseudo) Hankel matrix format; If 0, the method computes the optimal rank and uses it for truncation; if positive interger, the method uses the argument for the truncation; if float between 0 and 1, the rank is the number of the biggest singular values that are needed to reach the 'energy' specified by `svd_rank`; if -1, the method does not compute truncation. :type svd_rank: int or float """ def __init__( self, svd_rank=0, tlsq_rank=0, exact=False, opt=False, rescale_mode=None, forward_backward=False, d=1, sorted_eigs=False, reconstruction_method="first", svd_rank_extra=0, ): super().__init__( svd_rank=svd_rank, tlsq_rank=tlsq_rank, exact=exact, opt=opt, rescale_mode=rescale_mode, forward_backward=forward_backward, d=d, sorted_eigs=sorted_eigs, reconstruction_method=reconstruction_method, ) self._svd_rank_extra = svd_rank_extra # TODO improve names self.U_extra = None def reconstructions_of_timeindex(self, timeindex=None): """ Build a collection of all the available versions of the given `timeindex`. The indexing of time instants is the same used for :func:`reconstructed_data`. For each time instant there are at least one and at most `d` versions. If `timeindex` is `None` the function returns the whole collection, for all the time instants. :param int timeindex: The index of the time snapshot. :return: A collection of all the available versions for the requested time instants, represented by a matrix (or tensor). Axes: 0. Number of time instants; 1. Copies of the snapshot; 2. Space dimension of the snapshot. The first axis is omitted if only one single time instant is selected, in this case the output becomes a 2D matrix. 
:rtype: numpy.ndarray """ snapshots = super().reconstructions_of_timeindex(timeindex) if snapshots.ndim == 2: # single time instant snapshots = self.U_extra.dot(snapshots.T).T elif snapshots.ndim == 3: # all time instants snapshots = np.array( [self.U_extra.dot(snapshot.T).T for snapshot in snapshots] ) else: raise RuntimeError return snapshots def fit(self, X): """ Compute the Dynamic Modes Decomposition to the input data. :param X: the input snapshots. :type X: numpy.ndarray or iterable """ snapshots_holder = Snapshots(X) snapshots = snapshots_holder.snapshots space_dim = snapshots.shape[0] if space_dim == 1: svd_rank_extra = -1 warnings.warn( ( f"The parameter 'svd_rank_extra={self._svd_rank_extra}' has " "been ignored because the given system is a scalar function" ) ) else: svd_rank_extra = self._svd_rank_extra
class HODMD(HankelDMD): """ Higher Order Dynamic Mode Decomposition :param svd_rank: the rank for the truncation; If 0, the method computes the optimal rank and uses it for truncation; if positive interger, the method uses the argument for the truncation; if float between 0 and 1, the rank is the number of the biggest singular values that are needed to reach the 'energy' specified by `svd_rank`; if -1, the method does not compute truncation. :type svd_rank: int or float :param int tlsq_rank: rank truncation computing Total Least Square. Default is 0, that means no truncation. :param bool exact: flag to compute either exact DMD or projected DMD. Default is False. :param opt: argument to control the computation of DMD modes amplitudes. See :class:`DMDBase`. Default is False. :type opt: bool or int :param rescale_mode: Scale Atilde as shown in 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its eigendecomposition. None means no rescaling, 'auto' means automatic rescaling using singular values, otherwise the scaling factors. :type rescale_mode: {'auto'} or None or numpy.ndarray :param bool forward_backward: If True, the low-rank operator is computed like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is False. :param int d: the new order for spatial dimension of the input snapshots. Default is 1. :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary part to break ties) if `sorted_eigs='real'`. Default: False. :type sorted_eigs: {'real', 'abs'} or False :param reconstruction_method: Method used to reconstruct the snapshots of the dynamical system from the multiple versions available due to how HODMD is conceived. If `'first'` (default) the first version available is selected (i.e. the nearest to the 0-th row in the augmented matrix). If `'mean'` we compute the element-wise mean. If `reconstruction_method` is an array of float values we compute the weighted average (for each snapshots) using the given values as weights (the number of weights must be equal to `d`). :param svd_rank_extra: the rank for the initial reduction of the input data, performed before the rearrangement of the input data to the (pseudo) Hankel matrix format; If 0, the method computes the optimal rank and uses it for truncation; if positive interger, the method uses the argument for the truncation; if float between 0 and 1, the rank is the number of the biggest singular values that are needed to reach the 'energy' specified by `svd_rank`; if -1, the method does not compute truncation. :type svd_rank: int or float """ def __init__( self, svd_rank=0, tlsq_rank=0, exact=False, opt=False, rescale_mode=None, forward_backward=False, d=1, sorted_eigs=False, reconstruction_method="first", svd_rank_extra=0, ): super().__init__( svd_rank=svd_rank, tlsq_rank=tlsq_rank, exact=exact, opt=opt, rescale_mode=rescale_mode, forward_backward=forward_backward, d=d, sorted_eigs=sorted_eigs, reconstruction_method=reconstruction_method, ) self._svd_rank_extra = svd_rank_extra # TODO improve names self.U_extra = None def reconstructions_of_timeindex(self, timeindex=None): """ Build a collection of all the available versions of the given `timeindex`. The indexing of time instants is the same used for :func:`reconstructed_data`. For each time instant there are at least one and at most `d` versions. If `timeindex` is `None` the function returns the whole collection, for all the time instants. 
:param int timeindex: The index of the time snapshot. :return: A collection of all the available versions for the requested time instants, represented by a matrix (or tensor). Axes: 0. Number of time instants; 1. Copies of the snapshot; 2. Space dimension of the snapshot. The first axis is omitted if only one single time instant is selected, in this case the output becomes a 2D matrix. :rtype: numpy.ndarray """ snapshots = super().reconstructions_of_timeindex(timeindex) if snapshots.ndim == 2: # single time instant snapshots = self.U_extra.dot(snapshots.T).T elif snapshots.ndim == 3: # all time instants snapshots = np.array( [self.U_extra.dot(snapshot.T).T for snapshot in snapshots] ) else: raise RuntimeError return snapshots def fit(self, X): """ Compute the Dynamic Modes Decomposition to the input data. :param X: the input snapshots. :type X: numpy.ndarray or iterable """ snapshots_holder = Snapshots(X) snapshots = snapshots_holder.snapshots space_dim = snapshots.shape[0] if space_dim == 1: svd_rank_extra = -1 warnings.warn( ( f"The parameter 'svd_rank_extra={self._svd_rank_extra}' has " "been ignored because the given system is a scalar function" ) ) else: svd_rank_extra = self._svd_rank_extra
self.U_extra, _, _ = compute_svd(snapshots, svd_rank_extra)
1
2023-10-30 12:37:40+00:00
8k
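The compute_svd helper reproduced in the context above selects the truncation rank in three ways: the Gavish-Donoho optimal hard threshold when svd_rank=0, a cumulative-energy criterion when 0 < svd_rank < 1, and a fixed integer otherwise. Below is a minimal, self-contained sketch of the energy-based branch; the random test matrix and the 0.9 threshold are illustrative assumptions, not values from this record. This is the same initial reduction HODMD.fit applies (via the next_line call above) before building the pseudo-Hankel matrix.

import numpy as np

# Illustrative sketch of the energy-based rank selection described in
# compute_svd's docstring; the input matrix and threshold are made up.
rng = np.random.default_rng(0)
X = rng.standard_normal((64, 20))

U, s, Vh = np.linalg.svd(X, full_matrices=False)
V = Vh.conj().T

svd_rank = 0.9  # smallest rank reaching 90% of the squared-singular-value "energy"
cumulative_energy = np.cumsum(s**2 / (s**2).sum())
rank = int(np.searchsorted(cumulative_energy, svd_rank)) + 1

U_r, s_r, V_r = U[:, :rank], s[:rank], V[:, :rank]
print(rank, U_r.shape, s_r.shape, V_r.shape)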
lewandofskee/DiAD
ldm/models/diffusion/ddim.py
[ { "identifier": "make_ddim_sampling_parameters", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())\n\n # according the the formula provided in https://arxiv.org/abs/2010.02502\n sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))\n if verbose:\n print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')\n print(f'For the chosen value of eta, which is {eta}, '\n f'this results in the following sigma_t schedule for ddim sampler {sigmas}')\n return sigmas, alphas, alphas_prev" }, { "identifier": "make_ddim_timesteps", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):\n if ddim_discr_method == 'uniform':\n c = num_ddpm_timesteps // num_ddim_timesteps\n ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))\n elif ddim_discr_method == 'quad':\n ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)\n else:\n raise NotImplementedError(f'There is no ddim discretization method called \"{ddim_discr_method}\"')\n\n # assert ddim_timesteps.shape[0] == num_ddim_timesteps\n # add one to get the final alpha values right (the ones from first scale to data during sampling)\n steps_out = ddim_timesteps + 1\n if verbose:\n print(f'Selected timesteps for ddim sampler: {steps_out}')\n return steps_out" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" } ]
import torch import numpy as np from tqdm import tqdm from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
4,046
assert isinstance(unconditional_conditioning, dict) c_in = dict() for k in c: if isinstance(c[k], list): c_in[k] = [torch.cat([ unconditional_conditioning[k][i], c[k][i]]) for i in range(len(c[k]))] else: c_in[k] = torch.cat([ unconditional_conditioning[k], c[k]]) elif isinstance(c, list): c_in = list() assert isinstance(unconditional_conditioning, list) for i in range(len(c)): c_in.append(torch.cat([unconditional_conditioning[i], c[i]])) else: c_in = torch.cat([unconditional_conditioning, c]) model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond) if self.model.parameterization == "v": e_t = self.model.predict_eps_from_z_and_v(x, t, model_output) else: e_t = model_output if score_corrector is not None: assert self.model.parameterization == "eps", 'not implemented' e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas # select parameters corresponding to the currently considered timestep a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) # current prediction for x_0 if self.model.parameterization != "v": pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() else: pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output) if quantize_denoised: pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) if dynamic_threshold is not None: raise NotImplementedError() # direction pointing to x_t dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise return x_prev, pred_x0 @torch.no_grad() def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None): num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] assert t_enc <= num_reference_steps num_steps = t_enc if use_original_steps: alphas_next = self.alphas_cumprod[:num_steps] alphas = self.alphas_cumprod_prev[:num_steps] else: alphas_next = self.ddim_alphas[:num_steps] alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) x_next = x0 intermediates = [] inter_steps = [] for i in tqdm(range(num_steps), desc='Encoding Image'): t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long) if unconditional_guidance_scale == 1.: noise_pred = self.model.apply_model(x_next, t, c) else: assert unconditional_conditioning is not None e_t_uncond, noise_pred = torch.chunk( self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)), torch.cat((unconditional_conditioning, c))), 2) noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond) xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next weighted_noise_pred = alphas_next[i].sqrt() * ( (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred x_next = xt_weighted + weighted_noise_pred if return_intermediates and i % ( num_steps // return_intermediates) == 0 and i < num_steps - 1: intermediates.append(x_next) inter_steps.append(i) elif return_intermediates and i >= num_steps - 2: intermediates.append(x_next) inter_steps.append(i) if callback: callback(i) out = {'x_encoded': x_next, 'intermediate_steps': inter_steps} if return_intermediates: out.update({'intermediates': intermediates}) return x_next, out @torch.no_grad() def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): # fast, but does not allow for exact reconstruction # t serves as an index to gather the correct alphas if use_original_steps: sqrt_alphas_cumprod = self.sqrt_alphas_cumprod sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod else: sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas if noise is None: noise = torch.randn_like(x0)
"""SAMPLING ONLY.""" class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True,timesteps=1000): self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) alphas_cumprod = self.model.alphas_cumprod assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) self.register_buffer('betas', to_torch(self.model.betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) # ddim sampling parameters ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta,verbose=verbose) self.register_buffer('ddim_sigmas', ddim_sigmas) self.register_buffer('ddim_alphas', ddim_alphas) self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) @torch.no_grad() def sample(self, S, batch_size, shape, conditioning=None, x_T=None, timesteps=1000, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0., mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, verbose=True, log_every_t=100, unconditional_guidance_scale=1., unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
dynamic_threshold=None, ucg_schedule=None, **kwargs ): if conditioning is not None: if isinstance(conditioning, dict): ctmp = conditioning[list(conditioning.keys())[0]] while isinstance(ctmp, list): ctmp = ctmp[0] cbs = ctmp.shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") elif isinstance(conditioning, list): for ctmp in conditioning: if ctmp.shape[0] != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") else: if conditioning.shape[0] != batch_size: print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose,timesteps=timesteps) # sampling C, H, W = shape size = (batch_size, C, H, W) # print(f'Data shape for DDIM sampling is {size}, eta {eta}') samples, intermediates = self.ddim_sampling(conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, timesteps=timesteps, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold, ucg_schedule=ucg_schedule, ) return samples, intermediates @torch.no_grad() def ddim_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None, ucg_schedule=None): device = self.model.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T if timesteps is None: timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps elif timesteps is not None and not ddim_use_original_steps: subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) # subset_end = int(timesteps+1 * self.ddim_timesteps.shape[0] / self.ddpm_num_timesteps) timesteps = self.ddim_timesteps[:subset_end] intermediates = {'x_inter': [img], 'pred_x0': [img]} time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] # print(f"Running DDIM Sampling with {total_steps} timesteps") iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) for i, step in enumerate(iterator): index = total_steps - i - 1 ts = torch.full((b,), step, device=device, dtype=torch.long) if mask is not None: assert x0 is not None img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? img = img_orig * mask + (1. 
- mask) * img if ucg_schedule is not None: assert len(ucg_schedule) == len(time_range) unconditional_guidance_scale = ucg_schedule[i] outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold) img, pred_x0 = outs if callback: callback(i) if img_callback: img_callback(pred_x0, i) if index % 500 == 0 or index == total_steps - 1: intermediates['x_inter'].append(img) intermediates['pred_x0'].append(pred_x0) return img, intermediates @torch.no_grad() def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None): b, *_, device = *x.shape, x.device if unconditional_conditioning is None or unconditional_guidance_scale == 1.: model_output = self.model.apply_model(x, t, c) else: x_in = torch.cat([x] * 2) t_in = torch.cat([t] * 2) if isinstance(c, dict): assert isinstance(unconditional_conditioning, dict) c_in = dict() for k in c: if isinstance(c[k], list): c_in[k] = [torch.cat([ unconditional_conditioning[k][i], c[k][i]]) for i in range(len(c[k]))] else: c_in[k] = torch.cat([ unconditional_conditioning[k], c[k]]) elif isinstance(c, list): c_in = list() assert isinstance(unconditional_conditioning, list) for i in range(len(c)): c_in.append(torch.cat([unconditional_conditioning[i], c[i]])) else: c_in = torch.cat([unconditional_conditioning, c]) model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond) if self.model.parameterization == "v": e_t = self.model.predict_eps_from_z_and_v(x, t, model_output) else: e_t = model_output if score_corrector is not None: assert self.model.parameterization == "eps", 'not implemented' e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas # select parameters corresponding to the currently considered timestep a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) # current prediction for x_0 if self.model.parameterization != "v": pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() else: pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output) if quantize_denoised: pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) if dynamic_threshold is not None: raise NotImplementedError() # direction pointing to x_t dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise return x_prev, pred_x0 @torch.no_grad() def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None): num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] assert t_enc <= num_reference_steps num_steps = t_enc if use_original_steps: alphas_next = self.alphas_cumprod[:num_steps] alphas = self.alphas_cumprod_prev[:num_steps] else: alphas_next = self.ddim_alphas[:num_steps] alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) x_next = x0 intermediates = [] inter_steps = [] for i in tqdm(range(num_steps), desc='Encoding Image'): t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long) if unconditional_guidance_scale == 1.: noise_pred = self.model.apply_model(x_next, t, c) else: assert unconditional_conditioning is not None e_t_uncond, noise_pred = torch.chunk( self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)), torch.cat((unconditional_conditioning, c))), 2) noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond) xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next weighted_noise_pred = alphas_next[i].sqrt() * ( (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred x_next = xt_weighted + weighted_noise_pred if return_intermediates and i % ( num_steps // return_intermediates) == 0 and i < num_steps - 1: intermediates.append(x_next) inter_steps.append(i) elif return_intermediates and i >= num_steps - 2: intermediates.append(x_next) inter_steps.append(i) if callback: callback(i) out = {'x_encoded': x_next, 'intermediate_steps': inter_steps} if return_intermediates: out.update({'intermediates': intermediates}) return x_next, out @torch.no_grad() def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): # fast, but does not allow for exact reconstruction # t serves as an index to gather the correct alphas if use_original_steps: sqrt_alphas_cumprod = self.sqrt_alphas_cumprod sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod else: sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas if noise is None: noise = torch.randn_like(x0)
return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
3
2023-10-30 14:21:09+00:00
8k
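make_ddim_sampling_parameters in the context above derives the DDIM sigma schedule from a subset of alphas_cumprod, following the formula referenced from arXiv:2010.02502. The sketch below reproduces that computation in plain numpy; the linear beta schedule, the 50-step uniform discretization, and eta=1.0 are illustrative assumptions, not values taken from this record.

import numpy as np

# Toy DDPM schedule (assumed for illustration only).
num_ddpm_timesteps = 1000
betas = np.linspace(1e-4, 2e-2, num_ddpm_timesteps)
alphas_cumprod = np.cumprod(1.0 - betas)

# Uniform DDIM discretization, as in make_ddim_timesteps(..., 'uniform');
# the +1 shift mirrors the "add one to get the final alpha values right" comment.
num_ddim_timesteps = 50
c = num_ddpm_timesteps // num_ddim_timesteps
ddim_timesteps = np.arange(0, num_ddpm_timesteps, c) + 1

# Sigma schedule from make_ddim_sampling_parameters; eta=0 would make every
# sigma zero (deterministic DDIM), eta=1 gives DDPM-like noise levels.
eta = 1.0
alphas = alphas_cumprod[ddim_timesteps]
alphas_prev = np.asarray([alphas_cumprod[0]] + alphas_cumprod[ddim_timesteps[:-1]].tolist())
sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
print(sigmas[:5])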
nv-tlabs/trace
scripts/scene_editor.py
[ { "identifier": "set_global_trajdata_batch_env", "path": "tbsim/utils/trajdata_utils.py", "snippet": "def set_global_trajdata_batch_env(batch_env):\n global BATCH_ENV\n BATCH_ENV = batch_env.split('-')[0] # if split is specified, remove it" }, { "identifier": "set_global_trajdata_batch_raster_cfg", "path": "tbsim/utils/trajdata_utils.py", "snippet": "def set_global_trajdata_batch_raster_cfg(raster_cfg):\n global BATCH_RASTER_CFG\n assert \"include_hist\" in raster_cfg\n assert \"pixel_size\" in raster_cfg\n assert \"raster_size\" in raster_cfg\n assert \"ego_center\" in raster_cfg\n assert \"num_sem_layers\" in raster_cfg\n assert \"no_map_fill_value\" in raster_cfg\n assert \"drivable_layers\" in raster_cfg\n BATCH_RASTER_CFG = raster_cfg" }, { "identifier": "SceneEditingConfig", "path": "tbsim/configs/scene_edit_config.py", "snippet": "class SceneEditingConfig(EvaluationConfig):\n def __init__(self):\n super(SceneEditingConfig, self).__init__()\n #\n # The most relevant args from EvaluationConfig. For rest, see that file.\n #\n self.name = \"scene_edit_eval\"\n self.eval_class = \"Diffuser\" \n self.env = \"trajdata\" # only supported environment right now\n self.results_dir = \"scene_edit_eval/\"\n self.num_scenes_per_batch = 1\n\n # number of trajectories samples from the diffusion model\n self.policy.num_action_samples = 10\n\n # if True, computes guidance loss only after full denoising and only uses\n # to choose the action, not to get gradient to guide\n self.policy.guide_as_filter_only = False\n # if True, chooses the sample that's closest to GT at each planning step\n self.policy.guide_with_gt = False\n\n #\n # diffuser-only options\n #\n # if > 0.0 uses classifier-free guidance (mix of conditional and non-cond)\n # model at test time. Uses drop_fill value above.\n self.policy.class_free_guide_w = 0.0\n # whether to guide the predicted CLEAN or NOISY trajectory at each step\n self.policy.guide_clean = True # uses clean (\"reconstruction\") guidance if true\n\n self.metrics.compute_analytical_metrics = True\n\n self.trajdata.trajdata_cache_location = \"~/.unified_data_cache\"\n self.trajdata.trajdata_rebuild_cache = False\n # number of simulations to run in each scene\n # if > 1, each sim is running from a different starting point in the scene\n self.trajdata.num_sim_per_scene = 1\n\n #\n # NOTE: by default, the config for ORCA data is uncommented\n # comment this out and uncomment below for using ETH/UCY (eupeds) and nuscenes datasets instead\n\n #\n ## orca\n #\n self.trajdata.trajdata_source_test = [\"orca_maps-test\"]\n self.trajdata.trajdata_data_dirs = {\n \"orca_maps\" : \"./datasets/orca_sim\",\n \"orca_no_maps\" : \"./datasets/orca_sim\",\n }\n self.trajdata.num_scenes_to_evaluate = 100\n self.trajdata.eval_scenes = np.arange(100).tolist()\n self.trajdata.n_step_action = 50\n self.trajdata.num_simulation_steps = 50\n self.trajdata.skip_first_n = 0\n\n \n # ## eupeds\n # #\n # # self.trajdata.trajdata_source_test = [\"eupeds_eth-test_loo\"]\n # self.trajdata.trajdata_source_test = [\"eupeds_eth-val\", \n # \"eupeds_hotel-val\",\n # \"eupeds_univ-val\",\n # \"eupeds_zara1-val\",\n # \"eupeds_zara2-val\"]\n # self.trajdata.trajdata_data_dirs = {\n # \"eupeds_eth\" : \"./datasets/eth_ucy\", \n # \"eupeds_hotel\" : \"./datasets/eth_ucy\",\n # \"eupeds_univ\" : \"./datasets/eth_ucy\",\n # \"eupeds_zara1\" : \"./datasets/eth_ucy\",\n # \"eupeds_zara2\" : \"./datasets/eth_ucy\"\n # }\n # self.trajdata.num_scenes_to_evaluate = 6\n # self.trajdata.eval_scenes = 
np.arange(6).tolist()\n # self.trajdata.n_step_action = 50\n # self.trajdata.num_simulation_steps = 50\n # self.trajdata.skip_first_n = 0\n # self.trajdata.num_sim_per_scene = 20\n\n \n # ## nusc\n # #\n # self.trajdata.trajdata_source_test = [\"nusc_trainval-val\"]\n # self.trajdata.trajdata_data_dirs = {\n # \"nusc_trainval\" : \"./datasets/nuscenes\",\n # }\n # # 118 val scenes contain pedestrians\n # self.trajdata.eval_scenes = np.arange(118).tolist()\n # self.trajdata.num_scenes_to_evaluate = len(self.trajdata.eval_scenes)\n # self.trajdata.n_step_action = 10\n # self.trajdata.num_simulation_steps = 100\n # self.trajdata.skip_first_n = 0\n\n\n self.edits.editing_source = ['heuristic'] # [config, heuristic, None]\n # self.edits.editing_source = [None] # [config, heuristic, None]\n self.edits.guidance_config = []\n\n # \n # NOTE: Just an example for ORCA data, see configs for other ways to set this\n #\n self.edits.heuristic_config = [\n {\n 'name' : 'agent_collision',\n 'weight' : 1000.0,\n 'params' : {\n 'num_disks' : 1, # to approximate agents\n 'buffer_dist' : 0.2, # extra social distance\n }\n },\n {\n 'name' : 'map_collision',\n 'weight' : 10.0,\n 'params' : {\n 'num_points_lw' : (10, 10),\n }\n },\n {\n 'name' : 'target_pos_at_time',\n 'weight' : 30000.0,\n 'params' : {\n 'target_time' : 40\n },\n }\n ]\n\n def clone(self):\n return deepcopy(self)" }, { "identifier": "guided_rollout", "path": "tbsim/utils/scene_edit_utils.py", "snippet": "def guided_rollout(\n env,\n policy,\n policy_model,\n n_step_action=1,\n guidance_config=None,\n scene_indices=None,\n device=None,\n obs_to_torch=True,\n horizon=None,\n use_gt=False,\n start_frames=None,\n):\n \"\"\"\n Rollout an environment.\n Args:\n env (BaseEnv): a base simulation environment (gym-like)\n policy (RolloutWrapper): a policy that controls agents in the environment\n policy_model (LightningModule): the traffic model underlying the policy with set_guidance implemented.\n n_step_action (int): number of steps to take between querying models\n guidance_config: which guidance functions to use\n scene_indices (tuple, list): (Optional) scenes indices to rollout with\n device: device to cast observation to\n obs_to_torch: whether to cast observation to torch\n horizon (int): (Optional) override horizon of the simulation\n use_gt (bool) : whether the given policy is returning GT or not.\n start_frames (list) : (Optional) a list of starting frame index for each scene index.\n\n Returns:\n stats (dict): A dictionary of rollout stats for each episode (metrics, rewards, etc.)\n info (dict): A dictionary of environment info for each episode\n \"\"\"\n stats = {}\n info = {}\n is_batched_env = isinstance(env, BatchedEnv)\n\n # set up guidance and associated metrics\n added_metrics = [] # save for removal later\n if guidance_config is not None:\n # reset so that we can get an example batch to initialize guidance more efficiently\n env.reset(scene_indices=scene_indices, start_frame_index=start_frames)\n ex_obs = env.get_observation()\n if obs_to_torch:\n device = policy.device if device is None else device\n ex_obs = TensorUtils.to_torch(ex_obs, device=device, ignore_if_unspecified=True)\n if not use_gt:\n policy_model.set_guidance(guidance_config, ex_obs['agents'])\n guidance_metrics = guidance_metrics_from_config(guidance_config)\n env._metrics.update(guidance_metrics) \n added_metrics += guidance_metrics.keys()\n\n # metrics are reset here too, so have to run again after adding new metrics\n env.reset(scene_indices=scene_indices, 
start_frame_index=start_frames)\n\n done = env.is_done()\n counter = 0\n while not done:\n obs = env.get_observation()\n if obs_to_torch:\n device = policy.device if device is None else device\n obs_torch = TensorUtils.to_torch(obs, device=device, ignore_if_unspecified=True)\n else:\n obs_torch = obs\n action = policy.get_action(obs_torch, step_index=counter)\n\n env.step(action, num_steps_to_take=n_step_action, render=False) \n counter += n_step_action\n\n done = env.is_done()\n \n if horizon is not None and counter >= horizon:\n break\n\n metrics = env.get_metrics()\n\n for k, v in metrics.items():\n if k not in stats:\n stats[k] = []\n if is_batched_env: # concatenate by scene\n stats[k] = np.concatenate([stats[k], v], axis=0)\n else:\n stats[k].append(v)\n\n # remove all temporary added metrics\n for met_name in added_metrics:\n env._metrics.pop(met_name)\n\n if not use_gt:\n # and undo guidance setting\n policy_model.clear_guidance()\n\n env_info = env.get_info()\n for k, v in env_info.items():\n if k not in info:\n info[k] = []\n if is_batched_env:\n info[k].extend(v)\n else:\n info[k].append(v)\n\n env.reset_multi_episodes_metrics()\n\n return stats, info" }, { "identifier": "compute_heuristic_guidance", "path": "tbsim/utils/scene_edit_utils.py", "snippet": "def compute_heuristic_guidance(heuristic_config, env, scene_indices, start_frames):\n '''\n Creates guidance configs for each scene based on the given configuration.\n '''\n env.reset(scene_indices=scene_indices, start_frame_index=start_frames)\n heuristic_guidance_cfg = []\n for i, si in enumerate(scene_indices):\n scene_guidance = []\n cur_scene = env._current_scenes[i]\n dt = cur_scene.dataset.desired_dt\n for cur_heur in heuristic_config:\n assert set(('name', 'weight', 'params')).issubset(cur_heur.keys()), \"All heuristics must have these 3 fields\"\n assert cur_heur['name'] in HEURISTIC_FUNC, \"Unrecognized heuristic!\"\n dt = cur_heur['params'].pop('dt', dt) # some already include dt, don't want to duplicate\n cur_guidance = HEURISTIC_FUNC[cur_heur['name']](cur_scene, dt, **cur_heur['params'])\n if cur_guidance is not None:\n if not isinstance(cur_guidance, list):\n cur_guidance = [cur_guidance]\n for guide_el in cur_guidance:\n guide_el['weight'] = cur_heur['weight']\n scene_guidance.append(guide_el)\n heuristic_guidance_cfg.append(scene_guidance)\n\n return heuristic_guidance_cfg" }, { "identifier": "merge_guidance_configs", "path": "tbsim/utils/scene_edit_utils.py", "snippet": "def merge_guidance_configs(cfg1, cfg2):\n if cfg1 is None or len(cfg1) == 0:\n return cfg2\n if cfg2 is None or len(cfg2) == 0:\n return cfg1\n merge_cfg = deepcopy(cfg1)\n num_scenes = len(merge_cfg)\n for si in range(num_scenes):\n merge_cfg[si].extend(cfg2[si])\n return merge_cfg" }, { "identifier": "EnvUnifiedBuilder", "path": "tbsim/evaluation/env_builders.py", "snippet": "class EnvUnifiedBuilder(EnvironmentBuilder):\n def get_env(self):\n exp_cfg = self.exp_cfg.clone()\n exp_cfg.unlock()\n exp_cfg.env.simulation.num_simulation_steps = self.eval_cfg.num_simulation_steps\n exp_cfg.env.simulation.start_frame_index = exp_cfg.algo.history_num_frames + 1\n exp_cfg.lock()\n\n # the config used at training time\n data_cfg = translate_pass_trajdata_cfg(exp_cfg)\n\n future_sec = data_cfg.future_num_frames * data_cfg.step_time\n history_sec = data_cfg.history_num_frames * data_cfg.step_time\n neighbor_distance = data_cfg.max_agents_distance\n agent_only_types = [TRAJDATA_AGENT_TYPE_MAP[cur_type] for cur_type in data_cfg.trajdata_only_types]\n 
agent_predict_types = None\n if data_cfg.trajdata_predict_types is not None:\n agent_predict_types = [TRAJDATA_AGENT_TYPE_MAP[cur_type] for cur_type in data_cfg.trajdata_predict_types]\n\n kwargs = dict(\n cache_location=self.eval_cfg.trajdata_cache_location,\n desired_data=self.eval_cfg.trajdata_source_test,\n desired_dt=data_cfg.step_time,\n future_sec=(future_sec, future_sec),\n history_sec=(history_sec, history_sec),\n data_dirs=self.eval_cfg.trajdata_data_dirs,\n only_types=agent_only_types,\n only_predict=agent_predict_types,\n agent_interaction_distances=defaultdict(lambda: neighbor_distance),\n incl_map=data_cfg.trajdata_incl_map,\n map_params={\n \"px_per_m\": int(1 / data_cfg.pixel_size),\n \"map_size_px\": data_cfg.raster_size,\n \"return_rgb\": False,\n \"offset_frac_xy\": data_cfg.raster_center,\n \"no_map_fill_value\": data_cfg.no_map_fill_value,\n },\n centric=data_cfg.trajdata_centric,\n scene_description_contains=data_cfg.trajdata_scene_desc_contains,\n standardize_data=data_cfg.trajdata_standardize_data,\n verbose=True,\n num_workers=0, #os.cpu_count(),\n rebuild_cache=self.eval_cfg.trajdata_rebuild_cache,\n rebuild_maps=self.eval_cfg.trajdata_rebuild_cache,\n )\n\n env_dataset = UnifiedDataset(**kwargs)\n\n metrics = dict()\n if self.eval_cfg.metrics.compute_analytical_metrics:\n metrics.update(self._get_analytical_metrics())\n\n # if we don't have a map, can't compute map-based metrics\n if not data_cfg.trajdata_incl_map:\n metrics.pop(\"all_off_road_rate\", None)\n metrics.pop(\"all_sem_layer_rate\", None)\n metrics.pop(\"all_failure\", None)\n\n env = EnvUnifiedSimulation(\n exp_cfg.env,\n dataset=env_dataset,\n seed=self.eval_cfg.seed,\n num_scenes=self.eval_cfg.num_scenes_per_batch,\n prediction_only=False,\n metrics=metrics\n )\n\n return env" }, { "identifier": "RolloutWrapper", "path": "tbsim/policies/wrappers.py", "snippet": "class RolloutWrapper(object):\n \"\"\"A wrapper policy that can (optionally) control both ego and other agents in a scene\"\"\"\n\n def __init__(self, ego_policy=None, agents_policy=None, pass_agent_obs=True):\n self.device = ego_policy.device if agents_policy is None else agents_policy.device\n self.ego_policy = ego_policy\n self.agents_policy = agents_policy\n self.pass_agent_obs = pass_agent_obs\n\n def eval(self):\n self.ego_policy.eval()\n self.agents_policy.eval()\n\n def get_action(self, obs, step_index) -> RolloutAction:\n ego_action = None\n ego_action_info = None\n agents_action = None\n agents_action_info = None\n if self.ego_policy is not None:\n assert obs[\"ego\"] is not None\n with torch.no_grad():\n if self.pass_agent_obs:\n ego_action, ego_action_info = self.ego_policy.get_action(\n obs[\"ego\"], step_index = step_index,agent_obs = obs[\"agents\"])\n else:\n ego_action, ego_action_info = self.ego_policy.get_action(\n obs[\"ego\"], step_index = step_index)\n if self.agents_policy is not None:\n assert obs[\"agents\"] is not None\n with torch.no_grad():\n agents_action, agents_action_info = self.agents_policy.get_action(\n obs[\"agents\"], step_index = step_index)\n return RolloutAction(ego_action, ego_action_info, agents_action, agents_action_info)" }, { "identifier": "map_ndarray", "path": "tbsim/utils/tensor_utils.py", "snippet": "def map_ndarray(x, func):\n \"\"\"\n Apply function @func to np.ndarray objects in a nested dictionary or\n list or tuple.\n\n Args:\n x (dict or list or tuple): a possibly nested dictionary or list or tuple\n func (function): function to apply to each array\n\n Returns:\n y (dict or list or 
tuple): new nested dict-list-tuple\n \"\"\"\n return recursive_dict_list_tuple_apply(\n x,\n {\n np.ndarray: func,\n str: lambda x:x,\n type(None): lambda x: x,\n },\n )" } ]
import argparse import numpy as np import json import random import importlib import os import torch import h5py from pprint import pprint from tbsim.utils.trajdata_utils import set_global_trajdata_batch_env, set_global_trajdata_batch_raster_cfg from tbsim.configs.scene_edit_config import SceneEditingConfig from tbsim.utils.scene_edit_utils import guided_rollout, compute_heuristic_guidance, merge_guidance_configs from tbsim.evaluation.env_builders import EnvUnifiedBuilder from tbsim.policies.wrappers import RolloutWrapper from tbsim.utils.tensor_utils import map_ndarray from tbsim.policies.hardcoded import GTNaNPolicy from tbsim.utils.viz_utils import get_trajdata_renderer from tbsim.utils.viz_utils import visualize_guided_rollout
4,780
"""A script for evaluating closed-loop simulation""" def run_scene_editor(eval_cfg, save_cfg, data_to_disk, render_to_video, render_to_img, render_cfg, use_gt=False): # assumes all used trajdata datasets use share same map layers set_global_trajdata_batch_env(eval_cfg.trajdata_source_test[0]) print(eval_cfg) # for reproducibility np.random.seed(eval_cfg.seed) random.seed(eval_cfg.seed) torch.manual_seed(eval_cfg.seed) torch.cuda.manual_seed(eval_cfg.seed) # basic setup print('saving results to {}'.format(eval_cfg.results_dir)) os.makedirs(eval_cfg.results_dir, exist_ok=True) if render_to_video: os.makedirs(os.path.join(eval_cfg.results_dir, "videos/"), exist_ok=True) if render_to_video or render_to_img: os.makedirs(os.path.join(eval_cfg.results_dir, "viz/"), exist_ok=True) if save_cfg: json.dump(eval_cfg, open(os.path.join(eval_cfg.results_dir, "config.json"), "w+")) if data_to_disk and os.path.exists(eval_cfg.experience_hdf5_path): os.remove(eval_cfg.experience_hdf5_path) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # create policy and rollout wrapper policy_composers = importlib.import_module("tbsim.evaluation.policy_composers") composer_class = getattr(policy_composers, eval_cfg.eval_class) composer = composer_class(eval_cfg, device) policy, exp_config = composer.get_policy() policy_model = policy.model if use_gt: # overwrite policy with dummy that always returns GT policy = GTNaNPolicy(device=device) policy_model = None print('WARNING: Using GT data as the policy instead of the provided model!!') # determines cfg for rasterizing agents
"""A script for evaluating closed-loop simulation""" def run_scene_editor(eval_cfg, save_cfg, data_to_disk, render_to_video, render_to_img, render_cfg, use_gt=False): # assumes all used trajdata datasets use share same map layers set_global_trajdata_batch_env(eval_cfg.trajdata_source_test[0]) print(eval_cfg) # for reproducibility np.random.seed(eval_cfg.seed) random.seed(eval_cfg.seed) torch.manual_seed(eval_cfg.seed) torch.cuda.manual_seed(eval_cfg.seed) # basic setup print('saving results to {}'.format(eval_cfg.results_dir)) os.makedirs(eval_cfg.results_dir, exist_ok=True) if render_to_video: os.makedirs(os.path.join(eval_cfg.results_dir, "videos/"), exist_ok=True) if render_to_video or render_to_img: os.makedirs(os.path.join(eval_cfg.results_dir, "viz/"), exist_ok=True) if save_cfg: json.dump(eval_cfg, open(os.path.join(eval_cfg.results_dir, "config.json"), "w+")) if data_to_disk and os.path.exists(eval_cfg.experience_hdf5_path): os.remove(eval_cfg.experience_hdf5_path) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # create policy and rollout wrapper policy_composers = importlib.import_module("tbsim.evaluation.policy_composers") composer_class = getattr(policy_composers, eval_cfg.eval_class) composer = composer_class(eval_cfg, device) policy, exp_config = composer.get_policy() policy_model = policy.model if use_gt: # overwrite policy with dummy that always returns GT policy = GTNaNPolicy(device=device) policy_model = None print('WARNING: Using GT data as the policy instead of the provided model!!') # determines cfg for rasterizing agents
set_global_trajdata_batch_raster_cfg(exp_config.env.rasterizer)
1
2023-10-31 18:43:07+00:00
8k
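scene_editor.py combines config-supplied and heuristic-derived guidance per scene through merge_guidance_configs, whose full body appears in the context above. The standalone sketch below reproduces that merge; the two toy per-scene configs reuse heuristic names from SceneEditingConfig, but the concrete weights and parameters are illustrative assumptions, not values from this record.

from copy import deepcopy

# Same merge logic as merge_guidance_configs in tbsim.utils.scene_edit_utils:
# one list of guidance dicts per scene, concatenated scene by scene.
def merge_guidance_configs(cfg1, cfg2):
    if cfg1 is None or len(cfg1) == 0:
        return cfg2
    if cfg2 is None or len(cfg2) == 0:
        return cfg1
    merge_cfg = deepcopy(cfg1)
    for si in range(len(merge_cfg)):
        merge_cfg[si].extend(cfg2[si])
    return merge_cfg

# Illustrative per-scene configs (two scenes), made up for this sketch.
config_guidance = [
    [{"name": "target_pos_at_time", "weight": 30000.0, "params": {"target_time": 40}}],
    [],
]
heuristic_guidance = [
    [{"name": "agent_collision", "weight": 1000.0, "params": {"num_disks": 1, "buffer_dist": 0.2}}],
    [{"name": "map_collision", "weight": 10.0, "params": {"num_points_lw": (10, 10)}}],
]
print(merge_guidance_configs(config_guidance, heuristic_guidance))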
AetherBlack/abuseACL
abuseACL/network/LDAP.py
[ { "identifier": "sAMAccountType", "path": "abuseACL/structures/sAMAccountType.py", "snippet": "class sAMAccountType:\n\n SAM_DOMAIN_OBJECT = 0x0\n SAM_GROUP_OBJECT = 0x10000000\n SAM_NON_SECURITY_GROUP_OBJECT = 0x10000001\n SAM_ALIAS_OBJECT = 0x20000000\n SAM_NON_SECURITY_ALIAS_OBJECT = 0x20000001\n SAM_USER_OBJECT = 0x30000000\n SAM_NORMAL_USER_ACCOUNT = 0x30000000\n SAM_MACHINE_ACCOUNT = 0x30000001\n SAM_TRUST_ACCOUNT = 0x30000002\n SAM_APP_BASIC_GROUP = 0x40000000\n SAM_APP_QUERY_GROUP = 0x40000001\n SAM_ACCOUNT_TYPE_MAX = 0x7fffffff" }, { "identifier": "Credentials", "path": "abuseACL/structures/Credentials.py", "snippet": "class Credentials:\n\n def __init__(self, username: str, password: str, domain: str, ntlmhash: str, aesKey: str, doKerberos: bool) -> None:\n self.username = username\n self.password = password\n self.domain = domain\n self.ntlmhash = ntlmhash\n self.aesKey = aesKey\n self.doKerberos = doKerberos\n\n def getAuthenticationSecret(self) -> str:\n return self.password or self.ntlmhash" }, { "identifier": "Target", "path": "abuseACL/structures/Target.py", "snippet": "class Target:\n\n tlsv1_2: bool = None\n tlsv1: bool = None\n\n def __init__(self, remote: str, port: int) -> None:\n self.remote = remote\n self.port = port\n\n def use_tls(self) -> bool:\n return self.tlsv1_2 or self.tlsv1" }, { "identifier": "ADCertificateTemplate", "path": "abuseACL/structures/ADObject/ADCertificateTemplate.py", "snippet": "class ADCertificateTemplate(ADObject):\n\n def __init__(self, distinguishedName: str, name: str, nTSecurityDescriptor: bytes) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.name = name[0].decode()\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n\n self.sAMAccountName = self.name\n self.objectSid = str()" }, { "identifier": "ADAdminSDHolder", "path": "abuseACL/structures/ADObject/ADAdminSDHolder.py", "snippet": "class ADAdminSDHolder(ADObject):\n\n def __init__(self, distinguishedName: str, name: str, nTSecurityDescriptor: bytes) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.name = name[0].decode()\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n\n self.sAMAccountName = self.name\n self.objectSid = str()" }, { "identifier": "ADComputer", "path": "abuseACL/structures/ADObject/ADComputer.py", "snippet": "class ADComputer(ADObject):\n\n def __init__(self, distinguishedName: str, name: str, sAMAccountName: str, objectSid: str, nTSecurityDescriptor: bytes, userAccountControl: int) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.name = name[0].decode()\n self.sAMAccountName = sAMAccountName[0].decode().lower()\n self.objectSid = self.convertSid(objectSid[0])\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n self.userAccountControl = int(userAccountControl[0].decode())\n\n self.isUserEnable = self.userAccountControl & 0x0002\n\n @staticmethod\n def getComputerSid(computers: list, computername: str) -> str:\n for computer in computers:\n computer: ADComputer\n\n if computer.sAMAccountName == computername:\n return computer.objectSid\n\n return None" }, { "identifier": "ADSchema", "path": "abuseACL/structures/ADObject/ADSchema.py", "snippet": "class ADSchema(ADObject):\n\n def __init__(self, distinguishedName: str, name: str, nTSecurityDescriptor: bytes) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.name = name[0].decode()\n self.nTSecurityDescriptor = 
self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n\n self.sAMAccountName = self.name\n self.objectSid = str()" }, { "identifier": "ADGroup", "path": "abuseACL/structures/ADObject/ADGroup.py", "snippet": "class ADGroup(ADObject):\n\n def __init__(self, distinguishedName: str, name: str, sAMAccountName: str, objectSid: str, nTSecurityDescriptor: bytes) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.name = name[0].decode()\n self.sAMAccountName = sAMAccountName[0].decode().lower()\n self.objectSid = self.convertSid(objectSid[0])\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n\n @staticmethod\n def getGroupSid(groups: list, groupname: str) -> str:\n for group in groups:\n group: ADGroup\n\n if group.sAMAccountName == groupname:\n return group.objectSid\n\n return None" }, { "identifier": "ADUser", "path": "abuseACL/structures/ADObject/ADUser.py", "snippet": "class ADUser(ADObject):\n\n def __init__(self, distinguishedName: str, name: str, userPrincipalName: str, sAMAccountName: str, objectSid: str, nTSecurityDescriptor: bytes, userAccountControl: int) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.name = name[0].decode()\n self.userPrincipalName = userPrincipalName[0].decode() if len(userPrincipalName) else userPrincipalName\n self.sAMAccountName = sAMAccountName[0].decode().lower()\n self.objectSid = self.convertSid(objectSid[0])\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n self.userAccountControl = int(userAccountControl[0].decode())\n\n self.isUserEnable = self.userAccountControl & 0x0002\n\n @staticmethod\n def getUserSid(users: list, username: str) -> str:\n for user in users:\n user: ADUser\n\n if user.sAMAccountName == username:\n return user.objectSid\n\n return None" }, { "identifier": "ADgMSA", "path": "abuseACL/structures/ADObject/ADgMSA.py", "snippet": "class ADgMSA(ADObject):\n\n def __init__(self, distinguishedName: str, sAMAccountName: str, objectSid: str, nTSecurityDescriptor: bytes) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.sAMAccountName = sAMAccountName[0].decode().lower()\n self.objectSid = self.convertSid(objectSid[0])\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n\n @staticmethod\n def getgMSASid(gMSAs: list, principal: str) -> str:\n for gMSA in gMSAs:\n gMSA: ADgMSA\n\n print(gMSA.sAMAccountName, principal)\n\n if gMSA.sAMAccountName == principal:\n return gMSA.objectSid\n\n return None" }, { "identifier": "ADGPO", "path": "abuseACL/structures/ADObject/ADGPO.py", "snippet": "class ADGPO(ADObject):\n\n def __init__(self, distinguishedName: str, displayName: str, gPCFileSysPath: str, nTSecurityDescriptor: bytes) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.displayName = displayName[0].decode()\n self.gPCFileSysPath = gPCFileSysPath[0].decode()\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n\n self.sAMAccountName = self.displayName\n self.objectSid = str()" }, { "identifier": "ADOU", "path": "abuseACL/structures/ADObject/ADOU.py", "snippet": "class ADOU(ADObject):\n\n def __init__(self, distinguishedName: str, name: str, nTSecurityDescriptor: bytes) -> None:\n self.distinguishedName = distinguishedName[0].decode()\n self.name = name[0].decode()\n self.nTSecurityDescriptor = self.parseSecurityDescriptor(nTSecurityDescriptor[0])\n\n self.sAMAccountName = self.name\n self.objectSid = str()" }, { "identifier": 
"Kerberos", "path": "abuseACL/network/Kerberos.py", "snippet": "class Kerberos:\n\n @staticmethod\n def kerberosLogin(target: str, user: str, password: str, domain: str = \"\", hashes: str = \"\", aesKey: str = \"\",\n kdcHost: str = None, TGT=None, TGS=None, useCache: bool = False):\n\n if len(hashes):\n lmhash, nthash = hashes.split(\":\")\n\n if len(lmhash) % 2:\n lmhash = \"0\" + lmhash\n if len(nthash) % 2:\n nthash = \"0\" + nthash\n\n lmhash = bytes.fromhex(lmhash)\n nthash = bytes.fromhex(nthash)\n else:\n lmhash, nthash = \"\", \"\"\n\n if TGT is None or TGS is None:\n useCache = True\n\n targetName = \"ldap/%s\" % target\n if useCache:\n domain, user, TGT, TGS = CCache.parseFile(domain, user, targetName)\n\n # First of all, we need to get a TGT for the user\n userName = Principal(user, type=constants.PrincipalNameType.NT_PRINCIPAL.value)\n\n if TGT is None and TGS is None:\n tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(userName, password, domain, lmhash, nthash,\n aesKey, kdcHost)\n else:\n tgt = TGT['KDC_REP']\n cipher = TGT['cipher']\n sessionKey = TGT['sessionKey']\n\n if TGS is None:\n serverName = Principal(targetName, type=constants.PrincipalNameType.NT_SRV_INST.value)\n tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, domain, kdcHost, tgt, cipher,\n sessionKey)\n else:\n tgs = TGS['KDC_REP']\n cipher = TGS['cipher']\n sessionKey = TGS['sessionKey']\n\n # Let's build a NegTokenInit with a Kerberos REQ_AP\n\n blob = SPNEGO_NegTokenInit()\n\n # Kerberos\n blob['MechTypes'] = [TypesMech['MS KRB5 - Microsoft Kerberos 5']]\n\n # Let's extract the ticket from the TGS\n tgs = decoder.decode(tgs, asn1Spec=TGS_REP())[0]\n ticket = Ticket()\n ticket.from_asn1(tgs['ticket'])\n\n # Now let's build the AP_REQ\n apReq = AP_REQ()\n apReq['pvno'] = 5\n apReq['msg-type'] = int(constants.ApplicationTagNumbers.AP_REQ.value)\n\n opts = []\n apReq['ap-options'] = constants.encodeFlags(opts)\n seq_set(apReq, 'ticket', ticket.to_asn1)\n\n authenticator = Authenticator()\n authenticator['authenticator-vno'] = 5\n authenticator['crealm'] = domain\n seq_set(authenticator, 'cname', userName.components_to_asn1)\n now = datetime.datetime.utcnow()\n\n authenticator['cusec'] = now.microsecond\n authenticator['ctime'] = KerberosTime.to_asn1(now)\n\n encodedAuthenticator = encoder.encode(authenticator)\n\n # Key Usage 11\n # AP-REQ Authenticator (includes application authenticator\n # subkey), encrypted with the application session key\n # (Section 5.5.1)\n encryptedEncodedAuthenticator = cipher.encrypt(sessionKey, 11, encodedAuthenticator, None)\n\n apReq['authenticator'] = noValue\n apReq['authenticator']['etype'] = cipher.enctype\n apReq['authenticator']['cipher'] = encryptedEncodedAuthenticator\n\n blob['MechToken'] = encoder.encode(apReq)\n\n return blob" }, { "identifier": "Logger", "path": "abuseACL/core/Logger.py", "snippet": "class Logger:\n\n def __init__(self, debug: bool, timestamp: bool) -> None:\n self.__debug = debug\n self.__timestamp = timestamp\n\n def __toStdout(self, color: str, title: str, msg: str) -> None:\n timestamp = str()\n\n if self.__timestamp:\n timestamp = time.strftime(\"[%Y/%m/%d %H:%M:%S] \")\n\n print(\"%s%s[%s] %s%s\" % (color, timestamp, title, msg, Style.RESET_ALL))\n\n def debug(self, msg: str) -> None:\n if self.__debug:\n self.__toStdout(Fore.BLUE, \"i\", msg)\n\n def error(self, msg: str) -> None:\n self.__toStdout(Fore.RED, \"!\", msg)\n\n def vuln(self, msg: str) -> None:\n self.__toStdout(Fore.GREEN, \"*\", msg)" } ]
from typing import List from abuseACL.structures.sAMAccountType import sAMAccountType from abuseACL.structures.Credentials import Credentials from abuseACL.structures.Target import Target from abuseACL.structures.ADObject.ADCertificateTemplate import ADCertificateTemplate from abuseACL.structures.ADObject.ADAdminSDHolder import ADAdminSDHolder from abuseACL.structures.ADObject.ADComputer import ADComputer from abuseACL.structures.ADObject.ADSchema import ADSchema from abuseACL.structures.ADObject.ADGroup import ADGroup from abuseACL.structures.ADObject.ADUser import ADUser from abuseACL.structures.ADObject.ADgMSA import ADgMSA from abuseACL.structures.ADObject.ADGPO import ADGPO from abuseACL.structures.ADObject.ADOU import ADOU from abuseACL.network.Kerberos import Kerberos from abuseACL.core.Logger import Logger import ssl as tls import ldap3
5,714
["namingContexts"] ) self.namingContexts = response[0]["attributes"]["namingContexts"] self.defaultNamingContext = self.namingContexts[0] self.configurationNamingContext = self.namingContexts[1] self.schemaNamingContext = self.namingContexts[2] self.domainDnsZonesNamingContext = self.namingContexts[3] self.forestDnsZonesNamingContext = self.namingContexts[4] def getAllUsers(self) -> List[ADUser]: if len(self.users): return self.users response = self.search( self.defaultNamingContext, "(sAMAccountType=%d)" % (sAMAccountType.SAM_NORMAL_USER_ACCOUNT), ldap3.SUBTREE, ["DistinguishedName", "name", "userPrincipalName", "sAMAccountName", "objectSid", "ntSecurityDescriptor", "userAccountControl"] ) self.users = self.__createArrayOfObject(response, ADUser) return self.users def getAllGroups(self) -> List[ADGroup]: if len(self.groups): return self.groups response = self.search( self.defaultNamingContext, "(|(sAMAccountType=%d)(sAMAccountType=%d)(sAMAccountType=%d)(sAMAccountType=%d))" % ( sAMAccountType.SAM_GROUP_OBJECT, sAMAccountType.SAM_NON_SECURITY_GROUP_OBJECT, sAMAccountType.SAM_ALIAS_OBJECT, sAMAccountType.SAM_NON_SECURITY_ALIAS_OBJECT ), ldap3.SUBTREE, ["DistinguishedName", "name", "sAMAccountName", "objectSid", "ntSecurityDescriptor"] ) self.groups = self.__createArrayOfObject(response, ADGroup) return self.groups def getAllComputers(self) -> List[ADComputer]: if len(self.computers): return self.computers response = self.search( self.defaultNamingContext, "(sAMAccountType=%d)" % (sAMAccountType.SAM_MACHINE_ACCOUNT), ldap3.SUBTREE, ["DistinguishedName", "name", "sAMAccountName", "objectSid", "ntSecurityDescriptor", "userAccountControl"] ) self.computers = self.__createArrayOfObject(response, ADComputer) return self.computers def getAllCertificatesTemplates(self) -> List[ADCertificateTemplate]: if len(self.certificatesTemplates): return self.certificatesTemplates response = self.search( f"CN=Certificate Templates,CN=Public Key Services,CN=Services,{self.configurationNamingContext}", "(objectClass=pkiCertificateTemplate)", ldap3.SUBTREE, ["DistinguishedName", "name", "ntSecurityDescriptor"] ) self.certificatesTemplates = self.__createArrayOfObject(response, ADCertificateTemplate) return self.certificatesTemplates def getAllGPOs(self) -> List[ADGPO]: if len(self.gpos): return self.gpos response = self.search( f"CN=Policies,CN=System,{self.defaultNamingContext}", "(objectClass=groupPolicyContainer)", ldap3.SUBTREE, ["DistinguishedName", "displayName", "gPCFileSysPath", "ntSecurityDescriptor"] ) self.gpos = self.__createArrayOfObject(response, ADGPO) return self.gpos def getAllOUs(self) -> List[ADGPO]: if len(self.ous): return self.ous response = self.search( self.defaultNamingContext, "(objectClass=organizationalUnit)", ldap3.SUBTREE, ["DistinguishedName", "name", "ntSecurityDescriptor"] ) self.ous = self.__createArrayOfObject(response, ADOU) return self.ous def getAdminSDHolder(self) -> List[ADAdminSDHolder]: if len(self.adminSDHolder): return self.adminSDHolder response = self.search( f"CN=AdminSDHolder,CN=System,{self.defaultNamingContext}", "(cn=AdminSDHolder)", ldap3.BASE, ["DistinguishedName", "name", "ntSecurityDescriptor"] ) self.adminSDHolder = self.__createArrayOfObject(response, ADAdminSDHolder) return self.adminSDHolder
class LDAP: users = list() groups = list() computers = list() certificatesTemplates = list() gpos = list() ous = list() adminSDHolder = list() schema = list() gMSA = list() def __init__(self, forest: str, target: Target, credentials: Credentials, logger: Logger) -> None: self.target = target self.credentials = credentials self.logger = logger self.__getPort() self.__checkAuthentication() def __getPort(self) -> None: if self.target.port: return self.target.port, self.target.tlsv1_2 = self.__tryLDAPS(tls.PROTOCOL_TLSv1_2, self.target.port) if self.target.tlsv1_2 is None: self.target.port, self.target.tlsv1 = self.__tryLDAPS(tls.PROTOCOL_TLSv1, self.target.port) if self.target.tlsv1 is None: self.target.port = self.__tryLDAP(self.target.port) if self.target.port is None: self.logger.error(f"Impossible to communicate with the target {self.target.remote} !") exit(1) def __checkAuthentication(self) -> None: self.logger.debug("Trying to connect to %s:%d" % (self.target.remote, self.target.port)) self.__Authentication() try: self.getNamingContexts() except IndexError: self.logger.error("Invalid credentials !") exit(1) self.logger.debug("Authentication success !") def __Authentication(self) -> ldap3.Connection: user = "%s\\%s" % (self.credentials.domain, self.credentials.username) ldapTls = None if self.target.tlsv1_2: ldapTls = ldap3.Tls(validate=tls.CERT_NONE, version=tls.PROTOCOL_TLSv1_2, ciphers='ALL:@SECLEVEL=0') elif self.target.tlsv1: ldapTls = ldap3.Tls(validate=tls.CERT_NONE, version=tls.PROTOCOL_TLSv1, ciphers='ALL:@SECLEVEL=0') ldapServer = ldap3.Server(self.target.remote, use_ssl=self.target.use_tls(), port=self.target.port, get_info=ldap3.ALL, tls=ldapTls) if self.credentials.doKerberos: ldapConn = ldap3.Connection(ldapServer) ldapConn = self.kerberosAuthentication(ldapConn) else: ldapConn = ldap3.Connection(ldapServer, user=user, password=self.credentials.getAuthenticationSecret(), authentication=ldap3.NTLM) ldapConn.bind() if ldapConn.result["description"] == "invalidCredentials": self.logger.error("Invalid credentials !") exit(1) return ldapConn def __tryLDAPS(self, proto: tls._SSLMethod, port: int) -> int: port = port or 636 ldapTls = ldap3.Tls(validate=tls.CERT_NONE, version=proto, ciphers="ALL:@SECLEVEL=0") ldapServer = ldap3.Server(self.target.remote, use_ssl=True, port=port, get_info=ldap3.ALL, tls=ldapTls) ldapConn = ldap3.Connection(ldapServer) try: ldapConn.bind() except ldap3.core.exceptions.LDAPSocketOpenError: return None, None except ldap3.core.exceptions.LDAPSocketReceiveError: pass return port, True def __tryLDAP(self, port: int) -> int: self.logger.debug("LDAPS failed, trying with LDAP.") port = port or 389 ldapServer = ldap3.Server(self.target.remote, use_ssl=False, port=port, get_info=ldap3.ALL) ldapConn = ldap3.Connection(ldapServer) try: ldapConn.bind() except ldap3.core.exceptions.LDAPSocketOpenError: return None except ldap3.core.exceptions.LDAPSocketReceiveError: return port return port def kerberosAuthentication(self, ldapConn: ldap3.Connection) -> None: blob = Kerberos.kerberosLogin(self.target.remote, self.credentials.username, self.credentials.password, self.credentials.domain, self.credentials.ntlmhash, self.credentials.aesKey, kdcHost=self.target.remote) request = ldap3.operation.bind.bind_operation(ldapConn.version, ldap3.SASL, self.credentials.username, None, "GSS-SPNEGO", blob.getData()) # Done with the Kerberos saga, now let's get into LDAP # try to open connection if closed if ldapConn.closed: ldapConn.open(read_server_info=False) 
ldapConn.sasl_in_progress = True response = ldapConn.post_send_single_response(ldapConn.send('bindRequest', request, None)) ldapConn.sasl_in_progress = False if response[0]['result'] != 0: raise Exception(response) ldapConn.bound = True return ldapConn def search(self, dn: str, filter: str, scope: str, attributes: list = ["*"]) -> list: ldapConn = self.__Authentication() ldapConn.search( search_base=dn, search_filter=filter, search_scope=scope, attributes=attributes, # Controls to get nTSecurityDescriptor from standard user # OWNER_SECURITY_INFORMATION + GROUP_SECURITY_INFORMATION + DACL_SECURITY_INFORMATION controls=[("1.2.840.113556.1.4.801", True, "%c%c%c%c%c" % (48, 3, 2, 1, 7), )] ) return ldapConn.response def __createArrayOfObject(self, response: list, obj: object) -> list: array = list() for entry in response: # Not a response object if entry["type"] != "searchResEntry": continue array.append( obj(**entry["raw_attributes"]) ) return array def getNamingContexts(self) -> list: response = self.search( "", "(objectClass=*)", ldap3.BASE, ["namingContexts"] ) self.namingContexts = response[0]["attributes"]["namingContexts"] self.defaultNamingContext = self.namingContexts[0] self.configurationNamingContext = self.namingContexts[1] self.schemaNamingContext = self.namingContexts[2] self.domainDnsZonesNamingContext = self.namingContexts[3] self.forestDnsZonesNamingContext = self.namingContexts[4] def getAllUsers(self) -> List[ADUser]: if len(self.users): return self.users response = self.search( self.defaultNamingContext, "(sAMAccountType=%d)" % (sAMAccountType.SAM_NORMAL_USER_ACCOUNT), ldap3.SUBTREE, ["DistinguishedName", "name", "userPrincipalName", "sAMAccountName", "objectSid", "ntSecurityDescriptor", "userAccountControl"] ) self.users = self.__createArrayOfObject(response, ADUser) return self.users def getAllGroups(self) -> List[ADGroup]: if len(self.groups): return self.groups response = self.search( self.defaultNamingContext, "(|(sAMAccountType=%d)(sAMAccountType=%d)(sAMAccountType=%d)(sAMAccountType=%d))" % ( sAMAccountType.SAM_GROUP_OBJECT, sAMAccountType.SAM_NON_SECURITY_GROUP_OBJECT, sAMAccountType.SAM_ALIAS_OBJECT, sAMAccountType.SAM_NON_SECURITY_ALIAS_OBJECT ), ldap3.SUBTREE, ["DistinguishedName", "name", "sAMAccountName", "objectSid", "ntSecurityDescriptor"] ) self.groups = self.__createArrayOfObject(response, ADGroup) return self.groups def getAllComputers(self) -> List[ADComputer]: if len(self.computers): return self.computers response = self.search( self.defaultNamingContext, "(sAMAccountType=%d)" % (sAMAccountType.SAM_MACHINE_ACCOUNT), ldap3.SUBTREE, ["DistinguishedName", "name", "sAMAccountName", "objectSid", "ntSecurityDescriptor", "userAccountControl"] ) self.computers = self.__createArrayOfObject(response, ADComputer) return self.computers def getAllCertificatesTemplates(self) -> List[ADCertificateTemplate]: if len(self.certificatesTemplates): return self.certificatesTemplates response = self.search( f"CN=Certificate Templates,CN=Public Key Services,CN=Services,{self.configurationNamingContext}", "(objectClass=pkiCertificateTemplate)", ldap3.SUBTREE, ["DistinguishedName", "name", "ntSecurityDescriptor"] ) self.certificatesTemplates = self.__createArrayOfObject(response, ADCertificateTemplate) return self.certificatesTemplates def getAllGPOs(self) -> List[ADGPO]: if len(self.gpos): return self.gpos response = self.search( f"CN=Policies,CN=System,{self.defaultNamingContext}", "(objectClass=groupPolicyContainer)", ldap3.SUBTREE, ["DistinguishedName", "displayName", 
"gPCFileSysPath", "ntSecurityDescriptor"] ) self.gpos = self.__createArrayOfObject(response, ADGPO) return self.gpos def getAllOUs(self) -> List[ADGPO]: if len(self.ous): return self.ous response = self.search( self.defaultNamingContext, "(objectClass=organizationalUnit)", ldap3.SUBTREE, ["DistinguishedName", "name", "ntSecurityDescriptor"] ) self.ous = self.__createArrayOfObject(response, ADOU) return self.ous def getAdminSDHolder(self) -> List[ADAdminSDHolder]: if len(self.adminSDHolder): return self.adminSDHolder response = self.search( f"CN=AdminSDHolder,CN=System,{self.defaultNamingContext}", "(cn=AdminSDHolder)", ldap3.BASE, ["DistinguishedName", "name", "ntSecurityDescriptor"] ) self.adminSDHolder = self.__createArrayOfObject(response, ADAdminSDHolder) return self.adminSDHolder
def getSchema(self) -> List[ADSchema]:
6
2023-10-30 21:19:24+00:00
8k
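
The abuseACL record above fetches nTSecurityDescriptor through the LDAP_SERVER_SD_FLAGS control ("1.2.840.113556.1.4.801"), whose raw value is built from the bytes 48, 3, 2, 1, 7. A minimal sketch of what those bytes encode, assuming standard SECURITY_INFORMATION flag values (the constant names and helper below are illustrative, not part of the record):

# BER encoding of SEQUENCE { INTEGER flags }: 0x30 = SEQUENCE tag, 0x02 = INTEGER tag.
OWNER_SECURITY_INFORMATION = 0x1
GROUP_SECURITY_INFORMATION = 0x2
DACL_SECURITY_INFORMATION = 0x4

def sd_flags_control_value(flags: int) -> bytes:
    # Valid for flag values below 128, which covers the 7 requested in the record.
    return bytes([0x30, 0x03, 0x02, 0x01, flags])

assert sd_flags_control_value(
    OWNER_SECURITY_INFORMATION | GROUP_SECURITY_INFORMATION | DACL_SECURITY_INFORMATION
) == bytes([48, 3, 2, 1, 7])

Requesting owner, group and DACL (but not SACL) keeps the query usable from a standard account, which is what the comment in the record's search() method points at.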
gydpku/PPTC
main.py
[ { "identifier": "ppt_executor", "path": "src/ppt_executor.py", "snippet": "SLIDE_HEIGHT = 6858000\nSLIDE_WIDTH = 9144000\nCENTER_TOP = 3429000\nCENTER_LEFT = 4572000\nSHAPE_HEIGHT = 900000\nSHAPE_WIDTH = 900000\nTABLE_HEIGHT = 370000 # per line\nCONTENT_HEIGHT = 4351338\nCONTENT_WIDTH = 7886700\nCONTENT_LEFT = 628650\nCONTENT_TOP = 1825625\nTITLE_HEIGHT = 1325563\nTITLE_WIDTH = 7886700\nTITLE_LEFT = 628650\nTITLE_TOP = 365126\nMARGIN = 600000\nCORNER_LEFT = 0 + MARGIN\nCORNER_TOP = 0 + MARGIN\nCORNER_RIGHT = SLIDE_WIDTH - MARGIN\nCORNER_BOTTOM = SLIDE_HEIGHT - MARGIN\nSHAPE_LEFT = CENTER_LEFT - SHAPE_WIDTH / 2\nSHAPE_TOP = CENTER_TOP - SHAPE_HEIGHT / 2\nPIC_LEFT = CONTENT_LEFT\nPIC_TOP = CONTENT_TOP \nPIC_PATH = \"./PPTC/\"+\"test/pics\"\ndef check_api_in_list(line, api_list):\ndef API_executor(lines, test=False,args=None):\ndef set_ppt(ppt_path):\ndef set_current_slide(idx):\ndef get_ppt():\ndef save_ppt(ppt_path):\ndef get_current_page_id():\ndef create_slide():\ndef move_to_next_slide():\ndef move_to_previous_slide():\ndef move_to_slide(idx):\ndef set_background_color(color):\ndef choose_title():\ndef choose_content():\ndef choose_textbox(idx=0):\ndef choose_picture(idx=0):\ndef choose_chart():\ndef choose_shape(shape_name):\ndef choose_table():\ndef choose_table_cell(row_id, column_id):\ndef insert_text(text):\ndef insert_bullet_point(text):\ndef insert_note(note):\ndef insert_textbox():\ndef delete_text():\ndef set_font_size(size):\ndef set_font_color(color):\ndef set_font_bold():\ndef set_font_italic():\ndef set_font_underline():\ndef set_font_style(font_name):\ndef set_line_space(line_space_level=0):\ndef text_align_left():\ndef text_align_center():\ndef text_align_right():\ndef insert_rectangle():\ndef insert_right_arrow():\ndef insert_rounded_rectangle():\ndef insert_triangle():\ndef insert_callout():\ndef insert_cloud():\ndef insert_star():\ndef insert_circle():\ndef insert_picture(picture_name):\ndef set_width(width):\ndef set_height(height):\ndef rotate_element(angle):\ndef set_fill_color(color):\ndef align_top_right_corner():\ndef align_top_left_corner():\ndef align_bottom_right_corner():\ndef align_bottom_left_corner():\ndef align_slide_left():\ndef align_slide_right():\ndef align_slide_top():\ndef align_slide_bottom():\ndef align_slide_center():\ndef set_left(left):\ndef set_top(top):\ndef insert_table(row_num, col_num):\ndef insert_table_row(row_data):\ndef insert_line_chart(data,series=None):\ndef insert_bar_chart(data,series=None):\ndef insert_pie_chart(data,series=None):\ndef set_chart_title(title):" }, { "identifier": "ppt_reader", "path": "src/ppt_reader.py", "snippet": "SCALE = 1000\ndef get_fill_color(shape):\n def __init__(self, shape):\n def text_info(self):\n def space_info(self):\n def size_info(self):\n def style_info(self):\n def discription(self):\n def __repr__(self):\n def __init__(self, shape, id=None):\n def style_info(self):\n def discription(self):\n def __init__(self, shape):\n def text_info(self):\n def discription(self):\n def __init__(self, shape):\n def text_info(self):\n def style_info(self):\n def discription(self):\n def __init__(self, shape, id=None):\n def text_info(self):\n def style_info(self):\n def discription(self):\n def __init__(self, shape):\n def text_info(self):\n def style_info(self):\n def __init__(self, shape):\n def text_info(self):\n def style_info(self):\ndef hasshape(shape_str, shape_list):\ndef 
get_content(need_text,need_style,need_position,need_title,need_content,need_picture,need_table,need_chart,need_textbox,need_shape):\ndef get_content_by_instructions(ppt_path, instruction, args, ppt):\ndef eval_get_contents(need_text=True, need_style=True, need_position=True, need_shape_list=None, ppt=None):\nclass BasicShape:\nclass Picture(BasicShape):\nclass Table(BasicShape):\nclass Chart(BasicShape):\nclass Textbox(BasicShape):\nclass Placeholder(BasicShape):\nclass AutoShape(BasicShape):" }, { "identifier": "openai_api", "path": "src/openai_api.py", "snippet": "def completions_with_backoff(**kwargs):\ndef chat_with_backoff(**kwargs):\ndef embeddings_with_backoff(**kwargs):\ndef query_azure_openai(query, model = \"vicuna-13b-v1.5-16k\",id=None):\n def truncate_text_with_token_count (text, max_tokens):\ndef rewrite(prompt):" }, { "identifier": "prompt_factor", "path": "src/prompt_factor.py", "snippet": "def get_instruction_to_API_code_prompt(selected_API, ppt_content, chat_history, instruction, ask_less_question=False, current_page=1):\ndef get_instruction_to_API_code_prompt2(selected_API, ppt_content, chat_history, instruction, ask_less_question=False, current_page=1):" }, { "identifier": "dataset", "path": "src/dataset.py", "snippet": "def load_data(path, dataset, args):\ndef load_data_json(path, dataset):" }, { "identifier": "api_selection", "path": "src/api_selection.py", "snippet": "K = None\n K = args.api_topk\ndef get_topk(scores, k=10):\ndef get_embedding(text):\ndef get_api_embedding(args):\ndef select_api(query, k=10):\ndef get_selected_apis(instruction, args):\ndef get_all_apis(args):\ndef prepare_embedding(args):" }, { "identifier": "utils", "path": "src/utils.py", "snippet": "def write_list(lst, filename):\ndef read_list(filename):\ndef write_lines(lst, path):\ndef read_lines(path):\ndef makedir(path):\ndef merge_list(lst):\ndef get_picture_name(labels):\ndef get_picture_name_list(args):\ndef parse_api(codes):\ndef prepare_exp_name(args):\ndef get_tokens(text):\ndef calc_api_cost(path):\ndef check_token(model, prompt):\ndef get_token(text, trunc_num, model):\ndef checkpoint(mode,args,idx,step):\ndef sorted_list(path):\ndef parse_train_json(path):\ndef parse_test_json(path):" }, { "identifier": "modeling", "path": "src/modeling.py", "snippet": "class PPT_assistant(object):\n def __init__(self, args=None):\n def planner(self, instruction):\n def api_selector(self, instruction):\n def content_selector(self, ppt_path, instruction, args, ppt):\n def api_executor(self, apis, test=False):\n def load_chat_history(self, instructions, labels):\n def load_ppt(self, path):\n def chat(self, user_instruction, ppt_path=None, verbose=False):" }, { "identifier": "evaluate", "path": "src/evaluate.py", "snippet": "def calc_token_cost(path):\ndef calc_acc(label_path, pred_path, instruction, additional_restrictions=[]):\ndef check_eval(args):\ndef get_error_case(args):\ndef eval(args):" }, { "identifier": "content_selection", "path": "src/content_selection.py", "snippet": "def select_information_type(query,args):\ndef select_shape_type(query,args):\ndef parse_answer(answer):" } ]
from src import ppt_executor, ppt_reader, openai_api, prompt_factor, dataset, api_selection, utils, modeling, evaluate, content_selection from tqdm import tqdm import argparse import os import jsonlines
4,373
utils.write_lines([prompt],args.user_path+f'PPT_Prompt_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.txt') with jsonlines.open(args.user_path+f"PPT_test_output/{set_name}/{args.exp_name}_session_{sess_id}.json", mode='a') as writer: data={'Turn':turn_id,'User instruction':instruction,'Feasible API sequence':label_api,'Reply':reply,'Pred API sequence':apis,'Pred File':f'PPT_Pred_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.pptx','Label File':label_file,'Prompt File':f'PPT_Prompt_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.txt'} writer.write(data) def test_planning(ppt_assistant): instructions, labels = dataset.load_data(args.data_path, args.dataset) f = open(f'test_system/planning_{args.dataset}.txt','a+') for idx, dialogue in tqdm(enumerate(instructions)): for step, instruction in enumerate(dialogue): instruction = instruction.split("##")[0] try: planned = ppt_assistant.planner(instruction) f.write(f'{idx}/{step}\n') f.write(instruction+'\n') f.write(str(planned)+'\n\n') f.flush() except: pass def test_api_selection(ppt_assistant): instructions, labels = dataset.load_data(args.data_path, args.dataset) f = open(f'test_system/api_selection_{args.api_topk}_{args.dataset}.txt','a+') cnt = 0 for idx, dialogue in tqdm(enumerate(instructions)): for step, instruction in enumerate(dialogue): label_apis = labels[idx][step] instruction = instruction.split("##")[0] # instructions = ppt_assistant.planner(instruction) # selected_apis = [] # for ins in instructions: # selected_apis.extend(ppt_assistant.api_selector(ins)) selected_apis = ppt_assistant.api_selector(instruction) selected_apis = [x.name for x in selected_apis] for xx in label_apis: if ('align_slide' in xx.split('(')[0]) or (xx.split('(')[0] in ['set_left','set_right','set_top','set_bottom']) or ('corner' in xx.split('(')[0]): continue if not xx.split('(')[0] in selected_apis: f.write(f'{idx}/{step}\n') f.write(instruction+'\n') f.write(xx.split('(')[0]+'\n') f.write(str(selected_apis)+'\n\n') f.flush() cnt += 1 print(cnt) def test_content_selection(ppt_assistant): instructions, labels = dataset.load_data(args.data_path, args.dataset) f = open(f'test_system/content_selection_{args.dataset}.txt','a+') for idx, dialogue in tqdm(enumerate(instructions)): for step, instruction in enumerate(dialogue): instruction = instruction.split("##")[0] prompt = prompt_factor.PPT_content_selection_prompt.format(instruction) reply = openai_api.query_azure_openai(prompt, model='turbo') f.write(f'{idx}/{step}\n') f.write(instruction+'\n') f.write(reply+'\n\n') if __name__ == "__main__": parser = argparse.ArgumentParser() # PPT assistant parser.add_argument("--data_path", default="test", type=str, help="The data path to load the instructions") parser.add_argument("--dataset", default="short", type=str, help="short/long") parser.add_argument("--model_id", default="None", type=str, help="short/long") parser.add_argument("--user_path", default='./PPTC/', type=str, help="the user storage file path ") parser.add_argument("--save_path", default="test_pptx_data", type=str, help="the path to save the intermediate ppts.") # mode parser.add_argument("--prepare", default=False, action='store_true', help='whether to prepare the data for the model') parser.add_argument("--eval", default=False, action='store_true', help='whether to evaluate the pptx file generated by the model') parser.add_argument("--test", default=False, action='store_true', help='whether to test on the instruction data loaded from data_path') parser.add_argument("--tf", default=False, 
action='store_true', help='whether to use teacher forcing mode') parser.add_argument("--sess", default=False, action='store_true', help='whether to test from session level') parser.add_argument("--resume", default=False, action='store_true', help='whether to continue generation from the last unfinished instruction') # modeling parser.add_argument("--model", default="turbo",type=str, help="turbo/gpt4/text3") parser.add_argument("--planning", default=False, action='store_true', help="whether to apply the planning module") parser.add_argument("--api_selection", default=False, action='store_true', help="whether to apply the api selection module") parser.add_argument("--api_topk", default=10, type=int, help="How many apis to retrieve from the api pool") parser.add_argument("--content_selection", default=False, action='store_true', help="whether to apply the shape selection module") # api update/lack parser.add_argument("--api_lack", default=False, action='store_true', help='whether to test in the api lack setting') parser.add_argument("--api_update", default=False, action='store_true', help='whether to test in the api update setting') parser.add_argument("--second", default=False, action='store_true', help='second test') parser.add_argument("--robust", default=False, action='store_true', help='whether to test in robust data') parser.add_argument("--robust_num", default=0, type=int, help="which robusted data") parser.add_argument("--noisy", default=False, action='store_true', help='whether to test in noisy data') args = parser.parse_args() args.exp_name = utils.prepare_exp_name(args) args.save_path = os.path.join(args.save_path,args.dataset) api_selection.prepare_embedding(args)
def prepare_data(ppt_assistant, args): instructions, labels = dataset.load_data(args.user_path+args.data_path, args.dataset, args) print(f"#Dialogues: {len(instructions)}") for idx, dialogue in enumerate(instructions): if args.dataset == 'long': ppt_assistant.load_ppt(os.path.join(args.user_path+'long_slides',f'{idx}.pptx')) else: ppt_assistant.load_ppt(None) set_name = 'Edit_PPT_template' if args.dataset == 'long' else 'Create_new_slides' if args.api_lack: utils.makedir(args.user_path+f"PPT_Base_File/{set_name}_API_lack/") utils.makedir(args.user_path+f"PPT_Label_File/{set_name}_API_lack/") else: utils.makedir(args.user_path+f"PPT_Base_File/{set_name}/") utils.makedir(args.user_path+f"PPT_Label_File/{set_name}/") for step, instruction in enumerate(dialogue): instruction = instruction.split("##")[0] label_apis = utils.merge_list(labels[idx][:step]) if args.dataset == 'long': ppt_assistant.load_ppt(os.path.join(args.user_path+'long_slides',f'{idx}.pptx')) else: ppt_assistant.load_ppt(None) ppt_assistant.api_executor(label_apis,test=False) if args.api_lack: ppt_executor.save_ppt(args.user_path+f"PPT_Base_File/{set_name}_API_lack/{idx}_{step}.pptx") else: ppt_executor.save_ppt(args.user_path+f"PPT_Base_File/{set_name}/{idx}_{step}.pptx") ppt_assistant.api_executor(labels[idx][step],test=False) if args.api_lack: ppt_executor.save_ppt(args.user_path+f"PPT_Label_File/{set_name}_API_lack/{idx}_{step}.pptx") else: ppt_executor.save_ppt(args.user_path+f"PPT_Label_File/{set_name}/{idx}_{step}.pptx") print(f"{idx}/{step} done!") def test(ppt_assistant, args): set_name = 'Create_new_slides' if args.dataset == 'short' else 'Edit_PPT_template' utils.makedir(args.user_path+f'PPT_Pred_File/{set_name}') utils.makedir(args.user_path+f'PPT_Prompt_File/{set_name}') for sess_id, session_path in enumerate(utils.sorted_list(args.user_path+f'PPT_test_input/{set_name}')): session = utils.parse_train_json(args.user_path+f'PPT_test_input/{set_name}/{session_path}') chat_history = [] for turn_id, turn in tqdm(enumerate(session)): print(f"{sess_id}/{turn_id}") if args.resume: if args.tf and os.path.exists(args.user_path+f'PPT_Pred_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.pptx'): print('Exists!') continue if args.sess and os.path.exists(args.user_path+f'PPT_Pred_File/{set_name}/{args.exp_name}_{sess_id}_{len(session)-1}.pptx'): print('Exists!') continue turn_id, instruction, label_api, base_ppt_path, label_ppt_path, api_lack_base_ppt_path, api_lack_label_ppt_path = turn if turn_id == 0 and args.sess: if args.api_lack: ppt_assistant.load_ppt(args.user_path+api_lack_base_ppt_path) label_file = api_lack_label_ppt_path else: ppt_assistant.load_ppt(args.user_path+base_ppt_path) label_file = label_ppt_path splitted_instruction = instruction.split("##")[0] if args.tf: if args.api_lack: ppt_assistant.load_ppt(args.user_path+api_lack_base_ppt_path) label_file = api_lack_label_ppt_path else: ppt_assistant.load_ppt(args.user_path+base_ppt_path) label_file = label_ppt_path ppt_assistant.load_chat_history([x[0] for x in chat_history],[x[1].strip(';').split(';') for x in chat_history]) prompt, reply = ppt_assistant.chat(splitted_instruction, ppt_path=args.user_path+base_ppt_path, verbose=False) apis = utils.parse_api(reply) ppt_assistant.api_executor(apis,test=True) ppt_executor.save_ppt(args.user_path+f'PPT_Pred_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.pptx') utils.write_lines([prompt],args.user_path+f'PPT_Prompt_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.txt') #import pdb #pdb.set_trace() with 
jsonlines.open(args.user_path+f"PPT_test_output/{set_name}/{args.exp_name}_session_{sess_id}.json", mode='a') as writer: data={'Turn':turn_id,'User instruction':instruction,'Feasible API sequence':label_api,'Reply':reply,'Pred API sequence':apis,'Pred File':f'PPT_Pred_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.pptx','Label File':label_file,'Prompt File':f'PPT_Prompt_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.txt'} writer.write(data) chat_history.append([splitted_instruction, label_api]) elif args.sess: prompt, reply = ppt_assistant.chat(instruction, ppt_path=None, verbose=False) apis = utils.parse_api(reply) ppt_assistant.api_executor(apis,test=True) ppt_executor.save_ppt(args.user_path+f'PPT_Pred_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.pptx') utils.write_lines([prompt],args.user_path+f'PPT_Prompt_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.txt') with jsonlines.open(args.user_path+f"PPT_test_output/{set_name}/{args.exp_name}_session_{sess_id}.json", mode='a') as writer: data={'Turn':turn_id,'User instruction':instruction,'Feasible API sequence':label_api,'Reply':reply,'Pred API sequence':apis,'Pred File':f'PPT_Pred_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.pptx','Label File':label_file,'Prompt File':f'PPT_Prompt_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.txt'} writer.write(data) def test_planning(ppt_assistant): instructions, labels = dataset.load_data(args.data_path, args.dataset) f = open(f'test_system/planning_{args.dataset}.txt','a+') for idx, dialogue in tqdm(enumerate(instructions)): for step, instruction in enumerate(dialogue): instruction = instruction.split("##")[0] try: planned = ppt_assistant.planner(instruction) f.write(f'{idx}/{step}\n') f.write(instruction+'\n') f.write(str(planned)+'\n\n') f.flush() except: pass def test_api_selection(ppt_assistant): instructions, labels = dataset.load_data(args.data_path, args.dataset) f = open(f'test_system/api_selection_{args.api_topk}_{args.dataset}.txt','a+') cnt = 0 for idx, dialogue in tqdm(enumerate(instructions)): for step, instruction in enumerate(dialogue): label_apis = labels[idx][step] instruction = instruction.split("##")[0] # instructions = ppt_assistant.planner(instruction) # selected_apis = [] # for ins in instructions: # selected_apis.extend(ppt_assistant.api_selector(ins)) selected_apis = ppt_assistant.api_selector(instruction) selected_apis = [x.name for x in selected_apis] for xx in label_apis: if ('align_slide' in xx.split('(')[0]) or (xx.split('(')[0] in ['set_left','set_right','set_top','set_bottom']) or ('corner' in xx.split('(')[0]): continue if not xx.split('(')[0] in selected_apis: f.write(f'{idx}/{step}\n') f.write(instruction+'\n') f.write(xx.split('(')[0]+'\n') f.write(str(selected_apis)+'\n\n') f.flush() cnt += 1 print(cnt) def test_content_selection(ppt_assistant): instructions, labels = dataset.load_data(args.data_path, args.dataset) f = open(f'test_system/content_selection_{args.dataset}.txt','a+') for idx, dialogue in tqdm(enumerate(instructions)): for step, instruction in enumerate(dialogue): instruction = instruction.split("##")[0] prompt = prompt_factor.PPT_content_selection_prompt.format(instruction) reply = openai_api.query_azure_openai(prompt, model='turbo') f.write(f'{idx}/{step}\n') f.write(instruction+'\n') f.write(reply+'\n\n') if __name__ == "__main__": parser = argparse.ArgumentParser() # PPT assistant parser.add_argument("--data_path", default="test", type=str, help="The data path to load the instructions") 
parser.add_argument("--dataset", default="short", type=str, help="short/long") parser.add_argument("--model_id", default="None", type=str, help="short/long") parser.add_argument("--user_path", default='./PPTC/', type=str, help="the user storage file path ") parser.add_argument("--save_path", default="test_pptx_data", type=str, help="the path to save the intermediate ppts.") # mode parser.add_argument("--prepare", default=False, action='store_true', help='whether to prepare the data for the model') parser.add_argument("--eval", default=False, action='store_true', help='whether to evaluate the pptx file generated by the model') parser.add_argument("--test", default=False, action='store_true', help='whether to test on the instruction data loaded from data_path') parser.add_argument("--tf", default=False, action='store_true', help='whether to use teacher forcing mode') parser.add_argument("--sess", default=False, action='store_true', help='whether to test from session level') parser.add_argument("--resume", default=False, action='store_true', help='whether to continue generation from the last unfinished instruction') # modeling parser.add_argument("--model", default="turbo",type=str, help="turbo/gpt4/text3") parser.add_argument("--planning", default=False, action='store_true', help="whether to apply the planning module") parser.add_argument("--api_selection", default=False, action='store_true', help="whether to apply the api selection module") parser.add_argument("--api_topk", default=10, type=int, help="How many apis to retrieve from the api pool") parser.add_argument("--content_selection", default=False, action='store_true', help="whether to apply the shape selection module") # api update/lack parser.add_argument("--api_lack", default=False, action='store_true', help='whether to test in the api lack setting') parser.add_argument("--api_update", default=False, action='store_true', help='whether to test in the api update setting') parser.add_argument("--second", default=False, action='store_true', help='second test') parser.add_argument("--robust", default=False, action='store_true', help='whether to test in robust data') parser.add_argument("--robust_num", default=0, type=int, help="which robusted data") parser.add_argument("--noisy", default=False, action='store_true', help='whether to test in noisy data') args = parser.parse_args() args.exp_name = utils.prepare_exp_name(args) args.save_path = os.path.join(args.save_path,args.dataset) api_selection.prepare_embedding(args)
ppt_assistant = modeling.PPT_assistant(args)
7
2023-10-25 13:14:46+00:00
8k
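
In the test() function of the PPTC record above, each chat-history entry pairs a user instruction with that turn's feasible API sequence stored as a single semicolon-delimited string, which is split back into individual calls (x[1].strip(';').split(';')) before being replayed under teacher forcing. A minimal sketch of that split, using API names that appear in the record's ppt_executor snippet (the concrete sequence is illustrative):

label_api = 'create_slide();choose_title();insert_text("Welcome");'
calls = label_api.strip(';').split(';')
# -> ['create_slide()', 'choose_title()', 'insert_text("Welcome")']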
nv-tlabs/pacer
pacer/env/tasks/humanoid_pedestrain_terrain.py
[ { "identifier": "flags", "path": "pacer/utils/flags.py", "snippet": "class Flags(object):\n def __init__(self, items):" }, { "identifier": "quat_inverse", "path": "poselib/poselib/core/rotation3d.py", "snippet": "@torch.jit.script\ndef quat_inverse(x):\n \"\"\"\n The inverse of the rotation\n \"\"\"\n return quat_conjugate(x)" }, { "identifier": "quat_mul", "path": "poselib/poselib/core/rotation3d.py", "snippet": "@torch.jit.script\ndef quat_mul(a, b):\n \"\"\"\n quaternion multiplication\n \"\"\"\n x1, y1, z1, w1 = a[..., 0], a[..., 1], a[..., 2], a[..., 3]\n x2, y2, z2, w2 = b[..., 0], b[..., 1], b[..., 2], b[..., 3]\n\n w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2\n x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2\n y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2\n z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2\n\n return torch.stack([x, y, z, w], dim=-1)" }, { "identifier": "agt_color", "path": "pacer/utils/draw_utils.py", "snippet": "def agt_color(aidx):\n return matplotlib.colors.to_rgb(plt.rcParams['axes.prop_cycle'].by_key()['color'][aidx % 10])" }, { "identifier": "compute_humanoid_observations_smpl_max", "path": "pacer/env/tasks/humanoid.py", "snippet": "@torch.jit.script\ndef compute_humanoid_observations_smpl_max(body_pos, body_rot, body_vel,\n body_ang_vel, smpl_params, limb_weight_params,\n local_root_obs, root_height_obs,\n upright, has_smpl_params, has_limb_weight_params):\n # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, bool, bool, bool, bool, bool) -> Tensor\n root_pos = body_pos[:, 0, :]\n root_rot = body_rot[:, 0, :]\n\n root_h = root_pos[:, 2:3]\n if not upright:\n root_rot = remove_base_rot(root_rot)\n heading_rot_inv = torch_utils.calc_heading_quat_inv(root_rot)\n\n if (not root_height_obs):\n root_h_obs = torch.zeros_like(root_h)\n else:\n root_h_obs = root_h\n\n heading_rot_inv_expand = heading_rot_inv.unsqueeze(-2)\n heading_rot_inv_expand = heading_rot_inv_expand.repeat((1, body_pos.shape[1], 1))\n flat_heading_rot_inv = heading_rot_inv_expand.reshape(heading_rot_inv_expand.shape[0] * heading_rot_inv_expand.shape[1],heading_rot_inv_expand.shape[2])\n\n root_pos_expand = root_pos.unsqueeze(-2)\n local_body_pos = body_pos - root_pos_expand\n flat_local_body_pos = local_body_pos.reshape(local_body_pos.shape[0] * local_body_pos.shape[1],local_body_pos.shape[2])\n flat_local_body_pos = torch_utils.my_quat_rotate(flat_heading_rot_inv, flat_local_body_pos)\n local_body_pos = flat_local_body_pos.reshape(local_body_pos.shape[0],local_body_pos.shape[1] * local_body_pos.shape[2])\n local_body_pos = local_body_pos[..., 3:] # remove root pos\n\n flat_body_rot = body_rot.reshape(body_rot.shape[0] * body_rot.shape[1], body_rot.shape[2])\n flat_local_body_rot = quat_mul(flat_heading_rot_inv, flat_body_rot)\n flat_local_body_rot_obs = torch_utils.quat_to_tan_norm(flat_local_body_rot)\n local_body_rot_obs = flat_local_body_rot_obs.reshape(\n body_rot.shape[0],\n body_rot.shape[1] * flat_local_body_rot_obs.shape[1])\n\n if not (local_root_obs): ##### ZL Here: it should be \"not local_root_obs\". 
If local, it shouldn't be overriden\n root_rot_obs = torch_utils.quat_to_tan_norm(root_rot)\n local_body_rot_obs[..., 0:6] = root_rot_obs\n\n flat_body_vel = body_vel.reshape(body_vel.shape[0] * body_vel.shape[1], body_vel.shape[2])\n flat_local_body_vel = torch_utils.my_quat_rotate(flat_heading_rot_inv, flat_body_vel)\n local_body_vel = flat_local_body_vel.reshape(\n body_vel.shape[0], body_vel.shape[1] * body_vel.shape[2])\n\n flat_body_ang_vel = body_ang_vel.reshape(\n body_ang_vel.shape[0] * body_ang_vel.shape[1], body_ang_vel.shape[2])\n flat_local_body_ang_vel = torch_utils.my_quat_rotate(\n flat_heading_rot_inv, flat_body_ang_vel)\n local_body_ang_vel = flat_local_body_ang_vel.reshape(\n body_ang_vel.shape[0], body_ang_vel.shape[1] * body_ang_vel.shape[2])\n\n obs_list = []\n if root_height_obs: obs_list.append(root_h_obs)\n obs_list += [\n local_body_pos, local_body_rot_obs, local_body_vel, local_body_ang_vel\n ]\n if has_smpl_params: obs_list.append(smpl_params[:, :-6])\n if has_limb_weight_params: obs_list.append(limb_weight_params)\n\n obs = torch.cat(obs_list, dim=-1)\n return obs" }, { "identifier": "compute_humanoid_observations_smpl", "path": "pacer/env/tasks/humanoid.py", "snippet": "@torch.jit.script\ndef compute_humanoid_observations_smpl(root_pos, root_rot, root_vel,\n root_ang_vel, dof_pos, dof_vel,\n key_body_pos, dof_obs_size, dof_offsets,\n smpl_params, local_root_obs,\n root_height_obs, upright,\n has_smpl_params):\n # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int, List[int], Tensor, bool, bool,bool, bool) -> Tensor\n root_h = root_pos[:, 2:3]\n if not upright:\n root_rot = remove_base_rot(root_rot)\n heading_rot_inv = torch_utils.calc_heading_quat_inv(root_rot)\n\n if (local_root_obs):\n root_rot_obs = quat_mul(heading_rot_inv, root_rot)\n else:\n root_rot_obs = root_rot\n \n \n root_rot_obs = torch_utils.quat_to_tan_norm(root_rot_obs)\n\n if (not root_height_obs):\n root_h_obs = torch.zeros_like(root_h)\n else:\n root_h_obs = root_h\n\n local_root_vel = torch_utils.my_quat_rotate(heading_rot_inv, root_vel)\n local_root_ang_vel = torch_utils.my_quat_rotate(heading_rot_inv, root_ang_vel)\n\n root_pos_expand = root_pos.unsqueeze(-2)\n local_key_body_pos = key_body_pos - root_pos_expand\n\n heading_rot_expand = heading_rot_inv.unsqueeze(-2)\n heading_rot_expand = heading_rot_expand.repeat( (1, local_key_body_pos.shape[1], 1))\n flat_end_pos = local_key_body_pos.view(\n local_key_body_pos.shape[0] * local_key_body_pos.shape[1],\n local_key_body_pos.shape[2])\n flat_heading_rot = heading_rot_expand.view(\n heading_rot_expand.shape[0] * heading_rot_expand.shape[1],\n heading_rot_expand.shape[2])\n local_end_pos = torch_utils.my_quat_rotate(flat_heading_rot, flat_end_pos)\n flat_local_key_pos = local_end_pos.view(\n local_key_body_pos.shape[0],\n local_key_body_pos.shape[1] * local_key_body_pos.shape[2])\n\n dof_obs = dof_to_obs(dof_pos, dof_obs_size, dof_offsets)\n\n obs_list = []\n if root_height_obs: obs_list.append(root_h_obs)\n obs_list += [\n root_rot_obs,\n local_root_vel,\n local_root_ang_vel,\n dof_obs,\n dof_vel,\n flat_local_key_pos,\n ]\n if has_smpl_params: obs_list.append(smpl_params)\n obs = torch.cat(obs_list, dim=-1)\n\n return obs" }, { "identifier": "compute_humanoid_observations_max", "path": "pacer/env/tasks/humanoid.py", "snippet": "@torch.jit.script\ndef compute_humanoid_observations_max(body_pos, body_rot, body_vel,\n body_ang_vel, local_root_obs,\n root_height_obs):\n # type: (Tensor, Tensor, Tensor, Tensor, bool, bool) -> 
Tensor\n root_pos = body_pos[:, 0, :]\n root_rot = body_rot[:, 0, :]\n\n root_h = root_pos[:, 2:3]\n heading_rot = torch_utils.calc_heading_quat_inv(root_rot)\n\n if (not root_height_obs):\n root_h_obs = torch.zeros_like(root_h)\n else:\n root_h_obs = root_h\n\n heading_rot_expand = heading_rot.unsqueeze(-2)\n heading_rot_expand = heading_rot_expand.repeat((1, body_pos.shape[1], 1))\n flat_heading_rot = heading_rot_expand.reshape(\n heading_rot_expand.shape[0] * heading_rot_expand.shape[1],\n heading_rot_expand.shape[2])\n\n root_pos_expand = root_pos.unsqueeze(-2)\n local_body_pos = body_pos - root_pos_expand\n flat_local_body_pos = local_body_pos.reshape(\n local_body_pos.shape[0] * local_body_pos.shape[1],\n local_body_pos.shape[2])\n flat_local_body_pos = torch_utils.my_quat_rotate(flat_heading_rot,\n flat_local_body_pos)\n local_body_pos = flat_local_body_pos.reshape(\n local_body_pos.shape[0],\n local_body_pos.shape[1] * local_body_pos.shape[2])\n local_body_pos = local_body_pos[..., 3:] # remove root pos\n\n flat_body_rot = body_rot.reshape(body_rot.shape[0] * body_rot.shape[1],\n body_rot.shape[2])\n flat_local_body_rot = quat_mul(flat_heading_rot, flat_body_rot)\n flat_local_body_rot_obs = torch_utils.quat_to_tan_norm(flat_local_body_rot)\n local_body_rot_obs = flat_local_body_rot_obs.reshape(\n body_rot.shape[0],\n body_rot.shape[1] * flat_local_body_rot_obs.shape[1])\n\n if (local_root_obs): # This is wrong. \n print(\"this is wrong!!!\")\n print(\"this is wrong!!!\")\n print(\"this is wrong!!!\")\n root_rot_obs = torch_utils.quat_to_tan_norm(root_rot)\n local_body_rot_obs[..., 0:6] = root_rot_obs\n\n flat_body_vel = body_vel.reshape(body_vel.shape[0] * body_vel.shape[1],\n body_vel.shape[2])\n flat_local_body_vel = torch_utils.my_quat_rotate(flat_heading_rot,\n flat_body_vel)\n local_body_vel = flat_local_body_vel.reshape(\n body_vel.shape[0], body_vel.shape[1] * body_vel.shape[2])\n\n flat_body_ang_vel = body_ang_vel.reshape(\n body_ang_vel.shape[0] * body_ang_vel.shape[1], body_ang_vel.shape[2])\n flat_local_body_ang_vel = torch_utils.my_quat_rotate(\n flat_heading_rot, flat_body_ang_vel)\n local_body_ang_vel = flat_local_body_ang_vel.reshape(\n body_ang_vel.shape[0], body_ang_vel.shape[1] * body_ang_vel.shape[2])\n\n obs = torch.cat((root_h_obs, local_body_pos, local_body_rot_obs,\n local_body_vel, local_body_ang_vel),\n dim=-1)\n return obs" }, { "identifier": "compute_humanoid_observations", "path": "pacer/env/tasks/humanoid.py", "snippet": "@torch.jit.script\ndef compute_humanoid_observations(root_pos, root_rot, root_vel, root_ang_vel,\n dof_pos, dof_vel, key_body_pos,\n local_root_obs, root_height_obs,\n dof_obs_size, dof_offsets):\n # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, bool, bool, int, List[int]) -> Tensor\n root_h = root_pos[:, 2:3]\n heading_rot = torch_utils.calc_heading_quat_inv(root_rot)\n\n if (local_root_obs):\n root_rot_obs = quat_mul(heading_rot, root_rot)\n else:\n root_rot_obs = root_rot\n root_rot_obs = torch_utils.quat_to_tan_norm(root_rot_obs)\n\n if (not root_height_obs):\n root_h_obs = torch.zeros_like(root_h)\n else:\n root_h_obs = root_h\n\n local_root_vel = torch_utils.my_quat_rotate(heading_rot, root_vel)\n local_root_ang_vel = torch_utils.my_quat_rotate(heading_rot, root_ang_vel)\n\n root_pos_expand = root_pos.unsqueeze(-2)\n local_key_body_pos = key_body_pos - root_pos_expand\n\n heading_rot_expand = heading_rot.unsqueeze(-2)\n heading_rot_expand = heading_rot_expand.repeat(\n (1, local_key_body_pos.shape[1], 1))\n 
flat_end_pos = local_key_body_pos.view(\n local_key_body_pos.shape[0] * local_key_body_pos.shape[1],\n local_key_body_pos.shape[2])\n flat_heading_rot = heading_rot_expand.view(\n heading_rot_expand.shape[0] * heading_rot_expand.shape[1],\n heading_rot_expand.shape[2])\n local_end_pos = torch_utils.my_quat_rotate(flat_heading_rot, flat_end_pos)\n flat_local_key_pos = local_end_pos.view(\n local_key_body_pos.shape[0],\n local_key_body_pos.shape[1] * local_key_body_pos.shape[2])\n\n dof_obs = dof_to_obs(dof_pos, dof_obs_size, dof_offsets)\n\n obs = torch.cat((root_h_obs, root_rot_obs, local_root_vel,\n local_root_ang_vel, dof_obs, dof_vel, flat_local_key_pos),\n dim=-1)\n return obs" }, { "identifier": "ENABLE_MAX_COORD_OBS", "path": "pacer/env/tasks/humanoid.py", "snippet": "ENABLE_MAX_COORD_OBS = True" } ]
from shutil import ExecError from isaacgym import gymapi from isaacgym.torch_utils import * from env.tasks.humanoid import dof_to_obs from env.tasks.humanoid_amp import HumanoidAMP, remove_base_rot from pacer.utils.flags import flags from utils import torch_utils from isaacgym import gymtorch from poselib.poselib.core.rotation3d import quat_inverse, quat_mul from tqdm import tqdm from scipy.spatial.transform import Rotation as sRot from typing import OrderedDict from pacer.utils.draw_utils import agt_color from pacer.env.tasks.humanoid import compute_humanoid_observations_smpl_max, compute_humanoid_observations_smpl,\ compute_humanoid_observations_max, compute_humanoid_observations,\ ENABLE_MAX_COORD_OBS from isaacgym.terrain_utils import * from pacer.utils.draw_utils import * import torch import numpy as np import env.tasks.humanoid_traj as humanoid_traj import joblib import matplotlib.pyplot as plt
4,958
self.sensor_extent = cfg["env"].get("sensor_extent", 2) self.sensor_res = cfg["env"].get("sensor_res", 32) self.power_reward = cfg["env"].get("power_reward", False) self.power_coefficient = cfg["env"].get("power_coefficient", 0.0005) self.fuzzy_target = cfg["env"].get("fuzzy_target", False) self.square_height_points = self.init_square_height_points() self.terrain_obs_type = self.cfg['env'].get("terrain_obs_type", "square") self.terrain_obs = self.cfg['env'].get("terrain_obs", False) self.terrain_obs_root = self.cfg['env'].get("terrain_obs_root", "pelvis") if self.terrain_obs_type == "fov": self.height_points = self.init_fov_height_points() elif self.terrain_obs_type == "square_fov": self.height_points = self.init_square_fov_height_points() elif self.terrain_obs_type == "square": self.height_points = self.square_height_points self.root_points = self.init_root_points() self.center_height_points = self.init_center_height_points() self.height_meas_scale = 5 self.show_sensors = self.cfg['args'].show_sensors if (not self.headless) and self.show_sensors: self._sensor_handles = [[] for _ in range(self.num_envs)] super().__init__(cfg=cfg, sim_params=sim_params, physics_engine=physics_engine, device_type=device_type, device_id=device_id, headless=headless) self.reward_raw = torch.zeros((self.num_envs, 2)).to(self.device) if (not self.headless) and self.show_sensors: self._build_sensor_state_tensors() return def _build_env(self, env_id, env_ptr, humanoid_asset): super()._build_env(env_id, env_ptr, humanoid_asset) if (not self.headless) and self.show_sensors: self._load_sensor_asset() self._build_sensor(env_id, env_ptr) return def _build_sensor(self, env_id, env_ptr): default_pose = gymapi.Transform() for i in range(self.num_height_points): marker_handle = self.gym.create_actor(env_ptr, self._sensor_asset, default_pose, "marker", self.num_envs + 1, 0, 0) self.gym.set_rigid_body_color(env_ptr, marker_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*agt_color(env_id))) self._sensor_handles[env_id].append(marker_handle) return def _build_sensor_state_tensors(self): num_actors = self._root_states.shape[0] // self.num_envs self._sensor_states = self._root_states.view(self.num_envs, num_actors, self._root_states.shape[-1])[..., 11:(11 + self.num_height_points), :] self._sensor_pos = self._sensor_states[..., :3] self._sensor_actor_ids = self._humanoid_actor_ids.unsqueeze(-1) + to_torch(self._sensor_handles, dtype=torch.int32, device=self.device) self._sensor_actor_ids = self._sensor_actor_ids.flatten() return def _load_sensor_asset(self): asset_root = "pacer/data/assets/mjcf/" asset_file = "sensor_marker.urdf" asset_options = gymapi.AssetOptions() asset_options.angular_damping = 0.01 asset_options.linear_damping = 0.01 asset_options.max_angular_velocity = 100.0 asset_options.density = 1.0 asset_options.fix_base_link = True asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE self._sensor_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) return def _draw_task(self): # cols = np.array([[1.0, 0.0, 0.0]], dtype=np.float32) norm_states = self.get_head_pose() base_quat = norm_states[:, 3:7] if not self._has_upright_start: base_quat = remove_base_rot(base_quat) heading_rot = torch_utils.calc_heading_quat(base_quat) points = quat_apply( heading_rot.repeat(1, self.num_height_points).reshape(-1, 4), self.height_points) + (norm_states[:, :3]).unsqueeze(1) if (not self.headless) and self.show_sensors: self._sensor_pos[:] = points # self._sensor_pos[..., 2] += 0.3 # self._sensor_pos[..., 2] 
-= 5 traj_samples = self._fetch_traj_samples() self._marker_pos[:] = traj_samples self._marker_pos[..., 2] = self._humanoid_root_states[..., 2:3] # jp hack # ZL hack # self._marker_pos[..., 2] = 0.89 # self._marker_pos[..., 2] = 0 if (not self.headless) and self.show_sensors: comb_idx = torch.cat([self._sensor_actor_ids, self._marker_actor_ids]) else: comb_idx = torch.cat([self._marker_actor_ids])
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. HACK_MOTION_SYNC = False class HumanoidPedestrianTerrain(humanoid_traj.HumanoidTraj): def __init__(self, cfg, sim_params, physics_engine, device_type, device_id, headless): ## ZL Hack to get the height map to load. self.real_mesh = cfg['args'].real_mesh self.device = "cpu" self.device_type = device_type if device_type == "cuda" or device_type == "GPU": self.device = "cuda" + ":" + str(device_id) self.load_smpl_configs(cfg) self.cfg = cfg self.num_envs = cfg["env"]["numEnvs"] self.device_type = cfg.get("device_type", "cuda") self.device_id = cfg.get("device_id", 0) self.headless = cfg["headless"] self.sensor_extent = cfg["env"].get("sensor_extent", 2) self.sensor_res = cfg["env"].get("sensor_res", 32) self.power_reward = cfg["env"].get("power_reward", False) self.power_coefficient = cfg["env"].get("power_coefficient", 0.0005) self.fuzzy_target = cfg["env"].get("fuzzy_target", False) self.square_height_points = self.init_square_height_points() self.terrain_obs_type = self.cfg['env'].get("terrain_obs_type", "square") self.terrain_obs = self.cfg['env'].get("terrain_obs", False) self.terrain_obs_root = self.cfg['env'].get("terrain_obs_root", "pelvis") if self.terrain_obs_type == "fov": self.height_points = self.init_fov_height_points() elif self.terrain_obs_type == "square_fov": self.height_points = self.init_square_fov_height_points() elif self.terrain_obs_type == "square": self.height_points = self.square_height_points self.root_points = self.init_root_points() self.center_height_points = self.init_center_height_points() self.height_meas_scale = 5 self.show_sensors = self.cfg['args'].show_sensors if (not self.headless) and self.show_sensors: self._sensor_handles = [[] for _ in range(self.num_envs)] super().__init__(cfg=cfg, sim_params=sim_params, physics_engine=physics_engine, device_type=device_type, device_id=device_id, headless=headless) self.reward_raw = torch.zeros((self.num_envs, 2)).to(self.device) if (not self.headless) and self.show_sensors: self._build_sensor_state_tensors() return def _build_env(self, env_id, env_ptr, humanoid_asset): super()._build_env(env_id, env_ptr, humanoid_asset) if (not self.headless) and self.show_sensors: self._load_sensor_asset() self._build_sensor(env_id, env_ptr) return def _build_sensor(self, env_id, env_ptr): default_pose = gymapi.Transform() for i in range(self.num_height_points): marker_handle = self.gym.create_actor(env_ptr, self._sensor_asset, default_pose, "marker", self.num_envs + 1, 0, 0) self.gym.set_rigid_body_color(env_ptr, marker_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*agt_color(env_id))) self._sensor_handles[env_id].append(marker_handle) return def _build_sensor_state_tensors(self): num_actors = self._root_states.shape[0] // self.num_envs self._sensor_states = self._root_states.view(self.num_envs, num_actors, self._root_states.shape[-1])[..., 11:(11 + self.num_height_points), :] self._sensor_pos = self._sensor_states[..., :3] self._sensor_actor_ids = self._humanoid_actor_ids.unsqueeze(-1) + to_torch(self._sensor_handles, dtype=torch.int32, device=self.device) self._sensor_actor_ids = self._sensor_actor_ids.flatten() 
return def _load_sensor_asset(self): asset_root = "pacer/data/assets/mjcf/" asset_file = "sensor_marker.urdf" asset_options = gymapi.AssetOptions() asset_options.angular_damping = 0.01 asset_options.linear_damping = 0.01 asset_options.max_angular_velocity = 100.0 asset_options.density = 1.0 asset_options.fix_base_link = True asset_options.default_dof_drive_mode = gymapi.DOF_MODE_NONE self._sensor_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options) return def _draw_task(self): # cols = np.array([[1.0, 0.0, 0.0]], dtype=np.float32) norm_states = self.get_head_pose() base_quat = norm_states[:, 3:7] if not self._has_upright_start: base_quat = remove_base_rot(base_quat) heading_rot = torch_utils.calc_heading_quat(base_quat) points = quat_apply( heading_rot.repeat(1, self.num_height_points).reshape(-1, 4), self.height_points) + (norm_states[:, :3]).unsqueeze(1) if (not self.headless) and self.show_sensors: self._sensor_pos[:] = points # self._sensor_pos[..., 2] += 0.3 # self._sensor_pos[..., 2] -= 5 traj_samples = self._fetch_traj_samples() self._marker_pos[:] = traj_samples self._marker_pos[..., 2] = self._humanoid_root_states[..., 2:3] # jp hack # ZL hack # self._marker_pos[..., 2] = 0.89 # self._marker_pos[..., 2] = 0 if (not self.headless) and self.show_sensors: comb_idx = torch.cat([self._sensor_actor_ids, self._marker_actor_ids]) else: comb_idx = torch.cat([self._marker_actor_ids])
if flags.show_traj:
0
2023-10-31 20:47:12+00:00
8k
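
The pacer record above keeps a fixed grid of terrain "sensor" points in the humanoid's local frame (sensor_extent and sensor_res default to 2 and 32 in __init__; the grid construction itself is not shown in the record) and, in _draw_task, rotates those points by the current heading quaternion before adding the root position. A minimal planar sketch of that transform in numpy, keeping only the xy part and assuming a square grid (a simplification of the record's quat_apply call; the function name and defaults below are illustrative):

import numpy as np

def world_sensor_points(root_xy, heading, extent=2.0, res=32):
    xs = np.linspace(-extent, extent, res)
    grid = np.stack(np.meshgrid(xs, xs), axis=-1).reshape(-1, 2)  # local-frame xy samples
    c, s = np.cos(heading), np.sin(heading)
    rot = np.array([[c, -s], [s, c]])                             # rotation about the z axis
    return grid @ rot.T + np.asarray(root_xy)                     # world-frame xy samples

pts = world_sensor_points(root_xy=(1.0, 0.5), heading=np.pi / 4)
print(pts.shape)  # (1024, 2)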
Improbable-AI/dexenv
dexenv/envs/dclaw_rptd.py
[ { "identifier": "DclawMultiObjs", "path": "dexenv/envs/dclaw_multiobjs.py", "snippet": "class DclawMultiObjs(DClawBase):\n def __init__(self, cfg, sim_device, rl_device, graphics_device_id):\n self.set_random_gen()\n self.object_urdfs, self.dataset_path, self.obj_name_to_cat_id = self.parse_obj_dataset(cfg.obj.dataset)\n self.num_objects = len(self.object_urdfs)\n logger.info(f'Object urdf root path:{self.dataset_path}.')\n logger.info(f'Number of available objects:{self.num_objects}.')\n super().__init__(cfg=cfg,\n sim_device=sim_device,\n rl_device=rl_device,\n graphics_device_id=graphics_device_id)\n\n def set_random_gen(self, seed=12345):\n self.np_random, seed = seeding.np_random(seed)\n\n def _create_envs(self, num_envs, spacing, num_per_row):\n lower = gymapi.Vec3(-spacing, -spacing, 0.0)\n upper = gymapi.Vec3(spacing, spacing, spacing)\n\n asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix()\n\n dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root)\n # load manipulated object and goal assets\n table_asset = self.get_table_asset()\n table_pose = self.get_table_pose()\n object_assets, goal_assets, object_ids, object_textures, object_ptds, object_cat_ids = self.load_object_asset()\n\n # create fingertip force sensors, if needed\n if self.obs_type == \"full_state\":\n sensor_pose = gymapi.Transform()\n for ft_handle in self.fingertip_handles:\n self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose)\n\n dclaw_start_pose = self.get_dclaw_start_pose()\n object_start_pose = self.get_object_start_pose(dclaw_start_pose)\n goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose)\n\n self.dclaws = []\n self.envs = []\n\n self.object_init_state = []\n self.hand_start_states = []\n\n self.hand_indices = []\n self.fingertip_indices = []\n self.object_indices = []\n self.object_cat_indices = []\n self.goal_object_indices = []\n\n self.render_camera_handles = []\n if self.cfg.rgb_render:\n render_cam_pose, render_cam_params = self.get_visual_render_camera_setup()\n\n self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in\n self.fingertips]\n\n dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset)\n object_rb_count = self.gym.get_asset_rigid_body_count(object_assets[0])\n\n self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count))\n self.object_handles = []\n num_object_assets = len(object_assets)\n env_obj_ids = []\n for i in range(self.num_envs):\n # create env instance\n obj_asset_id = i % num_object_assets\n env_obj_ids.append(object_ids[obj_asset_id])\n env_ptr = self.gym.create_env(\n self.sim, lower, upper, num_per_row\n )\n\n if self.aggregate_mode >= 1:\n # compute aggregate size\n obj_num_bodies = self.gym.get_asset_rigid_body_count(object_assets[obj_asset_id])\n obj_num_shapes = self.gym.get_asset_rigid_shape_count(object_assets[obj_asset_id])\n max_agg_bodies = self.num_dclaw_bodies + obj_num_bodies * 2 + 1\n max_agg_shapes = self.num_dclaw_shapes + obj_num_shapes * 2 + 1\n self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)\n\n self.create_hand_actor(env_ptr=env_ptr,\n dclaw_asset=dclaw_asset,\n dclaw_start_pose=dclaw_start_pose,\n dclaw_dof_props=dclaw_dof_props,\n env_id=i)\n # add object\n object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id],\n object_start_pose, \"object\", i, 0, 1)\n self.object_handles.append(object_handle)\n 
self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z,\n object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z,\n object_start_pose.r.w,\n 0, 0, 0, 0, 0, 0])\n object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)\n self.object_indices.append(object_idx)\n self.object_cat_indices.append(object_cat_ids[obj_asset_id])\n\n # add goal object\n goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id],\n goal_start_pose, \"goal_object\",\n i + self.num_envs,\n 0, 2)\n goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)\n self.goal_object_indices.append(goal_object_idx)\n\n if self.cfg.obj.load_texture:\n self.gym.set_rigid_body_texture(env_ptr,\n object_handle,\n 0,\n gymapi.MESH_VISUAL_AND_COLLISION,\n object_textures[obj_asset_id]\n )\n self.gym.set_rigid_body_texture(env_ptr,\n goal_handle,\n 0,\n gymapi.MESH_VISUAL_AND_COLLISION,\n object_textures[obj_asset_id]\n )\n else:\n color = np.array([179, 193, 134]) / 255.0\n self.gym.set_rigid_body_color(\n env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color))\n self.gym.set_rigid_body_color(\n env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color))\n table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, \"table\", i, 0)\n self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL,\n gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.))\n\n if self.cfg.rgb_render:\n render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params)\n\n self.render_camera_handles.append(render_camera_handle[0])\n if self.aggregate_mode > 0:\n self.gym.end_aggregate(env_ptr)\n self.envs.append(env_ptr)\n\n object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)\n self.object_rb_masses = [prop.mass for prop in object_rb_props]\n self.setup_torch_states()\n self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1)\n self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1)\n\n def parse_obj_dataset(self, dataset):\n asset_root = dexenv.LIB_PATH.joinpath('assets')\n split_dataset_name = dataset.split(':')\n if len(split_dataset_name) == 1:\n dataset_path = asset_root.joinpath(dataset, 'train')\n else:\n target_object = split_dataset_name[1]\n dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object)\n\n logger.warning(f'Dataset path:{dataset_path}')\n urdf_files = get_all_files_with_name(dataset_path, name='model.urdf')\n permute_ids = self.np_random.permutation(np.arange(len(urdf_files)))\n permuted_urdfs = [urdf_files[i] for i in permute_ids]\n object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs])))\n obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)}\n return permuted_urdfs, dataset_path, obj_name_to_id\n\n def get_object_category(self, urdf_path):\n cat = urdf_path.parents[0].name\n if 'var_' in cat:\n cat = urdf_path.parents[1].name\n return cat\n\n def load_object_asset(self):\n asset_root = dexenv.LIB_PATH.joinpath('assets')\n object_urdfs = self.object_urdfs\n\n object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], []\n object_cat_ids = []\n if self.cfg.obj.object_id is not None:\n urdf_to_load = self.object_urdfs[self.cfg.obj.object_id]\n logger.info(f'Loading a single object: {urdf_to_load}')\n obj_asset, goal_asset, texture_handle, ptd = 
self.load_an_object(asset_root,\n urdf_to_load)\n object_assets.append(obj_asset)\n goal_assets.append(goal_asset)\n object_ids.append(self.object_urdfs.index(urdf_to_load))\n object_tex_handles.append(texture_handle)\n object_ptds.append(ptd)\n object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)])\n else:\n if self.cfg.obj.start_id is None:\n start = 0\n end = min(len(object_urdfs), self.cfg.obj.num_objs)\n else:\n start = self.cfg.obj.start_id\n end = min(start + self.cfg.obj.num_objs, len(object_urdfs))\n iters = range(start, end)\n logger.info(f'Loading object IDs from {start} to {end}.')\n for idx in tqdm(iters, desc='Loading Asset'):\n urdf_to_load = object_urdfs[idx]\n obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root,\n urdf_to_load)\n object_assets.append(obj_asset)\n goal_assets.append(goal_asset)\n object_ids.append(self.object_urdfs.index(urdf_to_load))\n object_tex_handles.append(texture_handle)\n object_ptds.append(ptd)\n object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)])\n return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids\n\n def load_an_object(self, asset_root, object_urdf):\n out = []\n obj_asset = load_an_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=self.cfg.env.vhacd)\n obj_asset = self.change_obj_asset_dyn(obj_asset)\n goal_obj_asset = load_a_goal_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=False)\n ptd = None\n if self.cfg.env.loadCADPTD:\n ptd_file = object_urdf.parent.joinpath(f'point_cloud_{self.cfg.env.objCadNumPts}_pts.pkl')\n if ptd_file.exists():\n ptd = load_from_pickle(ptd_file)\n out.append(obj_asset)\n out.append(goal_obj_asset)\n if self.cfg.obj.load_texture:\n texture_handle = load_obj_texture(self.gym, self.sim, object_urdf)\n out.append(texture_handle)\n else:\n out.append([])\n out.append(ptd)\n return out\n\n def change_obj_asset_dyn(self, obj_asset):\n object_props = self.gym.get_asset_rigid_shape_properties(obj_asset)\n for p in object_props:\n p.friction = self.cfg.env.obj.friction\n p.torsion_friction = self.cfg.env.obj.torsion_friction\n p.rolling_friction = self.cfg.env.obj.rolling_friction\n p.restitution = self.cfg.env.obj.restitution\n\n self.gym.set_asset_rigid_shape_properties(obj_asset, object_props)\n return obj_asset" }, { "identifier": "load_from_pickle", "path": "dexenv/utils/common.py", "snippet": "def load_from_pickle(file_name):\n file_name = pathlib_file(file_name)\n with file_name.open('rb') as f:\n data = pkl.load(f)\n return data" }, { "identifier": "get_camera_params", "path": "dexenv/utils/isaac_utils.py", "snippet": "def get_camera_params(width=640, height=480, hov=75, cuda=True):\n camera_props = gymapi.CameraProperties()\n camera_props.horizontal_fov = hov\n camera_props.width = width\n camera_props.height = height\n camera_props.enable_tensors = cuda\n return camera_props" }, { "identifier": "CameraPointCloud", "path": "dexenv/utils/point_cloud_utils.py", "snippet": "class CameraPointCloud:\n def __init__(self, isc_sim, isc_gym, envs, camera_handles,\n camera_props, sample_num=4000,\n filter_func=None, pt_in_local=False,\n depth_max=None, graphics_device='cpu',\n compute_device='cpu'):\n self.sim = isc_sim\n self.gym = isc_gym\n self.envs = envs\n self.camera_handles = camera_handles\n assert pt_in_local\n self.filter_func = filter_func\n print(f'Depth max:{depth_max}')\n\n self.camera_props = camera_props\n self.graphics_device = graphics_device\n 
self.compute_device = compute_device\n self.sample_num = sample_num\n self.num_envs = len(self.envs)\n self.num_cams = len(camera_handles[0])\n print(f'Number of envs in camera:{self.num_envs}')\n print(f'Number of cameras:{self.num_cams}')\n\n all_depth_buffers = []\n self.pt_generators = []\n for idx in range(len(envs)):\n depth_buffers = []\n pt_generators = []\n for c in range(len(camera_handles[idx])):\n c_handle = camera_handles[idx][c]\n env = envs[idx]\n depth_tensor = self.gym.get_camera_image_gpu_tensor(self.sim,\n env,\n c_handle,\n gymapi.IMAGE_DEPTH)\n torch_depth_tensor = gymtorch.wrap_tensor(depth_tensor)\n depth_buffers.append(torch_depth_tensor)\n\n view_matrix = self.gym.get_camera_view_matrix(self.sim,\n envs[0],\n c_handle)\n proj_matrix = self.gym.get_camera_proj_matrix(self.sim,\n envs[0],\n c_handle)\n pt_generators.append(\n PointCloudGenerator(\n camera_props=camera_props,\n proj_matrix=proj_matrix,\n view_matrix=view_matrix,\n depth_max=depth_max,\n device=self.graphics_device\n )\n )\n all_depth_buffers.append(depth_buffers)\n self.pt_generators.append(pt_generators)\n\n self.depth_tensors = all_depth_buffers\n\n @torch.no_grad()\n def get_point_cloud(self, env_ids=None, filter_func=None, sample_num=None):\n if filter_func is None:\n filter_func = self.filter_func\n dim_per_pt = 3\n sample_num = self.sample_num if sample_num is None else sample_num\n num_envs = self.num_envs if env_ids is None else len(env_ids)\n out = torch.zeros((num_envs, sample_num, dim_per_pt), device=self.compute_device)\n all_pts = self.get_ptd_cuda(env_ids=env_ids, filter_func=filter_func)\n\n for env_id in range(num_envs):\n env_pt = torch.cat([x[env_id] for x in all_pts])\n env_pt = self.sample_n(env_pt, sample_num=sample_num)\n out[env_id, :, :3] = env_pt.to(self.compute_device)\n return out.detach()\n\n @torch.no_grad()\n def _proc_pts(self, camera_id, env_id, depth_images, filter_func=None):\n pts = self.pt_generators[env_id][camera_id].convert(depth_images)\n\n if filter_func is not None:\n pts = filter_func(pts)\n elif self.filter_func is not None:\n pts = self.filter_func(pts)\n return pts\n\n @torch.no_grad()\n def sample_n(self, pts, sample_num=None):\n sample_num = self.sample_num if sample_num is None else sample_num\n num = pts.shape[0]\n ids = torch.randint(0, num, size=(sample_num,))\n pts = pts[ids]\n return pts\n\n @torch.no_grad()\n def get_ptd_cuda(self, env_ids=None, filter_func=None):\n depth_imgs = self.clone_img_tensor(self.depth_tensors, env_ids)\n all_pts = []\n env_iter = range(len(self.envs)) if env_ids is None else env_ids\n for cid in range(depth_imgs.shape[1]):\n c_pts = []\n for env_id in env_iter:\n pts = self._proc_pts(camera_id=cid,\n env_id=env_id,\n depth_images=depth_imgs[env_id, cid],\n filter_func=filter_func)\n c_pts.append(pts)\n all_pts.append(c_pts)\n return all_pts\n\n @torch.no_grad()\n def clone_img_tensor(self, img_tensors, env_ids=None):\n out = []\n env_iter = range(len(self.envs)) if env_ids is None else env_ids\n out = [torch.stack(img_tensors[i]) for i in env_iter]\n\n return torch.stack(out)" }, { "identifier": "quat_xyzw_to_wxyz", "path": "dexenv/utils/torch_utils.py", "snippet": "@torch.no_grad()\ndef quat_xyzw_to_wxyz(quat_xyzw):\n quat_wxyz = torch.index_select(quat_xyzw, -1,\n torch.LongTensor([3, 0, 1, 2]).to(quat_xyzw.device))\n return quat_wxyz" }, { "identifier": "torch_float", "path": "dexenv/utils/torch_utils.py", "snippet": "def torch_float(array, device='cpu'):\n if isinstance(array, torch.Tensor):\n return 
array.float().to(device)\n elif isinstance(array, np.ndarray):\n return torch.from_numpy(array).float().to(device)\n elif isinstance(array, list):\n return torch.FloatTensor(array).to(device)\n elif isinstance(array, dict):\n new_dict = dict()\n for k, v in array.items():\n new_dict[k] = torch_float(v, device)\n return new_dict" }, { "identifier": "torch_long", "path": "dexenv/utils/torch_utils.py", "snippet": "def torch_long(array, device='cpu'):\n if isinstance(array, torch.Tensor):\n return array.long().to(device)\n elif isinstance(array, np.ndarray):\n return torch.from_numpy(array).long().to(device)\n elif isinstance(array, list):\n return torch.LongTensor(array).to(device)\n elif isinstance(array, dict):\n new_dict = dict()\n for k, v in array.items():\n new_dict[k] = torch_long(v, device)\n return new_dict" } ]
import numpy as np
import pytorch3d.transforms as p3dtf
import torch
import dexenv
from gym import spaces
from isaacgym import gymapi
from scipy.spatial.transform import Rotation as R
from dexenv.envs.dclaw_multiobjs import DclawMultiObjs
from dexenv.utils.common import load_from_pickle
from dexenv.utils.isaac_utils import get_camera_params
from dexenv.utils.point_cloud_utils import CameraPointCloud
from dexenv.utils.torch_utils import quat_xyzw_to_wxyz
from dexenv.utils.torch_utils import torch_float
from dexenv.utils.torch_utils import torch_long
6,479
1, 1) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=None) object_assets, goal_assets, object_ids, object_textures, object_ptds, object_cat_ids = self.load_object_asset() table_asset = self.get_table_asset() table_pose = self.get_table_pose() if self.obs_type == "full_state": sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose) dclaw_start_pose = self.get_dclaw_start_pose() object_start_pose = self.get_object_start_pose(dclaw_start_pose) goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose) self.dclaws = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.object_cat_indices = [] self.goal_object_indices = [] self.cam_handles = [] self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] camera_poses, camera_params = self.get_camera_setup() dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_assets[0]) self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count)) num_object_assets = len(object_assets) env_obj_ids = [] self.object_ptds = [] self.object_handles = [] for i in range(self.num_envs): obj_asset_id = i % num_object_assets env_obj_ids.append(object_ids[obj_asset_id]) env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) self.object_ptds.append(object_ptds[obj_asset_id]) if self.aggregate_mode >= 1: # compute aggregate size obj_num_bodies = self.gym.get_asset_rigid_body_count(object_assets[obj_asset_id]) obj_num_shapes = self.gym.get_asset_rigid_shape_count(object_assets[obj_asset_id]) max_agg_bodies = self.num_dclaw_bodies + obj_num_bodies * 2 + 1 max_agg_shapes = self.num_dclaw_shapes + obj_num_shapes * 2 + 1 self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) self.create_hand_actor(env_ptr=env_ptr, dclaw_asset=dclaw_asset, dclaw_start_pose=dclaw_start_pose, dclaw_dof_props=dclaw_dof_props, env_id=i) # add object object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) self.object_cat_indices.append(object_cat_ids[obj_asset_id]) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) 
self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) cam_handles = self.create_camera(camera_poses, env_ptr, camera_params) self.cam_handles.append(cam_handles) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.setup_ptd_cam(camera_params) self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) self.object_ptds = np.stack(self.object_ptds, axis=0)
class DclawRealPTD(DclawMultiObjs): def __init__(self, cfg, sim_device, rl_device, graphics_device_id, quantization_size=None): cfg.env.enableCameraSensors = True super().__init__(cfg=cfg, sim_device=sim_device, rl_device=rl_device, graphics_device_id=graphics_device_id) self.quantization_size = quantization_size self.read_finger_ptd() ob_buf_shape = (self.cfg.env.robotCadNumPts * len(self.ptd_body_links) + self.cfg.env.objCadNumPts + self.cfg.cam.sample_num, 3) self.obs_space = spaces.Dict({'ob': spaces.Box(np.ones(ob_buf_shape) * -np.Inf, np.ones(ob_buf_shape) * np.Inf), 'state': spaces.Box(np.ones(self.num_obs) * -np.Inf, np.ones(self.num_obs) * np.Inf)}) def read_finger_ptd(self): ptd_path = dexenv.LIB_PATH.joinpath('assets', f'{self.cfg.env.robot}', 'meshes', 'visual', f'point_cloud_{self.cfg.env.robotCadNumPts}_pts.pkl') self.hand_ptd_dict = load_from_pickle(ptd_path) body_links = list(self.hand_ptd_dict.keys()) body_links.remove('base_link') self.ptd_body_links = body_links self.hand_body_links_to_handles = self.gym.get_actor_rigid_body_dict(self.envs[0], self.dclaws[0]) self.hand_ptds = torch.from_numpy(np.stack([self.hand_ptd_dict[x] for x in self.ptd_body_links])) self.hand_ptds = self.hand_ptds.to(self.device) self.base_link_handle = torch_long([self.hand_body_links_to_handles['base_link']]) self.hand_body_handles = [self.hand_body_links_to_handles[x] for x in self.ptd_body_links] self.hand_body_handles = torch_long(self.hand_body_handles, device=self.device) hand_ptds = self.hand_ptds.repeat(self.num_envs, 1, 1, 1) self.scene_cad_ptd = torch.cat((hand_ptds, self.object_ptds.unsqueeze(1)), dim=1) self.scene_cad_ptd = self.scene_cad_ptd.view(-1, self.scene_cad_ptd.shape[-2], self.scene_cad_ptd.shape[-1]).float() self.scene_ptd_buf = torch.zeros( (self.num_envs, self.cfg.env.robotCadNumPts * len(self.ptd_body_links) + self.cfg.env.objCadNumPts + self.cfg.cam.sample_num, 3), device=self.device, dtype=torch.float) self.se3_T_buf = torch.eye(4, device=self.device).repeat(self.num_envs * (len(self.ptd_body_links) + 1), 1, 1) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=None) object_assets, goal_assets, object_ids, object_textures, object_ptds, object_cat_ids = self.load_object_asset() table_asset = self.get_table_asset() table_pose = self.get_table_pose() if self.obs_type == "full_state": sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose) dclaw_start_pose = self.get_dclaw_start_pose() object_start_pose = self.get_object_start_pose(dclaw_start_pose) goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose) self.dclaws = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.object_cat_indices = [] self.goal_object_indices = [] self.cam_handles = [] self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] camera_poses, camera_params = self.get_camera_setup() dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_assets[0]) self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count)) num_object_assets = len(object_assets) env_obj_ids = [] self.object_ptds 
= [] self.object_handles = [] for i in range(self.num_envs): obj_asset_id = i % num_object_assets env_obj_ids.append(object_ids[obj_asset_id]) env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) self.object_ptds.append(object_ptds[obj_asset_id]) if self.aggregate_mode >= 1: # compute aggregate size obj_num_bodies = self.gym.get_asset_rigid_body_count(object_assets[obj_asset_id]) obj_num_shapes = self.gym.get_asset_rigid_shape_count(object_assets[obj_asset_id]) max_agg_bodies = self.num_dclaw_bodies + obj_num_bodies * 2 + 1 max_agg_shapes = self.num_dclaw_shapes + obj_num_shapes * 2 + 1 self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) self.create_hand_actor(env_ptr=env_ptr, dclaw_asset=dclaw_asset, dclaw_start_pose=dclaw_start_pose, dclaw_dof_props=dclaw_dof_props, env_id=i) # add object object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) self.object_cat_indices.append(object_cat_ids[obj_asset_id]) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) cam_handles = self.create_camera(camera_poses, env_ptr, camera_params) self.cam_handles.append(cam_handles) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.setup_ptd_cam(camera_params) self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) self.object_ptds = np.stack(self.object_ptds, axis=0)
self.object_ptds = torch_float(self.object_ptds, device=self.device)
5
2023-10-25 17:22:41+00:00
8k
ai-safety-foundation/sparse_autoencoder
sparse_autoencoder/metrics/train/neuron_activity_metric.py
[ { "identifier": "MetricLocation", "path": "sparse_autoencoder/metrics/abstract_metric.py", "snippet": "class MetricLocation(SnakeCaseStrEnum):\n \"\"\"Metric location.\n\n Metrics can be logged at different stages of the training pipeline. This enum is used to define\n when the metric was logged.\n \"\"\"\n\n GENERATE = auto()\n TRAIN = auto()\n RESAMPLE = auto()\n VALIDATE = auto()\n SAVE = auto()" }, { "identifier": "MetricResult", "path": "sparse_autoencoder/metrics/abstract_metric.py", "snippet": "class MetricResult:\n \"\"\"Metric result.\n\n Every metric (and loss module) should return a list of metric results (a list so that it can\n return more than one metric result if needed). Each metric result defines the name of the\n result, as well as the component-wise values and how they should be aggregated.\n \"\"\"\n\n location: MetricLocation\n name: str\n postfix: str | None\n _component_names: list[str]\n component_wise_values: Sequence[WandbSupportedLogTypes] | Float[\n Tensor, Axis.names(Axis.COMPONENT)\n ] | Int[Tensor, Axis.names(Axis.COMPONENT)]\n aggregate_approach: ComponentAggregationApproach | None\n _aggregate_value: Any | None\n\n def __init__(\n self,\n component_wise_values: Sequence[WandbSupportedLogTypes]\n | Float[Tensor, Axis.names(Axis.COMPONENT)]\n | Int[Tensor, Axis.names(Axis.COMPONENT)],\n name: str,\n location: MetricLocation,\n aggregate_approach: ComponentAggregationApproach | None = ComponentAggregationApproach.MEAN,\n aggregate_value: Any | None = None, # noqa: ANN401\n postfix: str | None = None,\n ) -> None:\n \"\"\"Initialize a metric result.\n\n Example:\n >>> metric_result = MetricResult(\n ... location=MetricLocation.TRAIN,\n ... name=\"loss\",\n ... component_wise_values=[1.0, 2.0, 3.0],\n ... aggregate_approach=ComponentAggregationApproach.MEAN,\n ... )\n >>> for k, v in metric_result.wandb_log.items():\n ... print(f\"{k}: {v}\")\n component_0/train/loss: 1.0\n component_1/train/loss: 2.0\n component_2/train/loss: 3.0\n train/loss/component_mean: 2.0\n\n\n Args:\n component_wise_values: Values for each component.\n name: Metric name (e.g. `l2_loss`). This will be combined with the component name and\n metric locations, as well as an optional postfix, to create a Weights and Biases\n name of the form `component_name/metric_location/metric_name/metric_postfix`.\n location: Metric location.\n aggregate_approach: Component aggregation approach.\n aggregate_value: Override the aggregate value across components. 
For most metric results\n you can instead just specify the `aggregate_approach` and it will be automatically\n calculated.\n postfix: Metric name postfix.\n \"\"\"\n self.location = location\n self.name = name\n self.component_wise_values = component_wise_values\n self.aggregate_approach = aggregate_approach\n self._aggregate_value = aggregate_value\n self.postfix = postfix\n self._component_names = [f\"component_{i}\" for i in range(len(component_wise_values))]\n\n @final\n @property\n def n_components(self) -> int:\n \"\"\"Number of components.\"\"\"\n return len(self.component_wise_values)\n\n @final\n @property\n def aggregate_value( # noqa: PLR0911\n self,\n ) -> (\n WandbSupportedLogTypes\n | Float[Tensor, Axis.names(Axis.COMPONENT)]\n | Int[Tensor, Axis.names(Axis.COMPONENT)]\n ):\n \"\"\"Aggregate value across components.\n\n Returns:\n Aggregate value (defaults to the initialised aggregate value if set, or otherwise\n attempts to automatically aggregate the component-wise values).\n\n Raises:\n ValueError: If the component-wise values cannot be automatically aggregated.\n \"\"\"\n # Allow overriding\n if self._aggregate_value is not None:\n return self._aggregate_value\n\n if self.n_components == 1:\n return self.component_wise_values[0]\n\n cannot_aggregate_error_message = \"Cannot aggregate component-wise values.\"\n\n # Automatically aggregate number lists/sequences/tuples/sets\n if (isinstance(self.component_wise_values, (Sequence, list, tuple, set))) and all(\n isinstance(x, (int, float)) for x in self.component_wise_values\n ):\n values: list = cast(list[float], self.component_wise_values)\n match self.aggregate_approach:\n case ComponentAggregationApproach.MEAN:\n return sum(values) / len(values)\n case ComponentAggregationApproach.SUM:\n return sum(values)\n case ComponentAggregationApproach.ALL:\n return values\n case _:\n raise ValueError(cannot_aggregate_error_message)\n\n # Automatically aggregate number tensors\n if (\n isinstance(self.component_wise_values, Tensor)\n and self.component_wise_values.shape[0] == self.n_components\n ):\n match self.aggregate_approach:\n case ComponentAggregationApproach.MEAN:\n return self.component_wise_values.mean(dim=0, dtype=torch.float32)\n case ComponentAggregationApproach.SUM:\n return self.component_wise_values.sum(dim=0, dtype=torch.float32)\n case ComponentAggregationApproach.ALL:\n return self.component_wise_values\n case _:\n raise ValueError(cannot_aggregate_error_message)\n\n # Raise otherwise\n raise ValueError(cannot_aggregate_error_message)\n\n @final\n def create_wandb_name(\n self,\n component_name: str | None = None,\n aggregation_approach: ComponentAggregationApproach | None = None,\n ) -> str:\n \"\"\"Weights and Biases Metric Name.\n\n Note Weights and Biases categorises metrics using a forward slash (`/`) in the name string.\n\n Example:\n >>> metric_result = MetricResult(\n ... location=MetricLocation.VALIDATE,\n ... name=\"loss\",\n ... component_wise_values=[1.0, 2.0, 3.0],\n ... aggregate_approach=ComponentAggregationApproach.MEAN,\n ... 
)\n >>> metric_result.create_wandb_name()\n 'validate/loss'\n\n >>> metric_result.create_wandb_name(component_name=\"component_0\")\n 'component_0/validate/loss'\n\n Args:\n component_name: Component name, if creating a Weights and Biases name for a specific\n component.\n aggregation_approach: Component aggregation approach, if creating an aggregate metric.\n\n Returns:\n Weights and Biases metric name.\n \"\"\"\n # Add the name parts in order\n name_parts = []\n\n # Component name (e.g. `component_0` if set)\n if component_name is not None:\n name_parts.append(component_name)\n\n # Always include location (e.g. `train`) and the core metric name (e.g. neuron_activity).\n name_parts.extend([self.location.value, self.name])\n\n # Postfix (e.g. `almost_dead_1e-3`)\n if self.postfix is not None:\n name_parts.append(self.postfix)\n\n # Aggregation approach (e.g. `component_mean`) if set and not ALL\n if (\n aggregation_approach is not None\n and aggregation_approach != ComponentAggregationApproach.ALL\n ):\n name_parts.append(f\"component_{aggregation_approach.value.lower()}\")\n\n return \"/\".join(name_parts)\n\n @final\n @property\n def wandb_log(self) -> dict[str, WandbSupportedLogTypes]:\n \"\"\"Create the Weights and Biases Log data.\n\n For use with `wandb.log()`.\n\n https://docs.wandb.ai/ref/python/log\n\n Examples:\n With just one component:\n\n >>> metric_result = MetricResult(\n ... location=MetricLocation.VALIDATE,\n ... name=\"loss\",\n ... component_wise_values=[1.5],\n ... )\n >>> for k, v in metric_result.wandb_log.items():\n ... print(f\"{k}: {v}\")\n validate/loss: 1.5\n\n With multiple components:\n\n >>> metric_result = MetricResult(\n ... location=MetricLocation.VALIDATE,\n ... name=\"loss\",\n ... component_wise_values=[1.0, 2.0],\n ... aggregate_approach=ComponentAggregationApproach.MEAN,\n ... )\n >>> for k, v in metric_result.wandb_log.items():\n ... 
print(f\"{k}: {v}\")\n component_0/validate/loss: 1.0\n component_1/validate/loss: 2.0\n validate/loss/component_mean: 1.5\n\n Returns:\n Weights and Biases log data.\n \"\"\"\n # Create the component wise logs if there is more than one component\n component_wise_logs = {}\n if self.n_components > 1:\n for component_name, value in zip(self._component_names, self.component_wise_values):\n component_wise_logs[self.create_wandb_name(component_name=component_name)] = value\n\n # Create the aggregate log if there is an aggregate value\n aggregate_log = {}\n if self.aggregate_approach is not None or self._aggregate_value is not None:\n aggregate_log = {\n self.create_wandb_name(\n aggregation_approach=self.aggregate_approach if self.n_components > 1 else None\n ): self.aggregate_value\n }\n\n return {**component_wise_logs, **aggregate_log}\n\n def __str__(self) -> str:\n \"\"\"String representation.\"\"\"\n return str(self.wandb_log)\n\n def __repr__(self) -> str:\n \"\"\"Representation.\"\"\"\n class_name = self.__class__.__name__\n return f\"\"\"{class_name}(\n location={self.location},\n name={self.name},\n postfix={self.postfix},\n component_wise_values={self.component_wise_values},\n aggregate_approach={self.aggregate_approach},\n aggregate_value={self._aggregate_value},\n )\"\"\"" }, { "identifier": "AbstractTrainMetric", "path": "sparse_autoencoder/metrics/train/abstract_train_metric.py", "snippet": "class AbstractTrainMetric(AbstractMetric, ABC):\n \"\"\"Abstract train metric.\"\"\"\n\n @final\n @property\n def location(self) -> MetricLocation:\n \"\"\"Metric type name.\"\"\"\n return MetricLocation.TRAIN\n\n @abstractmethod\n def calculate(self, data: TrainMetricData) -> list[MetricResult]:\n \"\"\"Calculate any metrics component wise.\n\n Args:\n data: Train metric data.\n\n Returns:\n Dictionary of metrics.\n \"\"\"" }, { "identifier": "TrainMetricData", "path": "sparse_autoencoder/metrics/train/abstract_train_metric.py", "snippet": "class TrainMetricData:\n \"\"\"Train metric data.\"\"\"\n\n input_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Input activations.\"\"\"\n\n learned_activations: Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT, Axis.LEARNT_FEATURE)]\n \"\"\"Learned activations.\"\"\"\n\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Decoded activations.\"\"\"\n\n def __init__(\n self,\n input_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> None:\n \"\"\"Initialize the train metric data.\"\"\"\n self.input_activations = add_component_axis_if_missing(\n input_activations, dimensions_without_component=2\n ).detach()\n self.learned_activations = add_component_axis_if_missing(\n learned_activations, dimensions_without_component=2\n ).detach()\n self.decoded_activations = add_component_axis_if_missing(\n decoded_activations, dimensions_without_component=2\n ).detach()" }, { "identifier": "Axis", "path": "sparse_autoencoder/tensor_types.py", "snippet": "class Axis(LowercaseStrEnum):\n \"\"\"Tensor axis names.\n\n Used to annotate tensor types.\n\n Example:\n When used directly it prints a string:\n\n >>> print(Axis.INPUT_OUTPUT_FEATURE)\n 
input_output_feature\n\n The primary use is to annotate tensor types:\n\n >>> from jaxtyping import Float\n >>> from torch import Tensor\n >>> from typing import TypeAlias\n >>> batch: TypeAlias = Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)]\n >>> print(batch)\n <class 'jaxtyping.Float[Tensor, 'batch input_output_feature']'>\n\n You can also join multiple axis together to represent the dimensions of a tensor:\n\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n \"\"\"\n\n # Component idx\n COMPONENT = auto()\n \"\"\"Component index.\"\"\"\n\n COMPONENT_OPTIONAL = \"*component\"\n \"\"\"Optional component index.\"\"\"\n\n # Batches\n SOURCE_DATA_BATCH = auto()\n \"\"\"Batch of prompts used to generate source model activations.\"\"\"\n\n BATCH = auto()\n \"\"\"Batch of items that the SAE is being trained on.\"\"\"\n\n STORE_BATCH = auto()\n \"\"\"Batch of items to be written to the store.\"\"\"\n\n ITEMS = auto()\n \"\"\"Arbitrary number of items.\"\"\"\n\n # Features\n INPUT_OUTPUT_FEATURE = auto()\n \"\"\"Input or output feature (e.g. feature in activation vector from source model).\"\"\"\n\n LEARNT_FEATURE = auto()\n \"\"\"Learn feature (e.g. feature in learnt activation vector).\"\"\"\n\n DEAD_FEATURE = auto()\n \"\"\"Dead feature.\"\"\"\n\n ALIVE_FEATURE = auto()\n \"\"\"Alive feature.\"\"\"\n\n # Feature indices\n INPUT_OUTPUT_FEATURE_IDX = auto()\n \"\"\"Input or output feature index.\"\"\"\n\n LEARNT_FEATURE_IDX = auto()\n \"\"\"Learn feature index.\"\"\"\n\n # Other\n POSITION = auto()\n \"\"\"Token position.\"\"\"\n\n SINGLE_ITEM = \"\"\n \"\"\"Single item axis.\"\"\"\n\n ANY = \"...\"\n \"\"\"Any number of axis.\"\"\"\n\n @staticmethod\n def names(*axis: \"Axis\") -> str:\n \"\"\"Join multiple axis together, to represent the dimensions of a tensor.\n\n Example:\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n\n Args:\n *axis: Axis to join.\n\n Returns:\n Joined axis string.\n \"\"\"\n return \" \".join(a.value for a in axis)" } ]
from jaxtyping import Float, Int, Int64
from torch import Tensor
from sparse_autoencoder.metrics.abstract_metric import (
    MetricLocation,
    MetricResult,
)
from sparse_autoencoder.metrics.train.abstract_train_metric import (
    AbstractTrainMetric,
    TrainMetricData,
)
from sparse_autoencoder.tensor_types import Axis
import numpy as np
import torch
import wandb
4,602
"""Neuron activity metric. Logs the number of dead and alive neurons at various horizons. Also logs histograms of neuron activity, and the number of neurons that are almost dead. """ DEFAULT_HORIZONS = [10_000, 100_000, 1_000_000, 10_000_000] """Default horizons (in number of logged activations).""" DEFAULT_THRESHOLDS = [1e-5, 1e-6] """Default thresholds for determining if a neuron is almost dead.""" class NeuronActivityHorizonData: """Neuron activity data for a specific horizon (number of activations seen). For each time horizon we store some data (e.g. the number of times each neuron fired inside this time horizon). This class also contains some helper methods for then calculating metrics from this data. """ _horizon_n_activations: int """Horizon in number of activations.""" _horizon_steps: int """Horizon in number of steps.""" _steps_since_last_calculated: int """Steps since last calculated.""" _neuron_activity: Int64[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)] """Neuron activity since inception.""" _thresholds: list[float] """Thresholds for almost dead neurons.""" _n_components: int """Number of components.""" _n_learned_features: int """Number of learned features.""" @property def _dead_count(self) -> Int[Tensor, Axis.COMPONENT]: """Dead count.""" dead_bool_mask: Int64[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)] = ( self._neuron_activity == 0 ) return dead_bool_mask.sum(-1) @property def _dead_fraction(self) -> Float[Tensor, Axis.COMPONENT]: """Dead fraction.""" return self._dead_count / self._n_learned_features @property def _alive_count(self) -> Int[Tensor, Axis.COMPONENT]: """Alive count.""" alive_bool_mask: Int64[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)] = ( self._neuron_activity > 0 ) return alive_bool_mask.sum(-1) def _almost_dead(self, threshold: float) -> Int[Tensor, Axis.COMPONENT]: """Almost dead count.""" threshold_in_activations: float = threshold * self._horizon_n_activations almost_dead_bool_mask: Int64[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)] = ( self._neuron_activity < threshold_in_activations ) return almost_dead_bool_mask.sum(-1) @property def _activity_histogram(self) -> list[wandb.Histogram]: """Activity histogram.""" numpy_neuron_activity: Float[ np.ndarray, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE) ] = self._neuron_activity.cpu().numpy() np_histograms = [np.histogram(activity) for activity in numpy_neuron_activity] return [wandb.Histogram(np_histogram=histogram) for histogram in np_histograms] @property def _log_activity_histogram(self) -> list[wandb.Histogram]: """Log activity histogram.""" log_epsilon = 0.1 # To avoid log(0) log_neuron_activity: Float[ Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE) ] = torch.log(self._neuron_activity + log_epsilon) numpy_log_neuron_activity: Float[ np.ndarray, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE) ] = log_neuron_activity.cpu().numpy() np_histograms = [np.histogram(activity) for activity in numpy_log_neuron_activity] return [wandb.Histogram(np_histogram=histogram) for histogram in np_histograms] @property def metric_results(self) -> list[MetricResult]: """Metric results."""
"""Neuron activity metric. Logs the number of dead and alive neurons at various horizons. Also logs histograms of neuron activity, and the number of neurons that are almost dead. """ DEFAULT_HORIZONS = [10_000, 100_000, 1_000_000, 10_000_000] """Default horizons (in number of logged activations).""" DEFAULT_THRESHOLDS = [1e-5, 1e-6] """Default thresholds for determining if a neuron is almost dead.""" class NeuronActivityHorizonData: """Neuron activity data for a specific horizon (number of activations seen). For each time horizon we store some data (e.g. the number of times each neuron fired inside this time horizon). This class also contains some helper methods for then calculating metrics from this data. """ _horizon_n_activations: int """Horizon in number of activations.""" _horizon_steps: int """Horizon in number of steps.""" _steps_since_last_calculated: int """Steps since last calculated.""" _neuron_activity: Int64[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)] """Neuron activity since inception.""" _thresholds: list[float] """Thresholds for almost dead neurons.""" _n_components: int """Number of components.""" _n_learned_features: int """Number of learned features.""" @property def _dead_count(self) -> Int[Tensor, Axis.COMPONENT]: """Dead count.""" dead_bool_mask: Int64[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)] = ( self._neuron_activity == 0 ) return dead_bool_mask.sum(-1) @property def _dead_fraction(self) -> Float[Tensor, Axis.COMPONENT]: """Dead fraction.""" return self._dead_count / self._n_learned_features @property def _alive_count(self) -> Int[Tensor, Axis.COMPONENT]: """Alive count.""" alive_bool_mask: Int64[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)] = ( self._neuron_activity > 0 ) return alive_bool_mask.sum(-1) def _almost_dead(self, threshold: float) -> Int[Tensor, Axis.COMPONENT]: """Almost dead count.""" threshold_in_activations: float = threshold * self._horizon_n_activations almost_dead_bool_mask: Int64[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)] = ( self._neuron_activity < threshold_in_activations ) return almost_dead_bool_mask.sum(-1) @property def _activity_histogram(self) -> list[wandb.Histogram]: """Activity histogram.""" numpy_neuron_activity: Float[ np.ndarray, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE) ] = self._neuron_activity.cpu().numpy() np_histograms = [np.histogram(activity) for activity in numpy_neuron_activity] return [wandb.Histogram(np_histogram=histogram) for histogram in np_histograms] @property def _log_activity_histogram(self) -> list[wandb.Histogram]: """Log activity histogram.""" log_epsilon = 0.1 # To avoid log(0) log_neuron_activity: Float[ Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE) ] = torch.log(self._neuron_activity + log_epsilon) numpy_log_neuron_activity: Float[ np.ndarray, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE) ] = log_neuron_activity.cpu().numpy() np_histograms = [np.histogram(activity) for activity in numpy_log_neuron_activity] return [wandb.Histogram(np_histogram=histogram) for histogram in np_histograms] @property def metric_results(self) -> list[MetricResult]: """Metric results."""
metric_location = MetricLocation.TRAIN
0
2023-10-27 07:37:15+00:00
8k
NVlabs/handover-sim2real
examples/train.py
[ { "identifier": "get_cfg", "path": "handover_sim2real/config.py", "snippet": "def get_cfg(handover_config_only=False):\n if not handover_config_only:\n cfg = _C\n else:\n cfg = _C_handover_config\n return cfg.clone()" }, { "identifier": "HandoverSim2RealPolicy", "path": "handover_sim2real/policy.py", "snippet": "class HandoverSim2RealPolicy:\n def __init__(self, cfg, agent, grasp_agent, grasp_pred_threshold, use_ray=False, seed=None):\n self._cfg = cfg\n self._agent = agent\n self._grasp_agent = grasp_agent\n self._grasp_pred_threshold = grasp_pred_threshold\n self._use_ray = use_ray\n\n self._point_listener = PointListener(cfg, seed=seed)\n\n self._panda_base_invert_transform = pybullet.invertTransform(\n self._cfg.ENV.PANDA_BASE_POSITION, self._cfg.ENV.PANDA_BASE_ORIENTATION\n )\n\n self._steps_action_repeat = int(\n self._cfg.POLICY.TIME_ACTION_REPEAT / self._cfg.SIM.TIME_STEP\n )\n self._steps_close_gripper = int(\n self._cfg.POLICY.TIME_CLOSE_GRIPPER / self._cfg.SIM.TIME_STEP\n )\n self._standoff_offset = np.array([0.0, 0.0, 0.08])\n\n @property\n def steps_action_repeat(self):\n return self._steps_action_repeat\n\n def reset(self):\n self._done_frame = None\n self._grasp = None\n self._back = None\n\n self._point_listener.reset()\n\n def get_state(self, obs):\n point_states, elapsed_time = self._get_point_states_from_callback(obs)\n ee_pose = self._get_ee_pose(obs, in_panda_base=True)\n state = self._point_listener.point_states_to_state(point_states, ee_pose)\n return state, elapsed_time\n\n @timer\n def _get_point_states_from_callback(self, obs):\n point_states = obs[\"callback_get_point_states\"]()\n point_states = [point_state.T for point_state in point_states]\n return point_states\n\n def _get_ee_pose(self, obs, in_panda_base=False):\n pos = obs[\"panda_body\"].link_state[0, obs[\"panda_link_ind_hand\"], 0:3]\n orn = obs[\"panda_body\"].link_state[0, obs[\"panda_link_ind_hand\"], 3:7]\n if in_panda_base:\n pos, orn = pybullet.multiplyTransforms(*self._panda_base_invert_transform, pos, orn)\n ee_pose = unpack_pose(np.hstack((pos, tf_quat(orn))))\n return ee_pose\n\n def select_action(self, state, expert_policy=False):\n if self._use_ray:\n action, _, _, _ = ray.get(\n self._agent.select_action.remote(\n state, remain_timestep=1, expert_policy=expert_policy\n )\n )\n else:\n action, _, _, _ = self._agent.select_action(\n state, remain_timestep=1, expert_policy=expert_policy\n )\n return action\n\n def convert_action_to_target_joint_position(self, action, obs):\n ee_pose = self._get_ee_pose(obs)\n delta_ee_pose = unpack_action(action)\n target_ee_pose = np.matmul(ee_pose, delta_ee_pose)\n\n pos = target_ee_pose[:3, 3]\n orn = Rot.from_matrix(target_ee_pose[:3, :3]).as_quat()\n target_joint_position = pybullet.calculateInverseKinematics(\n obs[\"panda_body\"].contact_id[0], obs[\"panda_link_ind_hand\"] - 1, pos, orn\n )\n target_joint_position = np.array(target_joint_position)\n target_joint_position[7:9] = 0.04\n\n return target_joint_position\n\n def select_action_grasp(self, state):\n if self._use_ray:\n action = ray.get(\n self._grasp_agent.select_action_grasp.remote(state, self._grasp_pred_threshold)\n )\n else:\n action = self._grasp_agent.select_action_grasp(state, self._grasp_pred_threshold)\n return action\n\n def grasp_and_back(self, obs):\n if self._done_frame is None:\n self._done_frame = obs[\"frame\"]\n\n done = False\n\n if obs[\"frame\"] < self._done_frame + 4 * self._steps_action_repeat:\n if self._grasp is None:\n pos = obs[\"panda_body\"].link_state[0, 
obs[\"panda_link_ind_hand\"], 0:3].numpy()\n orn = obs[\"panda_body\"].link_state[0, obs[\"panda_link_ind_hand\"], 3:7].numpy()\n R = Rot.from_quat(orn).as_matrix()\n reach_goal = np.matmul(R, self._standoff_offset) + pos\n reach_traj = np.linspace(pos, reach_goal, 5)[1:]\n\n self._grasp = []\n for pos in reach_traj:\n conf = pybullet.calculateInverseKinematics(\n obs[\"panda_body\"].contact_id[0],\n obs[\"panda_link_ind_hand\"] - 1,\n pos,\n orn,\n )\n conf = np.array(conf)\n conf[7:9] = 0.04\n self._grasp.append(conf)\n\n i = (obs[\"frame\"] - self._done_frame) // self._steps_action_repeat\n action = self._grasp[i].copy()\n elif (\n obs[\"frame\"]\n < self._done_frame + 4 * self._steps_action_repeat + self._steps_close_gripper\n ):\n action = self._grasp[3].copy()\n action[7:9] = 0.0\n else:\n if self._back is None:\n self._back = []\n pos = obs[\"panda_body\"].link_state[0, obs[\"panda_link_ind_hand\"], 0:3].numpy()\n dpos_goal = self._cfg.BENCHMARK.GOAL_CENTER - pos\n dpos_step = dpos_goal / np.linalg.norm(dpos_goal) * self._cfg.POLICY.BACK_STEP_SIZE\n num_steps = int(\n np.ceil(np.linalg.norm(dpos_goal) / self._cfg.POLICY.BACK_STEP_SIZE)\n )\n for _ in range(num_steps):\n pos += dpos_step\n conf = pybullet.calculateInverseKinematics(\n obs[\"panda_body\"].contact_id[0], obs[\"panda_link_ind_hand\"] - 1, pos\n )\n conf = np.array(conf)\n conf[7:9] = 0.0\n self._back.append(conf)\n\n num_frames = (\n obs[\"frame\"]\n - self._done_frame\n - 4 * self._steps_action_repeat\n - self._steps_close_gripper\n )\n i = num_frames // self._steps_action_repeat\n i = min(i, len(self._back) - 1)\n action = self._back[i].copy()\n done = i == len(self._back) - 1\n\n return action, done" }, { "identifier": "add_sys_path_from_env", "path": "handover_sim2real/utils.py", "snippet": "def add_sys_path_from_env(name):\n assert name in os.environ, \"Environment variable '{}' is not set\".format(name)\n if os.environ[name] not in sys.path:\n sys.path.append(os.environ[name])" } ]
import argparse
import gym
import itertools
import numpy as np
import os
import ray
from datetime import datetime
from handover.benchmark_wrapper import EpisodeStatus, HandoverBenchmarkWrapper
from handover_sim2real.config import get_cfg
from handover_sim2real.policy import HandoverSim2RealPolicy
from handover_sim2real.utils import add_sys_path_from_env
from experiments.config import cfg_from_file, save_cfg_to_file
from core.trainer import (
    AgentWrapper,
    AgentWrapperGPU05,
    ReplayMemoryWrapper,
    ReplayMemoryWrapperBase,
    RolloutAgentWrapperGPU1,
    Trainer,
    TrainerRemote,
)
from core.utils import get_noise_delta, get_valid_index, rand_sample_joint
4,272
action, obs ) else: # Online. action = self._policy.select_action(state) noise = get_noise_delta( action, self._cfg.RL_TRAIN.action_noise, self._cfg.RL_TRAIN.noise_type ) action = action + noise * noise_scale target_joint_position = self._policy.convert_action_to_target_joint_position( action, obs ) if self._stage == "finetune" and expert_flag: expert_action = action obs, reward, done, info = self._step_env_repeat( target_joint_position, break_if_done=True ) run_grasp_and_back = False if not done: if ( step + 1 == self._max_explore_steps or self._stage == "pretrain" and not explore and step == len(expert_plan) - 5 ): run_grasp_and_back = True elif self._use_grasp_predictor and ( self._stage == "pretrain" and explore or self._stage == "finetune" ): state_grasp, _ = self._policy.get_state(obs) grasp_pred = self._policy.select_action_grasp(state_grasp).item() if grasp_pred: run_grasp_and_back = True if run_grasp_and_back: back_done = False if self._stage == "pretrain" and not explore: obs, _, done, _ = self._step_env_repeat( target_joint_position, break_if_done=True ) if done: back_done = True while not back_done: target_joint_position, back_done = self._policy.grasp_and_back(obs) obs, reward, done, info = self._step_env_repeat( target_joint_position, break_if_done=True ) if done: back_done = True if not done: done = True failure_1 = ( info["status"] & EpisodeStatus.FAILURE_HUMAN_CONTACT == EpisodeStatus.FAILURE_HUMAN_CONTACT ) failure_2 = ( info["status"] & EpisodeStatus.FAILURE_OBJECT_DROP == EpisodeStatus.FAILURE_OBJECT_DROP ) failure_3 = ( info["status"] & EpisodeStatus.FAILURE_TIMEOUT == EpisodeStatus.FAILURE_TIMEOUT ) step_dict = { "timestep": step, "point_state": state[0][0], "expert_flags": expert_flag, "perturb_flags": perturb_flag, "action": action, "reward": reward, "returns": reward, "terminal": done, "target_name": "", "failure_case_1": failure_1, "failure_case_2": failure_2, "failure_case_3": failure_3, } if self._stage == "pretrain": step_dict["goal"] = ee_to_goal_pose if expert_flag: step_dict["expert_action"] = expert_action cur_episode.append(step_dict) step += 1 if not explore: if self._use_ray: self._expert_buffer.add_episode.remote(cur_episode, explore, test) else: self._expert_buffer.add_episode(cur_episode, explore, test) else: if self._use_ray: self._online_buffer.add_episode.remote(cur_episode, explore, test) else: self._online_buffer.add_episode(cur_episode, explore, test) def _step_env_repeat(self, target_joint_position, break_if_done=False): for _ in range(self._policy.steps_action_repeat): obs, reward, done, info = self._env.step(target_joint_position) if break_if_done and done: break return obs, reward, done, info @ray.remote(num_gpus=0.13) class ActorWrapperRemote(ActorWrapper): pass def main(): args = parse_args() args.log = True args.policy = "DDPG" args.save_model = True
# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the NVIDIA License [see LICENSE for details]. add_sys_path_from_env("GADDPG_DIR") def parse_args(): parser = argparse.ArgumentParser(description="Train.") parser.add_argument("--cfg-file", help="path to config file") parser.add_argument("--seed", default=0, type=int, help="random seed") parser.add_argument("--use-grasp-predictor", action="store_true", help="use grasp predictor") parser.add_argument("--use-ray", action="store_true", help="use Ray") parser.add_argument("--pretrained-dir", help="pretrained model directory") parser.add_argument( "opts", nargs=argparse.REMAINDER, help=( """modify config options at the end of the command; use space-separated """ """"PATH.KEY VALUE" pairs; see handover_sim2real/config.py, """ """handover-sim/handover/config.py, and easysim/src/easysim/config.py for all options""" ), ) args = parser.parse_args() return args class ActorWrapper: def __init__( self, stage, cfg, use_ray, rollout_agent, expert_buffer, online_buffer, actor_seed, grasp_agent, grasp_pred_threshold, ): self._stage = stage self._cfg = cfg self._use_ray = use_ray self._expert_buffer = expert_buffer self._online_buffer = online_buffer self._use_grasp_predictor = grasp_agent is not None self._env = HandoverBenchmarkWrapper(gym.make(self._cfg.ENV.ID, cfg=self._cfg)) self._policy = HandoverSim2RealPolicy( cfg, rollout_agent, grasp_agent, grasp_pred_threshold, use_ray=self._use_ray ) self._max_explore_steps = self._cfg.RL_MAX_STEP + 7 if actor_seed is not None: np.random.seed(seed=actor_seed) def rollout(self, num_episodes, explore, test, noise_scale): for _ in range(num_episodes): self._rollout_one(explore, test, noise_scale) def _rollout_one(self, explore, test, noise_scale): scene_idx = np.random.randint(self._env.num_scenes) if self._stage == "pretrain": sample_initial_joint_position = ( np.random.uniform() < self._cfg.RL_TRAIN.HANDOVER_SIM2REAL.sample_initial_joint_position_ratio ) if self._stage == "finetune": sample_initial_joint_position = False reset_to_sample = False if sample_initial_joint_position: self._env.reset(idx=scene_idx) for _ in range(self._cfg.RL_TRAIN.ENV_RESET_TRIALS): initial_joint_position = rand_sample_joint(self._env, init_joints=None) if initial_joint_position is not None: self._env.set_initial_joint_position(initial_joint_position) obs = self._env.reset(idx=scene_idx) if self._env.get_ee_to_ycb_distance() > self._cfg.RL_TRAIN.init_distance_low: reset_to_sample = True break if not reset_to_sample: self._env.set_initial_joint_position(self._cfg.ENV.PANDA_INITIAL_POSITION) obs = self._env.reset(idx=scene_idx) self._policy.reset() expert_plan, _ = self._env.run_omg_planner(self._cfg.RL_MAX_STEP, scene_idx) if expert_plan is None: return if self._stage == "pretrain" and explore: expert_initial = self._cfg.RL_TRAIN.expert_initial_state and not test if expert_initial: expert_initial_steps = np.random.randint( self._cfg.RL_TRAIN.EXPERT_INIT_MIN_STEP, high=self._cfg.RL_TRAIN.EXPERT_INIT_MAX_STEP, ) step = 0 done = False cur_episode = [] while not done: state, _ = self._policy.get_state(obs) if self._stage == "pretrain": apply_dart = ( self._cfg.RL_TRAIN.dart and not explore and reset_to_sample and step > self._cfg.RL_TRAIN.DART_MIN_STEP and step < self._cfg.RL_TRAIN.DART_MAX_STEP and np.random.uniform() < self._cfg.RL_TRAIN.DART_RATIO ) apply_dagger = ( self._cfg.RL_TRAIN.dagger and explore and reset_to_sample and step > self._cfg.RL_TRAIN.DAGGER_MIN_STEP and step < 
self._cfg.RL_TRAIN.DAGGER_MAX_STEP and np.random.uniform() < self._cfg.RL_TRAIN.DAGGER_RATIO ) if apply_dart: t = np.random.uniform(low=-0.04, high=+0.04, size=(3,)) r = np.random.uniform(low=-0.20, high=+0.20, size=(3,)) action = np.hstack([t, r]) target_joint_position = self._policy.convert_action_to_target_joint_position( action, obs ) obs, _, _, _ = self._step_env_repeat(target_joint_position) if apply_dart or apply_dagger: num_steps = self._cfg.RL_MAX_STEP - step expert_plan_dart, _ = self._env.run_omg_planner( num_steps, scene_idx, reset_scene=False ) expert_plan = np.concatenate((expert_plan[:step], expert_plan_dart)) if self._stage == "pretrain": nearest = explore and not apply_dagger ee_to_goal_pose = self._env.get_ee_to_goal_pose(nearest=nearest) if self._stage == "pretrain": expert_flag = ( not explore or expert_initial and step < expert_initial_steps or apply_dagger ) perturb_flag = apply_dart if self._stage == "finetune": expert_flag = not explore perturb_flag = False if self._stage == "pretrain" and expert_flag: expert_action = self._env.convert_target_joint_position_to_action(expert_plan[step]) if ( not explore or self._stage == "pretrain" and expert_initial and step < expert_initial_steps ): # Expert. if self._stage == "pretrain": action = expert_action target_joint_position = expert_plan[step] if self._stage == "finetune": action = self._policy.select_action(state, expert_policy=True) target_joint_position = self._policy.convert_action_to_target_joint_position( action, obs ) else: # Online. action = self._policy.select_action(state) noise = get_noise_delta( action, self._cfg.RL_TRAIN.action_noise, self._cfg.RL_TRAIN.noise_type ) action = action + noise * noise_scale target_joint_position = self._policy.convert_action_to_target_joint_position( action, obs ) if self._stage == "finetune" and expert_flag: expert_action = action obs, reward, done, info = self._step_env_repeat( target_joint_position, break_if_done=True ) run_grasp_and_back = False if not done: if ( step + 1 == self._max_explore_steps or self._stage == "pretrain" and not explore and step == len(expert_plan) - 5 ): run_grasp_and_back = True elif self._use_grasp_predictor and ( self._stage == "pretrain" and explore or self._stage == "finetune" ): state_grasp, _ = self._policy.get_state(obs) grasp_pred = self._policy.select_action_grasp(state_grasp).item() if grasp_pred: run_grasp_and_back = True if run_grasp_and_back: back_done = False if self._stage == "pretrain" and not explore: obs, _, done, _ = self._step_env_repeat( target_joint_position, break_if_done=True ) if done: back_done = True while not back_done: target_joint_position, back_done = self._policy.grasp_and_back(obs) obs, reward, done, info = self._step_env_repeat( target_joint_position, break_if_done=True ) if done: back_done = True if not done: done = True failure_1 = ( info["status"] & EpisodeStatus.FAILURE_HUMAN_CONTACT == EpisodeStatus.FAILURE_HUMAN_CONTACT ) failure_2 = ( info["status"] & EpisodeStatus.FAILURE_OBJECT_DROP == EpisodeStatus.FAILURE_OBJECT_DROP ) failure_3 = ( info["status"] & EpisodeStatus.FAILURE_TIMEOUT == EpisodeStatus.FAILURE_TIMEOUT ) step_dict = { "timestep": step, "point_state": state[0][0], "expert_flags": expert_flag, "perturb_flags": perturb_flag, "action": action, "reward": reward, "returns": reward, "terminal": done, "target_name": "", "failure_case_1": failure_1, "failure_case_2": failure_2, "failure_case_3": failure_3, } if self._stage == "pretrain": step_dict["goal"] = ee_to_goal_pose if expert_flag: 
step_dict["expert_action"] = expert_action cur_episode.append(step_dict) step += 1 if not explore: if self._use_ray: self._expert_buffer.add_episode.remote(cur_episode, explore, test) else: self._expert_buffer.add_episode(cur_episode, explore, test) else: if self._use_ray: self._online_buffer.add_episode.remote(cur_episode, explore, test) else: self._online_buffer.add_episode(cur_episode, explore, test) def _step_env_repeat(self, target_joint_position, break_if_done=False): for _ in range(self._policy.steps_action_repeat): obs, reward, done, info = self._env.step(target_joint_position) if break_if_done and done: break return obs, reward, done, info @ray.remote(num_gpus=0.13) class ActorWrapperRemote(ActorWrapper): pass def main(): args = parse_args() args.log = True args.policy = "DDPG" args.save_model = True
cfg = get_cfg()
0
2023-10-26 23:25:13+00:00
8k
openai/bugbounty-gpt
bugbounty_gpt/__main__.py
[ { "identifier": "db_handler", "path": "bugbounty_gpt/db/db_handler.py", "snippet": "async def _find_submission_by_id(session, submission_id):\nasync def insert_submission(session, submission_data):\nasync def update_submission_state(session, submission_id, new_state):\nasync def fetch_submission_by_state_and_classification(session, states, classifications):\nasync def fetch_submission_by_id(session, submission_id):" }, { "identifier": "SubmissionState", "path": "bugbounty_gpt/db/models.py", "snippet": "class SubmissionState(Enum):\n NEW = 1\n UPDATED_OUT_OF_BAND = 2\n UPDATED = 3" }, { "identifier": "OpenAIHandler", "path": "bugbounty_gpt/handlers/openai_handler.py", "snippet": "class OpenAIHandler:\n @staticmethod\n def _classifications_sanitization(input_string):\n \"\"\"\n Sanitizes the input string by removing spaces, converting to upper case, and replacing spaces with underscores.\n\n :param input_string: The input string to sanitize.\n :return: The sanitized string.\n \"\"\"\n return input_string.strip().replace(' ', '_').upper()\n\n @staticmethod\n def _build_request_data(submission_content):\n \"\"\"\n Builds the request data for the OpenAI API.\n\n :param submission_content: The content of the submission to be classified.\n :return: Dictionary containing the request data.\n \"\"\"\n return {\n \"model\": OPENAI_MODEL,\n \"temperature\": 0,\n \"max_tokens\": 512,\n \"messages\": [\n {\"role\": \"system\", \"content\": OPENAI_PROMPT},\n {\"role\": \"user\", \"content\": submission_content}\n ]\n }\n\n @staticmethod\n def _handle_response_error(error):\n \"\"\"\n Handles errors that occurred during the OpenAI request.\n\n :param error: The error that occurred.\n :return: A tuple containing the default category and an error message.\n \"\"\"\n logger.error(f\"An error occurred during the OpenAI request: {error}\")\n return DEFAULT_CATEGORY, \"An error occurred during classification. 
Please check application logs.\"\n\n @staticmethod\n def _handle_response(response):\n \"\"\"\n Handles the response from the OpenAI API.\n\n :param response: The response object from the OpenAI API.\n :return: A tuple containing the judgment category and explanation, or an error response if something goes wrong.\n \"\"\"\n try:\n response_text = response.choices[0].message.content\n judgement, explanation = response_text.rsplit('\\n', 1)\n sanitized_judgement = OpenAIHandler._classifications_sanitization(judgement)\n if sanitized_judgement in VALID_CATEGORIES:\n return sanitized_judgement, explanation.strip()\n else:\n return DEFAULT_CATEGORY, explanation.strip()\n except Exception as error:\n return OpenAIHandler._handle_response_error(error)\n\n @staticmethod\n async def classify_submission(submission_content):\n \"\"\"\n Classifies the submission content using the OpenAI API.\n\n :param submission_content: The content of the submission to be classified.\n :return: A tuple containing the judgment category and explanation, or an error response if something goes wrong.\n \"\"\"\n logger.info(\"Classifying submission's content.\")\n time.sleep(5) # Consider replacing with a more robust rate-limiting strategy\n try:\n request_data = OpenAIHandler._build_request_data(submission_content)\n loop = asyncio.get_running_loop()\n response = await loop.run_in_executor(None, lambda: openai.ChatCompletion.create(**request_data))\n return OpenAIHandler._handle_response(response)\n except Exception as error:\n return OpenAIHandler._handle_response_error(error)" }, { "identifier": "BugCrowdSubmission", "path": "bugbounty_gpt/handlers/submission_handler.py", "snippet": "class BugCrowdSubmission:\n def __init__(self, submission_id, classification, reasoning):\n \"\"\"\n Initializes a BugCrowdSubmission object.\n\n :param submission_id: ID of the submission.\n :param classification: Classification information for the submission.\n :param reasoning: Reasoning information for the submission.\n \"\"\"\n self.submission_id = submission_id\n self.classification = classification\n self.reasoning = reasoning\n\n def _prepare_assign_data(self, user_id):\n \"\"\"\n Prepares data to assign a user to the submission.\n\n :param user_id: ID of the user to be assigned.\n :return: Dictionary containing the required data.\n \"\"\"\n return {\n 'data': {\n 'type': 'submission',\n 'relationships': {\n 'assignee': {\n 'data': {\n 'id': user_id,\n 'type': 'identity'\n }\n }\n }\n }\n }\n\n def _handle_assign_response(self, response, user_id):\n \"\"\"\n Handles the response after assigning a user to the submission.\n\n :param response: Response object from the assignment operation.\n :param user_id: ID of the user assigned.\n \"\"\"\n if response.status_code == 200:\n logger.info(f\"Submission {self.submission_id} assigned to user {user_id}.\")\n else:\n logger.error(f\"Unable to assign submission {self.submission_id} to user {user_id}. 
Status code: {response.status_code}\")\n\n async def assign_to_user(self, user_id):\n \"\"\"\n Assigns a user to the submission.\n\n :param user_id: ID of the user to be assigned.\n \"\"\"\n data = self._prepare_assign_data(user_id)\n response = await BugCrowdAPI.patch_submission(self.submission_id, data)\n self._handle_assign_response(response, user_id)\n\n async def is_submission_new(self):\n \"\"\"\n Checks if the submission is new.\n\n :return: True if the submission is new, False otherwise.\n \"\"\"\n submission_data = await BugCrowdAPI.fetch_submission(self.submission_id)\n submission_state = submission_data['data']['attributes']['state']\n return submission_state.lower() == 'new'\n\n async def close_submission(self):\n \"\"\"\n Closes the submission on BugCrowd.\n \"\"\"\n logger.info(f\"Closing submission {self.submission_id} on BugCrowd.\")\n data = {\n 'data': {\n 'type': 'submission',\n 'attributes': {\n 'state': 'not_applicable'\n }\n }\n }\n response = await BugCrowdAPI.patch_submission(self.submission_id, data)\n if response.status_code != 200:\n raise Exception(f\"Failed to close submission {self.submission_id}. Status code: {response.status_code}, Content: {response.content}\")\n\n def _prepare_comment_data(self, comment_body, visibility_scope='everyone'):\n \"\"\"\n Prepares data to create a comment.\n\n :param comment_body: Text of the comment.\n :param visibility_scope: Visibility scope of the comment. Default is 'everyone'.\n :return: Dictionary containing the required data.\n \"\"\"\n return {\n \"data\": {\n \"type\": \"comment\",\n \"attributes\": {\n \"body\": comment_body,\n \"visibility_scope\": visibility_scope\n },\n \"relationships\": {\n \"submission\": {\n \"data\": {\n \"id\": self.submission_id,\n \"type\": \"submission\"\n }\n }\n }\n }\n }\n\n def _handle_comment_response_error(self, response):\n \"\"\"\n Handles the error response for a comment creation request.\n\n :param response: Response object from the comment creation operation.\n \"\"\"\n try:\n error_message = response.json()[\"errors\"][0][\"detail\"]\n except (json.JSONDecodeError, KeyError, IndexError):\n error_message = \"An error occurred, but the response is not a valid JSON object.\"\n logger.error(\"Error: \" + error_message)\n\n async def create_comment(self, comment_body, visibility_scope='everyone'):\n \"\"\"\n Creates a comment for the submission on BugCrowd.\n\n :param comment_body: Text of the comment.\n :param visibility_scope: Visibility scope of the comment. 
Default is 'everyone'.\n \"\"\"\n logger.info(f\"Creating comment for submission {self.submission_id} on BugCrowd.\")\n comment_data = self._prepare_comment_data(comment_body, visibility_scope)\n response = await BugCrowdAPI.create_comment(comment_data)\n if response.status_code in [400, 404, 409]:\n self._handle_comment_response_error(response)\n elif response.status_code != 201:\n logger.error(\"An unexpected error occurred.\")\n\n def generate_comment_text(self):\n \"\"\"\n Generates the text for a comment based on the classification.\n\n :return: Generated comment text or None if the classification is not found.\n \"\"\"\n try:\n specific_classification_name = self.classification.name\n specific_classification_text = RESPONSES[specific_classification_name]\n comment_text = f\"Hello!\\n\\n{specific_classification_text}\"\n return comment_text\n except KeyError:\n logger.error(f\"Response for classification {self.classification.name} not found.\")\n return None" }, { "identifier": "BugCrowdAPI", "path": "bugbounty_gpt/handlers/bugcrowd_api.py", "snippet": "class BugCrowdAPI:\n @staticmethod\n def _get_headers(content_type='application/vnd.bugcrowd+json'):\n \"\"\"\n Returns common headers for Bugcrowd API requests.\n\n :param content_type: Content type for the Accept header. Default is 'application/vnd.bugcrowd+json'.\n :return: Dictionary containing the required headers.\n \"\"\"\n return {\n 'Accept': content_type,\n 'Authorization': f'Token {BUGCROWD_API_KEY}'\n }\n\n @staticmethod\n async def _fetch_page(url, params, page_limit, page_offset):\n \"\"\"\n Fetches a page of data from the specified URL with pagination.\n\n :param url: URL to fetch data from.\n :param params: Parameters to include in the request.\n :param page_limit: Limit of items per page.\n :param page_offset: Offset for pagination.\n :return: List of data fetched from the page or an empty list if there is an error.\n \"\"\"\n pagination_params = {\n 'page[limit]': page_limit,\n 'page[offset]': page_offset,\n }\n complete_params = {**params, **pagination_params}\n\n async with httpx.AsyncClient() as client:\n response = await client.get(url, headers=BugCrowdAPI._get_headers(), params=complete_params)\n try:\n data = response.json()\n except json.JSONDecodeError as e:\n logger.error(f\"Error: Unable to decode JSON. 
{e}\")\n return []\n\n return data['data'] if data['data'] else []\n\n @staticmethod\n async def fetch_submissions(params):\n \"\"\"\n Fetches all submissions from BugCrowd.\n\n :param params: Parameters to include in the request.\n :return: List of all submissions or None if no submissions found.\n \"\"\"\n logger.info(\"Fetching submissions from BugCrowd.\")\n url = f'{API_BASE_URL}/submissions'\n page_limit = 100\n page_offset = 0\n all_submissions = []\n delay = 2 # Delay in seconds\n\n while True:\n submissions = await BugCrowdAPI._fetch_page(url, params, page_limit, page_offset)\n if not submissions:\n break\n\n all_submissions.extend(submissions)\n page_offset += page_limit\n\n time.sleep(delay) # Add a delay between API calls\n\n return all_submissions if all_submissions else None\n\n @staticmethod\n async def fetch_submission(submission_id):\n \"\"\"\n Fetches a specific submission from BugCrowd.\n\n :param submission_id: ID of the submission to fetch.\n :return: Submission data as a dictionary or None if an error occurred.\n \"\"\"\n logger.info(f\"Fetching submission {submission_id} from BugCrowd.\")\n url = f'{API_BASE_URL}/submissions/{submission_id}'\n\n async with httpx.AsyncClient() as client:\n response = await client.get(url, headers=BugCrowdAPI._get_headers())\n if response.status_code == 200:\n return response.json()\n else:\n logger.error(f\"Failed to fetch submission {submission_id}. Status code: {response.status_code}\")\n return None\n\n @staticmethod\n async def create_comment(comment_data):\n \"\"\"\n Creates a comment using the provided data.\n\n :param comment_data: Data for the comment.\n :return: Response object from the comment creation operation.\n \"\"\"\n url = f'{API_BASE_URL}/comments'\n headers = BugCrowdAPI._get_headers('application/json')\n\n async with httpx.AsyncClient() as client:\n response = await client.post(url, headers=headers, json=comment_data)\n if response.status_code == 201:\n logger.info(\"Comment created successfully.\")\n else:\n logger.error(f\"Failed to create comment. Status code: {response.status_code}\")\n return response\n\n @staticmethod\n async def patch_submission(submission_id, data):\n \"\"\"\n Patches a specific submission on BugCrowd.\n\n :param submission_id: ID of the submission to patch.\n :param data: Data to be patched.\n :return: Response object from the patch operation or None if an error occurred.\n \"\"\"\n logger.info(f\"Patching submission {submission_id} on BugCrowd.\")\n url = f'{API_BASE_URL}/submissions/{submission_id}'\n headers = BugCrowdAPI._get_headers()\n headers['Content-Type'] = 'application/vnd.bugcrowd.v4+json'\n\n async with httpx.AsyncClient() as client:\n response = await client.patch(url, headers=headers, data=json.dumps(data))\n\n if response.status_code != 200:\n logger.error(f\"Failed to patch submission {submission_id}. Status code: {response.status_code}\")\n return None\n\n return response" }, { "identifier": "USER_ID", "path": "bugbounty_gpt/env.py", "snippet": "USER_ID = CONFIG['user']['user_id']" }, { "identifier": "FILTER_PROGRAM", "path": "bugbounty_gpt/env.py", "snippet": "FILTER_PROGRAM = CONFIG['user']['filter_program']" }, { "identifier": "RESPONSE_CATEGORIES", "path": "bugbounty_gpt/env.py", "snippet": "RESPONSE_CATEGORIES = sanitize_categories([item['name'] for item in CONFIG['categories']['response']])" }, { "identifier": "SQLALCHEMY_URL", "path": "bugbounty_gpt/env.py", "snippet": "SQLALCHEMY_URL = os.getenv(\"SQLALCHEMY_URL\")" } ]
import logging import asyncio from bugbounty_gpt.db import db_handler from bugbounty_gpt.db.models import SubmissionState from bugbounty_gpt.handlers.openai_handler import OpenAIHandler from bugbounty_gpt.handlers.submission_handler import BugCrowdSubmission from bugbounty_gpt.handlers.bugcrowd_api import BugCrowdAPI from bugbounty_gpt.env import USER_ID, FILTER_PROGRAM, RESPONSE_CATEGORIES, SQLALCHEMY_URL from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine from sqlalchemy.orm import sessionmaker
3,627
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) logger.info("Configuration is valid.") logger.info("Initializing database connection.") engine = create_async_engine(SQLALCHEMY_URL, echo=False) SessionLocal = sessionmaker( bind=engine, class_=AsyncSession, expire_on_commit=False, ) SEEN_SUBMISSIONS = [] async def process_new_submissions(): """ Fetch and process new submissions that are not duplicates and store them in the database. """ params = {
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) logger.info("Configuration is valid.") logger.info("Initializing database connection.") engine = create_async_engine(SQLALCHEMY_URL, echo=False) SessionLocal = sessionmaker( bind=engine, class_=AsyncSession, expire_on_commit=False, ) SEEN_SUBMISSIONS = [] async def process_new_submissions(): """ Fetch and process new submissions that are not duplicates and store them in the database. """ params = {
'filter[program]': FILTER_PROGRAM,
6
2023-10-27 22:41:24+00:00
8k
LeapLabTHU/FamO2O
jax_cql/JaxCQL/sac_main.py
[ { "identifier": "SAC", "path": "jax_cql/JaxCQL/sac.py", "snippet": "class SAC(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.discount = 0.99\n config.alpha_multiplier = 1.0\n config.use_automatic_entropy_tuning = True\n config.backup_entropy = False\n config.target_entropy = 0.0\n config.policy_lr = 3e-4\n config.qf_lr = 3e-4\n config.optimizer_type = 'adam'\n config.soft_target_update_rate = 5e-3\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, policy, qf):\n self.config = self.get_default_config(config)\n self.policy = policy\n self.qf = qf\n self.observation_dim = policy.observation_dim\n self.action_dim = policy.action_dim\n\n self._train_states = {}\n\n optimizer_class = {\n 'adam': optax.adam,\n 'sgd': optax.sgd,\n }[self.config.optimizer_type]\n\n policy_params = self.policy.init(\n next_rng(self.policy.rng_keys()),\n jnp.zeros((10, self.observation_dim))\n )\n self._train_states['policy'] = TrainState.create(\n params=policy_params,\n tx=optimizer_class(self.config.policy_lr),\n apply_fn=None\n )\n\n qf1_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((10, self.observation_dim)),\n jnp.zeros((10, self.action_dim))\n )\n self._train_states['qf1'] = TrainState.create(\n params=qf1_params,\n tx=optimizer_class(self.config.qf_lr),\n apply_fn=None,\n )\n qf2_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((10, self.observation_dim)),\n jnp.zeros((10, self.action_dim))\n )\n self._train_states['qf2'] = TrainState.create(\n params=qf2_params,\n tx=optimizer_class(self.config.qf_lr),\n apply_fn=None,\n )\n self._target_qf_params = deepcopy({'qf1': qf1_params, 'qf2': qf2_params})\n\n model_keys = ['policy', 'qf1', 'qf2']\n\n if self.config.use_automatic_entropy_tuning:\n self.log_alpha = Scalar(0.0)\n self._train_states['log_alpha'] = TrainState.create(\n params=self.log_alpha.init(next_rng()),\n tx=optimizer_class(self.config.policy_lr),\n apply_fn=None\n )\n model_keys.append('log_alpha')\n\n self._model_keys = tuple(model_keys)\n self._total_steps = 0\n\n def train(self, batch):\n self._total_steps += 1\n self._train_states, self._target_qf_params, metrics = self._train_step(\n self._train_states, self._target_qf_params, next_rng(), batch\n )\n return metrics\n\n @partial(jax.jit, static_argnames='self')\n def _train_step(self, train_states, target_qf_params, rng, batch):\n rng_generator = JaxRNG(rng)\n\n def loss_fn(train_params, rng):\n observations = batch['observations']\n actions = batch['actions']\n rewards = batch['rewards']\n next_observations = batch['next_observations']\n dones = batch['dones']\n\n loss_collection = {}\n\n @wrap_function_with_rng(rng_generator())\n def forward_policy(rng, *args, **kwargs):\n return self.policy.apply(\n *args, **kwargs,\n rngs=JaxRNG(rng)(self.policy.rng_keys())\n )\n\n @wrap_function_with_rng(rng_generator())\n def forward_qf(rng, *args, **kwargs):\n return self.qf.apply(\n *args, **kwargs,\n rngs=JaxRNG(rng)(self.qf.rng_keys())\n )\n\n new_actions, log_pi = forward_policy(train_params['policy'], observations)\n\n if self.config.use_automatic_entropy_tuning:\n alpha_loss = -self.log_alpha.apply(train_params['log_alpha']) * (log_pi + self.config.target_entropy).mean()\n loss_collection['log_alpha'] = alpha_loss\n alpha = jnp.exp(self.log_alpha.apply(train_params['log_alpha'])) * self.config.alpha_multiplier\n else:\n alpha_loss = 0.0\n alpha = 
self.config.alpha_multiplier\n\n \"\"\" Policy loss \"\"\"\n q_new_actions = jnp.minimum(\n forward_qf(train_params['qf1'], observations, new_actions),\n forward_qf(train_params['qf2'], observations, new_actions),\n )\n policy_loss = (alpha*log_pi - q_new_actions).mean()\n\n loss_collection['policy'] = policy_loss\n\n \"\"\" Q function loss \"\"\"\n q1_pred = forward_qf(train_params['qf1'], observations, actions)\n q2_pred = forward_qf(train_params['qf2'], observations, actions)\n\n new_next_actions, next_log_pi = forward_policy(train_params['policy'], next_observations)\n target_q_values = jnp.minimum(\n forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),\n forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),\n )\n\n if self.config.backup_entropy:\n target_q_values = target_q_values - alpha * next_log_pi\n\n q_target = jax.lax.stop_gradient(\n rewards + (1. - dones) * self.config.discount * target_q_values\n )\n qf1_loss = mse_loss(q1_pred, q_target)\n qf2_loss = mse_loss(q2_pred, q_target)\n\n loss_collection['qf1'] = qf1_loss\n loss_collection['qf2'] = qf2_loss\n\n return tuple(loss_collection[key] for key in self.model_keys), locals()\n\n train_params = {key: train_states[key].params for key in self.model_keys}\n (_, aux_values), grads = value_and_multi_grad(loss_fn, len(self.model_keys), has_aux=True)(train_params, rng)\n\n new_train_states = {\n key: train_states[key].apply_gradients(grads=grads[i][key])\n for i, key in enumerate(self.model_keys)\n }\n new_target_qf_params = {}\n new_target_qf_params['qf1'] = update_target_network(\n new_train_states['qf1'].params, target_qf_params['qf1'],\n self.config.soft_target_update_rate\n )\n new_target_qf_params['qf2'] = update_target_network(\n new_train_states['qf2'].params, target_qf_params['qf2'],\n self.config.soft_target_update_rate\n )\n\n metrics = collect_jax_metrics(\n aux_values,\n ['log_pi', 'policy_loss', 'qf1_loss', 'qf2_loss', 'alpha_loss',\n 'alpha', 'q1_pred', 'q2_pred', 'target_q_values']\n )\n return new_train_states, new_target_qf_params, metrics\n\n @property\n def model_keys(self):\n return self._model_keys\n\n @property\n def train_states(self):\n return self._train_states\n\n @property\n def train_params(self):\n return {key: self.train_states[key].params for key in self.model_keys}\n\n @property\n def total_steps(self):\n return self._total_steps" }, { "identifier": "ReplayBuffer", "path": "jax_cql/JaxCQL/replay_buffer.py", "snippet": "class ReplayBuffer(object):\n def __init__(self, max_size, reward_scale, reward_bias, data=None):\n self._max_size = max_size\n self._next_idx = 0\n self._size = 0\n self._initialized = False\n self._total_steps = 0\n\n self.reward_scale = reward_scale\n self.reward_bias = reward_bias\n\n if data is not None:\n if self._max_size < data['observations'].shape[0]:\n self._max_size = data['observations'].shape[0]\n self.add_batch(data)\n\n def __len__(self):\n return self._size\n\n def clear(self):\n self._size = 0\n self._next_idx = 0\n self._total_steps = 0\n\n def _init_storage(self, observation_dim, action_dim):\n self._observation_dim = observation_dim\n self._action_dim = action_dim\n self._observations = np.zeros((self._max_size, observation_dim), dtype=np.float32)\n self._next_observations = np.zeros((self._max_size, observation_dim), dtype=np.float32)\n self._actions = np.zeros((self._max_size, action_dim), dtype=np.float32)\n self._rewards = np.zeros(self._max_size, dtype=np.float32)\n self._dones = np.zeros(self._max_size, 
dtype=np.float32)\n self._next_idx = 0\n self._size = 0\n self._initialized = True\n\n def add_sample(self, observation, action, reward, next_observation, done):\n if not self._initialized:\n self._init_storage(observation.size, action.size)\n\n # newly add\n reward = self.reward_scale * reward + self.reward_bias\n\n self._observations[self._next_idx, :] = np.array(observation, dtype=np.float32)\n self._next_observations[self._next_idx, :] = np.array(next_observation, dtype=np.float32)\n self._actions[self._next_idx, :] = np.array(action, dtype=np.float32)\n self._rewards[self._next_idx] = reward\n self._dones[self._next_idx] = float(done)\n\n if self._size < self._max_size:\n self._size += 1\n self._next_idx = (self._next_idx + 1) % self._max_size\n self._total_steps += 1\n\n def add_traj(self, observations, actions, rewards, next_observations, dones):\n for o, a, r, no, d in zip(observations, actions, rewards, next_observations, dones):\n self.add_sample(o, a, r, no, d)\n\n def add_batch(self, batch):\n self.add_traj(\n batch['observations'], batch['actions'], batch['rewards'],\n batch['next_observations'], batch['dones']\n )\n\n def sample(self, batch_size):\n indices = np.random.randint(len(self), size=batch_size)\n return self.select(indices)\n\n def select(self, indices):\n return dict(\n observations=self._observations[indices, ...],\n actions=self._actions[indices, ...],\n rewards=self._rewards[indices, ...],\n next_observations=self._next_observations[indices, ...],\n dones=self._dones[indices, ...],\n )\n\n def generator(self, batch_size, n_batchs=None):\n i = 0\n while n_batchs is None or i < n_batchs:\n yield self.sample(batch_size)\n i += 1\n\n @property\n def total_steps(self):\n return self._total_steps\n\n @property\n def data(self):\n return dict(\n observations=self._observations[:self._size, ...],\n actions=self._actions[:self._size, ...],\n rewards=self._rewards[:self._size, ...],\n next_observations=self._next_observations[:self._size, ...],\n dones=self._dones[:self._size, ...]\n )" }, { "identifier": "batch_to_jax", "path": "jax_cql/JaxCQL/jax_utils.py", "snippet": "@jax.jit\ndef batch_to_jax(batch):\n return jax.tree_util.tree_map(jax.device_put, batch)" }, { "identifier": "TanhGaussianPolicy", "path": "jax_cql/JaxCQL/model.py", "snippet": "class TanhGaussianPolicy(nn.Module):\n observation_dim: int\n action_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n log_std_multiplier: float = 1.0\n log_std_offset: float = -1.0\n\n def setup(self):\n self.base_network = FullyConnectedNetwork(\n output_dim=2 * self.action_dim, arch=self.arch, orthogonal_init=self.orthogonal_init\n )\n self.log_std_multiplier_module = Scalar(self.log_std_multiplier)\n self.log_std_offset_module = Scalar(self.log_std_offset)\n\n def log_prob(self, observations, actions):\n if actions.ndim == 3:\n observations = extend_and_repeat(observations, 1, actions.shape[1])\n base_network_output = self.base_network(observations)\n mean, log_std = jnp.split(base_network_output, 2, axis=-1)\n log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()\n log_std = jnp.clip(log_std, -20.0, 2.0)\n action_distribution = distrax.Transformed(\n distrax.MultivariateNormalDiag(mean, jnp.exp(log_std)),\n distrax.Block(distrax.Tanh(), ndims=1)\n )\n return action_distribution.log_prob(actions)\n\n def __call__(self, observations, deterministic=False, repeat=None):\n if repeat is not None:\n observations = extend_and_repeat(observations, 1, repeat)\n base_network_output = 
self.base_network(observations)\n mean, log_std = jnp.split(base_network_output, 2, axis=-1)\n log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()\n log_std = jnp.clip(log_std, -20.0, 2.0)\n action_distribution = distrax.Transformed(\n distrax.MultivariateNormalDiag(mean, jnp.exp(log_std)),\n distrax.Block(distrax.Tanh(), ndims=1)\n )\n if deterministic:\n samples = jnp.tanh(mean)\n log_prob = action_distribution.log_prob(samples)\n else:\n samples, log_prob = action_distribution.sample_and_log_prob(seed=self.make_rng('noise'))\n\n return samples, log_prob\n\n @nn.nowrap\n def rng_keys(self):\n return ('params', 'noise')" }, { "identifier": "FullyConnectedQFunction", "path": "jax_cql/JaxCQL/model.py", "snippet": "class FullyConnectedQFunction(nn.Module):\n observation_dim: int\n action_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n\n @nn.compact\n @multiple_action_q_function\n def __call__(self, observations, actions):\n x = jnp.concatenate([observations, actions], axis=-1)\n x = FullyConnectedNetwork(output_dim=1, arch=self.arch, orthogonal_init=self.orthogonal_init)(x)\n return jnp.squeeze(x, -1)\n\n @nn.nowrap\n def rng_keys(self):\n return ('params',)" }, { "identifier": "SamplerPolicy", "path": "jax_cql/JaxCQL/model.py", "snippet": "class SamplerPolicy(object):\n\n def __init__(self, policy, params):\n self.policy = policy\n self.params = params\n\n def update_params(self, params):\n self.params = params\n return self\n\n @partial(jax.jit, static_argnames=('self', 'deterministic'))\n def act(self, params, rng, observations, deterministic):\n return self.policy.apply(params, observations, deterministic, repeat=None,\n rngs=JaxRNG(rng)(self.policy.rng_keys()), )\n\n def __call__(self, observations, deterministic=False):\n actions, _ = self.act(self.params, next_rng(), observations, deterministic=deterministic)\n assert jnp.all(jnp.isfinite(actions))\n return jax.device_get(actions)" }, { "identifier": "StepSampler", "path": "jax_cql/JaxCQL/sampler.py", "snippet": "class StepSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n self._traj_steps = 0\n self._current_observation = self.env.reset()\n\n def sample(self, policy, n_steps, deterministic=False, replay_buffer=None):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n for _ in range(n_steps):\n self._traj_steps += 1\n observation = self._current_observation\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n self._current_observation = next_observation\n\n if done or self._traj_steps >= self.max_traj_length:\n self._traj_steps = 0\n self._current_observation = self.env.reset()\n\n return dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n )\n\n @property\n def env(self):\n return self._env" }, { "identifier": "TrajSampler", "path": "jax_cql/JaxCQL/sampler.py", "snippet": "class 
TrajSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n\n def sample(self, policy, n_trajs, deterministic=False, replay_buffer=None):\n trajs = []\n for _ in range(n_trajs):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n observation = self.env.reset()\n\n for _ in range(self.max_traj_length):\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n observation = next_observation\n\n if done:\n break\n\n trajs.append(dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n ))\n\n return trajs\n\n @property\n def env(self):\n return self._env" }, { "identifier": "Timer", "path": "jax_cql/JaxCQL/utils.py", "snippet": "class Timer(object):\n\n def __init__(self):\n self._time = None\n\n def __enter__(self):\n self._start_time = time.time()\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self._time = time.time() - self._start_time\n\n def __call__(self):\n return self._time" }, { "identifier": "define_flags_with_default", "path": "jax_cql/JaxCQL/utils.py", "snippet": "def define_flags_with_default(**kwargs):\n for key, val in kwargs.items():\n if isinstance(val, ConfigDict):\n config_flags.DEFINE_config_dict(key, val)\n elif isinstance(val, bool):\n # Note that True and False are instances of int.\n absl.flags.DEFINE_bool(key, val, 'automatically defined flag')\n elif isinstance(val, int):\n absl.flags.DEFINE_integer(key, val, 'automatically defined flag')\n elif isinstance(val, float):\n absl.flags.DEFINE_float(key, val, 'automatically defined flag')\n elif isinstance(val, str):\n absl.flags.DEFINE_string(key, val, 'automatically defined flag')\n else:\n raise ValueError('Incorrect value type')\n return kwargs" }, { "identifier": "set_random_seed", "path": "jax_cql/JaxCQL/utils.py", "snippet": "def set_random_seed(seed):\n np.random.seed(seed)\n random.seed(seed)\n init_rng(seed)" }, { "identifier": "print_flags", "path": "jax_cql/JaxCQL/utils.py", "snippet": "def print_flags(flags, flags_def):\n logging.info(\n 'Running training with hyperparameters: \\n{}'.format(\n pprint.pformat(\n ['{}: {}'.format(key, val) for key, val in get_user_flags(flags, flags_def).items()]\n )\n )\n )" }, { "identifier": "get_user_flags", "path": "jax_cql/JaxCQL/utils.py", "snippet": "def get_user_flags(flags, flags_def):\n output = {}\n for key in flags_def:\n val = getattr(flags, key)\n if isinstance(val, ConfigDict):\n output.update(flatten_config_dict(val, prefix=key))\n else:\n output[key] = val\n\n return output" }, { "identifier": "prefix_metrics", "path": "jax_cql/JaxCQL/utils.py", "snippet": "def prefix_metrics(metrics, prefix):\n return {\n '{}/{}'.format(prefix, key): value for key, value in metrics.items()\n }" }, { "identifier": "WandBLogger", "path": "jax_cql/JaxCQL/utils.py", "snippet": "class WandBLogger(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = 
ConfigDict()\n config.online = False\n config.prefix = 'FamilyJaxCQL'\n config.project = 'sac'\n config.output_dir = '/tmp/FamilyJaxCQL'\n config.random_delay = 0.0\n config.experiment_id = config_dict.placeholder(str)\n config.anonymous = config_dict.placeholder(str)\n config.notes = config_dict.placeholder(str)\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, variant):\n self.config = self.get_default_config(config)\n\n if self.config.experiment_id is None:\n self.config.experiment_id = uuid.uuid4().hex\n\n if self.config.prefix != '':\n self.config.project = '{}--{}'.format(self.config.prefix, self.config.project)\n\n if self.config.output_dir == '':\n self.config.output_dir = tempfile.mkdtemp()\n else:\n self.config.output_dir = os.path.join(self.config.output_dir, self.config.experiment_id)\n os.makedirs(self.config.output_dir, exist_ok=True)\n\n self._variant = copy(variant)\n\n if 'hostname' not in self._variant:\n self._variant['hostname'] = gethostname()\n\n if self.config.random_delay > 0:\n time.sleep(np.random.uniform(0, self.config.random_delay))\n\n self.run = wandb.init(\n reinit=True,\n config=self._variant,\n project=self.config.project,\n dir=self.config.output_dir,\n id=self.config.experiment_id,\n anonymous=self.config.anonymous,\n notes=self.config.notes,\n settings=wandb.Settings(\n start_method=\"thread\",\n _disable_stats=True,\n ),\n mode='online' if self.config.online else 'offline',\n )\n\n def log(self, *args, **kwargs):\n self.run.log(*args, **kwargs)\n\n def save_pickle(self, obj, filename):\n with open(os.path.join(self.config.output_dir, filename), 'wb') as fout:\n pickle.dump(obj, fout)\n\n @property\n def experiment_id(self):\n return self.config.experiment_id\n\n @property\n def variant(self):\n return self.config.variant\n\n @property\n def output_dir(self):\n return self.config.output_dir" } ]
import os import time import uuid import numpy as np import pprint import gym import jax import jax.numpy as jnp import flax import absl.app import absl.flags from copy import deepcopy from .sac import SAC from .replay_buffer import ReplayBuffer from .jax_utils import batch_to_jax from .model import TanhGaussianPolicy, FullyConnectedQFunction, SamplerPolicy from .sampler import StepSampler, TrajSampler from .utils import ( Timer, define_flags_with_default, set_random_seed, print_flags, get_user_flags, prefix_metrics, WandBLogger ) from viskit.logging import logger, setup_logger
6,539
FLAGS_DEF = define_flags_with_default( env='HalfCheetah-v2', max_traj_length=1000, replay_buffer_size=1000000, seed=42, save_model=False, policy_arch='256-256', qf_arch='256-256', orthogonal_init=False, policy_log_std_multiplier=1.0, policy_log_std_offset=-1.0, n_epochs=2000, n_env_steps_per_epoch=1000, n_train_step_per_epoch=1000, eval_period=10, eval_n_trajs=5, batch_size=256, sac=SAC.get_default_config(), logging=WandBLogger.get_default_config(), ) def main(argv): FLAGS = absl.flags.FLAGS variant = get_user_flags(FLAGS, FLAGS_DEF) wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant) setup_logger( variant=variant, exp_id=wandb_logger.experiment_id, seed=FLAGS.seed, base_log_dir=FLAGS.logging.output_dir, include_exp_prefix_sub_dir=False ) set_random_seed(FLAGS.seed) train_sampler = StepSampler(gym.make(FLAGS.env).unwrapped, FLAGS.max_traj_length) eval_sampler = TrajSampler(gym.make(FLAGS.env).unwrapped, FLAGS.max_traj_length) replay_buffer = ReplayBuffer(FLAGS.replay_buffer_size) observation_dim = eval_sampler.env.observation_space.shape[0] action_dim = eval_sampler.env.action_space.shape[0] policy = TanhGaussianPolicy( observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init, FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset ) qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init) if FLAGS.sac.target_entropy >= 0.0: FLAGS.sac.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item() sac = SAC(FLAGS.sac, policy, qf) sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy']) viskit_metrics = {} for epoch in range(FLAGS.n_epochs): metrics = {} with Timer() as rollout_timer: train_sampler.sample( sampler_policy.update_params(sac.train_params['policy']), FLAGS.n_env_steps_per_epoch, deterministic=False, replay_buffer=replay_buffer ) metrics['env_steps'] = replay_buffer.total_steps metrics['epoch'] = epoch with Timer() as train_timer: for batch_idx in range(FLAGS.n_train_step_per_epoch):
FLAGS_DEF = define_flags_with_default( env='HalfCheetah-v2', max_traj_length=1000, replay_buffer_size=1000000, seed=42, save_model=False, policy_arch='256-256', qf_arch='256-256', orthogonal_init=False, policy_log_std_multiplier=1.0, policy_log_std_offset=-1.0, n_epochs=2000, n_env_steps_per_epoch=1000, n_train_step_per_epoch=1000, eval_period=10, eval_n_trajs=5, batch_size=256, sac=SAC.get_default_config(), logging=WandBLogger.get_default_config(), ) def main(argv): FLAGS = absl.flags.FLAGS variant = get_user_flags(FLAGS, FLAGS_DEF) wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant) setup_logger( variant=variant, exp_id=wandb_logger.experiment_id, seed=FLAGS.seed, base_log_dir=FLAGS.logging.output_dir, include_exp_prefix_sub_dir=False ) set_random_seed(FLAGS.seed) train_sampler = StepSampler(gym.make(FLAGS.env).unwrapped, FLAGS.max_traj_length) eval_sampler = TrajSampler(gym.make(FLAGS.env).unwrapped, FLAGS.max_traj_length) replay_buffer = ReplayBuffer(FLAGS.replay_buffer_size) observation_dim = eval_sampler.env.observation_space.shape[0] action_dim = eval_sampler.env.action_space.shape[0] policy = TanhGaussianPolicy( observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init, FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset ) qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init) if FLAGS.sac.target_entropy >= 0.0: FLAGS.sac.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item() sac = SAC(FLAGS.sac, policy, qf) sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy']) viskit_metrics = {} for epoch in range(FLAGS.n_epochs): metrics = {} with Timer() as rollout_timer: train_sampler.sample( sampler_policy.update_params(sac.train_params['policy']), FLAGS.n_env_steps_per_epoch, deterministic=False, replay_buffer=replay_buffer ) metrics['env_steps'] = replay_buffer.total_steps metrics['epoch'] = epoch with Timer() as train_timer: for batch_idx in range(FLAGS.n_train_step_per_epoch):
batch = batch_to_jax(replay_buffer.sample(FLAGS.batch_size))
2
2023-10-25 11:53:25+00:00
8k
DAMO-NLP-SG/CLEX
serve/cli.py
[ { "identifier": "ChatIO", "path": "serve/inference.py", "snippet": "class ChatIO(abc.ABC):\n @abc.abstractmethod\n def prompt_for_input(self, role: str) -> str:\n \"\"\"Prompt for input from a role.\"\"\"\n\n @abc.abstractmethod\n def prompt_for_output(self, role: str):\n \"\"\"Prompt for output from a role.\"\"\"\n\n @abc.abstractmethod\n def stream_output(self, output_stream):\n \"\"\"Stream output.\"\"\"\n\n @abc.abstractmethod\n def print_output(self, text: str):\n \"\"\"Print output.\"\"\"" }, { "identifier": "chat_loop", "path": "serve/inference.py", "snippet": "def chat_loop(\n model_path: str,\n device: str,\n num_gpus: int,\n max_gpu_memory: str,\n dtype: Optional[torch.dtype],\n load_8bit: bool,\n cpu_offloading: bool,\n conv_template: Optional[str],\n conv_system_msg: Optional[str],\n temperature: float,\n repetition_penalty: float,\n max_new_tokens: int,\n chatio: ChatIO,\n gptq_config: Optional[GptqConfig] = None,\n awq_config: Optional[AWQConfig] = None,\n exllama_config: Optional[ExllamaConfig] = None,\n revision: str = \"main\",\n judge_sent_end: bool = True,\n debug: bool = True,\n history: bool = True,\n log_scale: bool = True,\n):\n # Model\n config = AutoConfig.from_pretrained(\n model_path\n )\n config.log_scale = log_scale\n model, tokenizer = load_model(\n model_path,\n device=device,\n num_gpus=num_gpus,\n max_gpu_memory=max_gpu_memory,\n dtype=dtype,\n load_8bit=load_8bit,\n cpu_offloading=cpu_offloading,\n gptq_config=gptq_config,\n awq_config=awq_config,\n exllama_config=exllama_config,\n revision=revision,\n debug=debug,\n config=config\n )\n generate_stream_func = get_generate_stream_function(model, model_path)\n\n model_type = str(type(model)).lower()\n is_t5 = \"t5\" in model_type\n is_codet5p = \"codet5p\" in model_type\n\n # Hardcode T5's default repetition penalty to be 1.2\n if is_t5 and repetition_penalty == 1.0:\n repetition_penalty = 1.2\n\n # Set context length\n context_len = get_context_length(model.config)\n\n # Chat\n def new_chat():\n if conv_template:\n conv = get_conv_template(conv_template)\n else:\n conv = get_conversation_template(model_path)\n if conv_system_msg is not None:\n conv.set_system_message(conv_system_msg)\n return conv\n\n def reload_conv(conv):\n \"\"\"\n Reprints the conversation from the start.\n \"\"\"\n for message in conv.messages[conv.offset :]:\n chatio.prompt_for_output(message[0])\n chatio.print_output(message[1])\n\n conv = None\n\n while True:\n if not history or not conv:\n conv = new_chat()\n\n try:\n inp = chatio.prompt_for_input(conv.roles[0])\n except EOFError:\n inp = \"\"\n\n if inp == \"!!exit\" or not inp:\n print(\"exit...\")\n break\n elif inp == \"!!reset\":\n print(\"resetting...\")\n conv = new_chat()\n continue\n elif inp == \"!!remove\":\n print(\"removing last message...\")\n if len(conv.messages) > conv.offset:\n # Assistant\n if conv.messages[-1][0] == conv.roles[1]:\n conv.messages.pop()\n # User\n if conv.messages[-1][0] == conv.roles[0]:\n conv.messages.pop()\n reload_conv(conv)\n else:\n print(\"No messages to remove.\")\n continue\n elif inp == \"!!regen\":\n print(\"regenerating last message...\")\n if len(conv.messages) > conv.offset:\n # Assistant\n if conv.messages[-1][0] == conv.roles[1]:\n conv.messages.pop()\n # User\n if conv.messages[-1][0] == conv.roles[0]:\n reload_conv(conv)\n # Set inp to previous message\n inp = conv.messages.pop()[1]\n else:\n # Shouldn't happen in normal circumstances\n print(\"No user message to regenerate from.\")\n continue\n else:\n print(\"No messages to 
regenerate.\")\n continue\n elif inp.startswith(\"!!save\"):\n args = inp.split(\" \", 1)\n\n if len(args) != 2:\n print(\"usage: !!save <filename>\")\n continue\n else:\n filename = args[1]\n\n # Add .json if extension not present\n if not \".\" in filename:\n filename += \".json\"\n\n print(\"saving...\", filename)\n with open(filename, \"w\") as outfile:\n json.dump(conv.dict(), outfile)\n continue\n elif inp.startswith(\"!!load\"):\n args = inp.split(\" \", 1)\n\n if len(args) != 2:\n print(\"usage: !!load <filename>\")\n continue\n else:\n filename = args[1]\n\n # Check if file exists and add .json if needed\n if not os.path.exists(filename):\n if (not filename.endswith(\".json\")) and os.path.exists(\n filename + \".json\"\n ):\n filename += \".json\"\n else:\n print(\"file not found:\", filename)\n continue\n\n print(\"loading...\", filename)\n with open(filename, \"r\") as infile:\n new_conv = json.load(infile)\n\n conv = get_conv_template(new_conv[\"template_name\"])\n conv.set_system_message(new_conv[\"system_message\"])\n conv.messages = new_conv[\"messages\"]\n reload_conv(conv)\n continue\n\n conv.append_message(conv.roles[0], inp)\n conv.append_message(conv.roles[1], None)\n prompt = conv.get_prompt()\n\n if is_codet5p: # codet5p is a code completion model.\n prompt = inp\n\n gen_params = {\n \"model\": model_path,\n \"prompt\": prompt,\n \"temperature\": temperature,\n \"repetition_penalty\": repetition_penalty,\n \"max_new_tokens\": max_new_tokens,\n \"stop\": conv.stop_str,\n \"stop_token_ids\": conv.stop_token_ids,\n \"echo\": False,\n }\n\n try:\n chatio.prompt_for_output(conv.roles[1])\n output_stream = generate_stream_func(\n model,\n tokenizer,\n gen_params,\n device,\n context_len=context_len,\n judge_sent_end=judge_sent_end,\n )\n t = time.time()\n outputs = chatio.stream_output(output_stream)\n duration = time.time() - t\n conv.update_last_message(outputs.strip())\n\n if debug:\n num_tokens = len(tokenizer.encode(outputs))\n msg = {\n \"conv_template\": conv.name,\n \"prompt\": prompt,\n \"outputs\": outputs,\n \"speed (token/s)\": round(num_tokens / duration, 2),\n }\n print(f\"\\n{msg}\\n\")\n\n except KeyboardInterrupt:\n print(\"stopped generation.\")\n # If generation didn't finish\n if conv.messages[-1][1] is None:\n conv.messages.pop()\n # Remove last user message, so there isn't a double up\n if conv.messages[-1][0] == conv.roles[0]:\n conv.messages.pop()\n\n reload_conv(conv)" } ]
import argparse import os import re import sys import torch from prompt_toolkit import PromptSession from prompt_toolkit.auto_suggest import AutoSuggestFromHistory from prompt_toolkit.completion import WordCompleter from prompt_toolkit.history import InMemoryHistory from prompt_toolkit.key_binding import KeyBindings from rich.console import Console from rich.live import Live from rich.markdown import Markdown from fastchat.model.model_adapter import add_model_args from fastchat.modules.awq import AWQConfig from fastchat.modules.exllama import ExllamaConfig from fastchat.modules.gptq import GptqConfig from serve.inference import ChatIO, chat_loop from fastchat.utils import str_to_torch_dtype
3,622
# TODO(suquark): multiline input has some issues. fix it later. prompt_input = self._prompt_session.prompt( completer=self._completer, multiline=False, mouse_support=self._mouse, auto_suggest=AutoSuggestFromHistory(), key_bindings=self.bindings if self._multiline else None, ) self._console.print() return prompt_input def prompt_for_output(self, role: str): self._console.print(f"[bold]{role.replace('/', '|')}:") def stream_output(self, output_stream): """Stream output from a role.""" # TODO(suquark): the console flickers when there is a code block # above it. We need to cut off "live" when a code block is done. # Create a Live context for updating the console output with Live(console=self._console, refresh_per_second=4) as live: # Read lines from the stream for outputs in output_stream: if not outputs: continue text = outputs["text"] # Render the accumulated text as Markdown # NOTE: this is a workaround for the rendering "unstandard markdown" # in rich. The chatbots output treat "\n" as a new line for # better compatibility with real-world text. However, rendering # in markdown would break the format. It is because standard markdown # treat a single "\n" in normal text as a space. # Our workaround is adding two spaces at the end of each line. # This is not a perfect solution, as it would # introduce trailing spaces (only) in code block, but it works well # especially for console output, because in general the console does not # care about trailing spaces. lines = [] for line in text.splitlines(): lines.append(line) if line.startswith("```"): # Code block marker - do not add trailing spaces, as it would # break the syntax highlighting lines.append("\n") else: lines.append(" \n") markdown = Markdown("".join(lines)) # Update the Live console output live.update(markdown) self._console.print() return text def print_output(self, text: str): self.stream_output([{"text": text}]) class ProgrammaticChatIO(ChatIO): def prompt_for_input(self, role) -> str: contents = "" # `end_sequence` signals the end of a message. It is unlikely to occur in # message content. end_sequence = " __END_OF_A_MESSAGE_47582648__\n" len_end = len(end_sequence) while True: if len(contents) >= len_end: last_chars = contents[-len_end:] if last_chars == end_sequence: break try: char = sys.stdin.read(1) contents = contents + char except EOFError: continue contents = contents[:-len_end] print(f"[!OP:{role}]: {contents}", flush=True) return contents def prompt_for_output(self, role: str): print(f"[!OP:{role}]: ", end="", flush=True) def stream_output(self, output_stream): pre = 0 for outputs in output_stream: output_text = outputs["text"] output_text = output_text.strip().split(" ") now = len(output_text) - 1 if now > pre: print(" ".join(output_text[pre:now]), end=" ", flush=True) pre = now print(" ".join(output_text[pre:]), flush=True) return " ".join(output_text) def print_output(self, text: str): print(text) def main(args): if args.gpus: if len(args.gpus.split(",")) < args.num_gpus: raise ValueError( f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!" 
) os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus os.environ["XPU_VISIBLE_DEVICES"] = args.gpus if args.enable_exllama: exllama_config = ExllamaConfig( max_seq_len=args.exllama_max_seq_len, gpu_split=args.exllama_gpu_split, ) else: exllama_config = None if args.style == "simple": chatio = SimpleChatIO(args.multiline) elif args.style == "rich": chatio = RichChatIO(args.multiline, args.mouse) elif args.style == "programmatic": chatio = ProgrammaticChatIO() else: raise ValueError(f"Invalid style for console: {args.style}") try:
""" Chat with a model with command line interface. Usage: python3 -m fastchat.serve.cli --model lmsys/vicuna-7b-v1.3 python3 -m fastchat.serve.cli --model lmsys/fastchat-t5-3b-v1.0 Other commands: - Type "!!exit" or an empty line to exit. - Type "!!reset" to start a new conversation. - Type "!!remove" to remove the last prompt. - Type "!!regen" to regenerate the last message. - Type "!!save <filename>" to save the conversation history to a json file. - Type "!!load <filename>" to load a conversation history from a json file. """ class SimpleChatIO(ChatIO): def __init__(self, multiline: bool = False): self._multiline = multiline def prompt_for_input(self, role) -> str: if not self._multiline: return input(f"{role}: ") prompt_data = [] line = input(f"{role} [ctrl-d/z on empty line to end]: ") while True: prompt_data.append(line.strip()) try: line = input() except EOFError as e: break return "\n".join(prompt_data) def prompt_for_output(self, role: str): print(f"{role}: ", end="", flush=True) def stream_output(self, output_stream): pre = 0 for outputs in output_stream: output_text = outputs["text"] output_text = output_text.strip().split(" ") now = len(output_text) - 1 if now > pre: print(" ".join(output_text[pre:now]), end=" ", flush=True) pre = now print(" ".join(output_text[pre:]), flush=True) return " ".join(output_text) def print_output(self, text: str): print(text) class RichChatIO(ChatIO): bindings = KeyBindings() @bindings.add("escape", "enter") def _(event): event.app.current_buffer.newline() def __init__(self, multiline: bool = False, mouse: bool = False): self._prompt_session = PromptSession(history=InMemoryHistory()) self._completer = WordCompleter( words=["!!exit", "!!reset", "!!remove", "!!regen", "!!save", "!!load"], pattern=re.compile("$"), ) self._console = Console() self._multiline = multiline self._mouse = mouse def prompt_for_input(self, role) -> str: self._console.print(f"[bold]{role}:") # TODO(suquark): multiline input has some issues. fix it later. prompt_input = self._prompt_session.prompt( completer=self._completer, multiline=False, mouse_support=self._mouse, auto_suggest=AutoSuggestFromHistory(), key_bindings=self.bindings if self._multiline else None, ) self._console.print() return prompt_input def prompt_for_output(self, role: str): self._console.print(f"[bold]{role.replace('/', '|')}:") def stream_output(self, output_stream): """Stream output from a role.""" # TODO(suquark): the console flickers when there is a code block # above it. We need to cut off "live" when a code block is done. # Create a Live context for updating the console output with Live(console=self._console, refresh_per_second=4) as live: # Read lines from the stream for outputs in output_stream: if not outputs: continue text = outputs["text"] # Render the accumulated text as Markdown # NOTE: this is a workaround for the rendering "unstandard markdown" # in rich. The chatbots output treat "\n" as a new line for # better compatibility with real-world text. However, rendering # in markdown would break the format. It is because standard markdown # treat a single "\n" in normal text as a space. # Our workaround is adding two spaces at the end of each line. # This is not a perfect solution, as it would # introduce trailing spaces (only) in code block, but it works well # especially for console output, because in general the console does not # care about trailing spaces. 
lines = [] for line in text.splitlines(): lines.append(line) if line.startswith("```"): # Code block marker - do not add trailing spaces, as it would # break the syntax highlighting lines.append("\n") else: lines.append(" \n") markdown = Markdown("".join(lines)) # Update the Live console output live.update(markdown) self._console.print() return text def print_output(self, text: str): self.stream_output([{"text": text}]) class ProgrammaticChatIO(ChatIO): def prompt_for_input(self, role) -> str: contents = "" # `end_sequence` signals the end of a message. It is unlikely to occur in # message content. end_sequence = " __END_OF_A_MESSAGE_47582648__\n" len_end = len(end_sequence) while True: if len(contents) >= len_end: last_chars = contents[-len_end:] if last_chars == end_sequence: break try: char = sys.stdin.read(1) contents = contents + char except EOFError: continue contents = contents[:-len_end] print(f"[!OP:{role}]: {contents}", flush=True) return contents def prompt_for_output(self, role: str): print(f"[!OP:{role}]: ", end="", flush=True) def stream_output(self, output_stream): pre = 0 for outputs in output_stream: output_text = outputs["text"] output_text = output_text.strip().split(" ") now = len(output_text) - 1 if now > pre: print(" ".join(output_text[pre:now]), end=" ", flush=True) pre = now print(" ".join(output_text[pre:]), flush=True) return " ".join(output_text) def print_output(self, text: str): print(text) def main(args): if args.gpus: if len(args.gpus.split(",")) < args.num_gpus: raise ValueError( f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!" ) os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus os.environ["XPU_VISIBLE_DEVICES"] = args.gpus if args.enable_exllama: exllama_config = ExllamaConfig( max_seq_len=args.exllama_max_seq_len, gpu_split=args.exllama_gpu_split, ) else: exllama_config = None if args.style == "simple": chatio = SimpleChatIO(args.multiline) elif args.style == "rich": chatio = RichChatIO(args.multiline, args.mouse) elif args.style == "programmatic": chatio = ProgrammaticChatIO() else: raise ValueError(f"Invalid style for console: {args.style}") try:
chat_loop(
1
2023-10-25 05:30:25+00:00
8k
RenShuhuai-Andy/TESTA
models/blip.py
[ { "identifier": "VisionTransformer", "path": "models/vit.py", "snippet": "class VisionTransformer(nn.Module):\n \"\"\" Vision Transformer\n A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -\n https://arxiv.org/abs/2010.11929\n \"\"\"\n def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\n num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, \n use_grad_checkpointing=False, ckpt_layer=0):\n \"\"\"\n Args:\n img_size (int, tuple): input image size\n patch_size (int, tuple): patch size\n in_chans (int): number of input channels\n num_classes (int): number of classes for classification head\n embed_dim (int): embedding dimension\n depth (int): depth of transformer\n num_heads (int): number of attention heads\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim\n qkv_bias (bool): enable bias for qkv if True\n qk_scale (float): override default qk scale of head_dim ** -0.5 if set\n representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\n drop_rate (float): dropout rate\n attn_drop_rate (float): attention dropout rate\n drop_path_rate (float): stochastic depth rate\n norm_layer: (nn.Module): normalization layer\n \"\"\"\n super().__init__()\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\n norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\n\n num_patches = self.patch_embed.num_patches\n\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule\n self.blocks = nn.ModuleList([\n Block(\n dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,\n use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer)\n )\n for i in range(depth)])\n self.norm = norm_layer(embed_dim)\n\n trunc_normal_(self.pos_embed, std=.02)\n trunc_normal_(self.cls_token, std=.02)\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed', 'cls_token'}\n\n def forward(self, x, register_blk=-1):\n B = x.shape[0]\n x = self.patch_embed(x)\n\n cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n x = torch.cat((cls_tokens, x), dim=1)\n \n x = x + self.pos_embed[:,:x.size(1),:]\n x = self.pos_drop(x)\n\n for i,blk in enumerate(self.blocks):\n x = blk(x, register_blk==i)\n x = self.norm(x)\n \n return x\n\n @torch.jit.ignore()\n def load_pretrained(self, checkpoint_path, prefix=''):\n _load_weights(self, checkpoint_path, prefix)" }, { "identifier": "interpolate_pos_embed", "path": "models/vit.py", "snippet": "def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder): \n # 
interpolate position embedding\n embedding_size = pos_embed_checkpoint.shape[-1]\n num_patches = visual_encoder.patch_embed.num_patches\n num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches\n # height (== width) for the checkpoint position embedding\n orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n # height (== width) for the new position embedding\n new_size = int(num_patches ** 0.5)\n\n if orig_size!=new_size:\n # class_token and dist_token are kept unchanged\n extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n # only the position tokens are interpolated\n pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n pos_tokens = torch.nn.functional.interpolate(\n pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2))\n \n return new_pos_embed \n else:\n return pos_embed_checkpoint" }, { "identifier": "BertConfig", "path": "models/med.py", "snippet": "class BertEmbeddings(nn.Module):\nclass BertSelfAttention(nn.Module):\nclass BertSelfOutput(nn.Module):\nclass BertAttention(nn.Module):\nclass BertIntermediate(nn.Module):\nclass BertOutput(nn.Module):\nclass BertLayer(nn.Module):\nclass BertEncoder(nn.Module):\nclass BertPooler(nn.Module):\nclass BertPredictionHeadTransform(nn.Module):\nclass BertLMPredictionHead(nn.Module):\nclass BertOnlyMLMHead(nn.Module):\nclass BertPreTrainedModel(PreTrainedModel):\nclass BertModel(BertPreTrainedModel):\nclass BertLMHeadModel(BertPreTrainedModel):\n def __init__(self, config):\n def forward(\n self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n def __init__(self, config, is_cross_attention):\n def save_attn_gradients(self, attn_gradients):\n def get_attn_gradients(self):\n def save_attention_map(self, attention_map):\n def get_attention_map(self):\n def transpose_for_scores(self, x):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n def __init__(self, config):\n def forward(self, hidden_states, input_tensor):\n def __init__(self, config, is_cross_attention=False):\n def prune_heads(self, heads):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states, input_tensor):\n def __init__(self, config, layer_num):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n mode=None,\n ):\n def feed_forward_chunk(self, attention_output):\n def __init__(self, config):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n mode='multimodal',\n ):\n def create_custom_forward(module):\n def 
custom_forward(*inputs):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, sequence_output):\n def _init_weights(self, module):\n def __init__(self, config, add_pooling_layer=True):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def _prune_heads(self, heads_to_prune):\n def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n is_decoder=False,\n mode='multimodal',\n ):\n def __init__(self, config):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n return_logits=False, \n is_decoder=True,\n reduction='mean',\n mode='multimodal', \n ):\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n def _reorder_cache(self, past, beam_idx):" }, { "identifier": "TimeSformer", "path": "models/timesformer/models/vit.py", "snippet": "def TimeSformer(model_cfg , **kwargs):\n\n # img_size=224, patch_size=16, num_classes=400, num_frames=8, attention_type='divided_space_time', pretrained_model=''\n img_size = model_cfg['image_size']\n patch_size = model_cfg['patch_size']\n num_frames = model_cfg['num_frames'] \n learnable_temporal_scaling = model_cfg['learnable_temporal_scaling']\n attention_type = model_cfg['attention_type']\n embed_dim = model_cfg['vision_width']\n num_heads = model_cfg['num_heads']\n use_grad_checkpointing = model_cfg['use_grad_checkpointing']\n ckpt_layer = model_cfg['vit_ckpt_layer']\n num_classes = 0\n #self.pretrained = False # We don't need this because we aim to load from BLIP V-L pre-trained checkpoints instead of 2D/3D ViT checkpoints trained on vision tasks\n model = VisionTransformer(img_size=img_size, num_classes=num_classes, patch_size=patch_size, embed_dim=embed_dim, depth=12,\n num_heads=num_heads, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), drop_rate=0., attn_drop_rate=0.,\n drop_path_rate=0, num_frames=num_frames, attention_type=attention_type, learnable_temporal_scaling=learnable_temporal_scaling,\n use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, **kwargs)\n\n #self.attention_type = attention_type\n #model.default_cfg = default_cfgs['vit_base_patch'+str(patch_size)+'_224']\n model.num_patches = (img_size // patch_size) * (img_size // patch_size)\n #if self.pretrained:\n # load_pretrained(self.model, num_classes=self.model.num_classes, in_chans=kwargs.get('in_chans', 3), filter_fn=_conv_filter, img_size=img_size, num_frames=num_frames, num_patches=self.num_patches, attention_type=self.attention_type, pretrained_model=pretrained_model)\n\n return model" }, { "identifier": "basic_check_arguments", "path": 
"configs/config.py", "snippet": "def parse_with_config(parsed_args):\n def __init__(self, desc=\"shared config\"):\n def parse_args(self):\n def add_downstream_args(self):\n def shared_video_captioning_config(self, cbs=False, scst=False):\n def constraint_beam_search_args(self):\n def self_critic_args(self):\ndef basic_check_arguments(args):\ndef restore_training_settings(args):\nclass SharedConfigs(object):" }, { "identifier": "str_to_bool", "path": "utils.py", "snippet": "def str_to_bool(value):\n if value.lower() in {'false', 'f', '0', 'no', 'n'}:\n return False\n elif value.lower() in {'true', 't', '1', 'yes', 'y'}:\n return True\n raise ValueError(f'{value} is not a valid boolean value')" } ]
import warnings import torch import torch.nn.functional as F import os from models.vit import VisionTransformer, interpolate_pos_embed from models.med import BertConfig, BertModel, BertLMHeadModel from transformers import BertTokenizer from torch import nn from urllib.parse import urlparse from timm.models.hub import download_cached_file from models.timesformer.models.vit import TimeSformer from configs.config import basic_check_arguments, shared_configs, restore_training_settings from utils import str_to_bool from einops import rearrange from timm.models.helpers import load_custom_pretrained from timm.models.vision_transformer import default_cfgs
3,933
''' Adapted from https://github.com/salesforce/BLIP ''' warnings.filterwarnings("ignore") def get_custom_args(base_config): parser = base_config.parser ''' parser.add_argument('--max_num_frames', type=int, default=32) parser.add_argument('--img_res', type=int, default=224) parser.add_argument('--patch_size', type=int, default=32) parser.add_argument("--grid_feat", type=str_to_bool, nargs='?', const=True, default=True) parser.add_argument("--kinetics", type=str, default='600', help="400 or 600") parser.add_argument("--pretrained_2d", type=str_to_bool, nargs='?', const=True, default=False) parser.add_argument("--vidswin_size", type=str, default='base') # change base to tiny parser.add_argument('--freeze_backbone', type=str_to_bool, nargs='?', const=True, default=False) parser.add_argument('--use_checkpoint', type=str_to_bool, nargs='?', const=True, default=False) parser.add_argument('--backbone_coef_lr', type=float, default=0.001) parser.add_argument("--reload_pretrained_swin", type=str_to_bool, nargs='?', const=True, default=False) parser.add_argument('--learn_mask_enabled', type=str_to_bool, nargs='?', const=True, default=False) parser.add_argument('--loss_sparse_w', type=float, default=0) parser.add_argument('--sparse_mask_soft2hard', type=str_to_bool, nargs='?', const=True, default=False) parser.add_argument('--transfer_method', type=int, default=-1, help="0: load all SwinBERT pre-trained weights, 1: load only pre-trained sparse mask") parser.add_argument('--att_mask_expansion', type=int, default=-1, help="-1: random init, 0: random init and then diag-based copy, 1: interpolation") parser.add_argument('--resume_checkpoint', type=str, default='None') parser.add_argument('--test_video_fname', type=str, default='None') ''' args = base_config.parse_args() # change parse_args() to parse_known_args() return args class BLIP_Base(nn.Module): def __init__(self, med_config='configs/med_config.json', image_size=224, vit='base', vit_grad_ckpt=False, vit_ckpt_layer=0, ): """ Args: med_config (str): path for the mixture of encoder-decoder model's configuration file image_size (int): input image size vit (str): model size of vision transformer """ super().__init__() self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer) self.tokenizer = init_tokenizer()
''' Adapted from https://github.com/salesforce/BLIP ''' warnings.filterwarnings("ignore") def get_custom_args(base_config): parser = base_config.parser ''' parser.add_argument('--max_num_frames', type=int, default=32) parser.add_argument('--img_res', type=int, default=224) parser.add_argument('--patch_size', type=int, default=32) parser.add_argument("--grid_feat", type=str_to_bool, nargs='?', const=True, default=True) parser.add_argument("--kinetics", type=str, default='600', help="400 or 600") parser.add_argument("--pretrained_2d", type=str_to_bool, nargs='?', const=True, default=False) parser.add_argument("--vidswin_size", type=str, default='base') # change base to tiny parser.add_argument('--freeze_backbone', type=str_to_bool, nargs='?', const=True, default=False) parser.add_argument('--use_checkpoint', type=str_to_bool, nargs='?', const=True, default=False) parser.add_argument('--backbone_coef_lr', type=float, default=0.001) parser.add_argument("--reload_pretrained_swin", type=str_to_bool, nargs='?', const=True, default=False) parser.add_argument('--learn_mask_enabled', type=str_to_bool, nargs='?', const=True, default=False) parser.add_argument('--loss_sparse_w', type=float, default=0) parser.add_argument('--sparse_mask_soft2hard', type=str_to_bool, nargs='?', const=True, default=False) parser.add_argument('--transfer_method', type=int, default=-1, help="0: load all SwinBERT pre-trained weights, 1: load only pre-trained sparse mask") parser.add_argument('--att_mask_expansion', type=int, default=-1, help="-1: random init, 0: random init and then diag-based copy, 1: interpolation") parser.add_argument('--resume_checkpoint', type=str, default='None') parser.add_argument('--test_video_fname', type=str, default='None') ''' args = base_config.parse_args() # change parse_args() to parse_known_args() return args class BLIP_Base(nn.Module): def __init__(self, med_config='configs/med_config.json', image_size=224, vit='base', vit_grad_ckpt=False, vit_ckpt_layer=0, ): """ Args: med_config (str): path for the mixture of encoder-decoder model's configuration file image_size (int): input image size vit (str): model size of vision transformer """ super().__init__() self.visual_encoder, vision_width = create_vit(vit, image_size, vit_grad_ckpt, vit_ckpt_layer) self.tokenizer = init_tokenizer()
med_config = BertConfig.from_json_file(med_config)
2
2023-10-29 12:09:38+00:00
8k
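The record above carries the full interpolate_pos_embed helper, which resizes a ViT position-embedding grid when a checkpoint and the target model were trained at different image resolutions. A minimal, self-contained sketch of the same idea in plain PyTorch follows; the function name, tensor sizes, and grid sizes here are illustrative placeholders, not taken from the repository.

import torch
import torch.nn.functional as F

def resize_vit_pos_embed(pos_embed, num_extra_tokens, new_grid_size):
    # pos_embed: (1, num_extra_tokens + old_grid**2, dim)
    dim = pos_embed.shape[-1]
    old_grid = int((pos_embed.shape[1] - num_extra_tokens) ** 0.5)
    if old_grid == new_grid_size:
        return pos_embed
    extra = pos_embed[:, :num_extra_tokens]        # cls (and dist) tokens are kept unchanged
    grid = pos_embed[:, num_extra_tokens:]          # only patch-position tokens are interpolated
    grid = grid.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
    grid = F.interpolate(grid, size=(new_grid_size, new_grid_size),
                         mode="bicubic", align_corners=False)
    grid = grid.permute(0, 2, 3, 1).flatten(1, 2)
    return torch.cat((extra, grid), dim=1)

# e.g. a 224px/16px checkpoint (14x14 grid) loaded into a model with a 24x24 patch grid
old = torch.randn(1, 1 + 14 * 14, 768)
new = resize_vit_pos_embed(old, num_extra_tokens=1, new_grid_size=24)
assert new.shape == (1, 1 + 24 * 24, 768)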
microsoft/MathOctopus
step1_supervised_finetuning/main.py
[ { "identifier": "create_prompt_dataset", "path": "utils/data/data_utils.py", "snippet": "def create_prompt_dataset(local_rank,\n data_path,\n data_split,\n output_path,\n train_phase,\n seed,\n tokenizer,\n max_seq_len,\n end_of_conversation_token=\"<|endoftext|>\",\n sft_only_data_path=[],\n reload=True):\n \"\"\"\n Creates the prompt dataset\n \"\"\"\n os.makedirs(output_path, exist_ok=True)\n fname = \"_\".join(data_path)\n sft_cache_key = \"_\".join(sft_only_data_path)\n tokenizer_name = tokenizer.init_kwargs[\"name_or_path\"].replace(\"/\", \"_\")\n fname = f\"{fname}_split{data_split}_phase{train_phase}_seed{seed}_tokenizer{tokenizer_name}_seqlen{max_seq_len}_sft{sft_cache_key}\"\n fname = \"_\".join(fname.split(\"/\"))\n fname = hashlib.sha256(fname.encode()).hexdigest(\n ) # hash the file name to avoid too long file name\n train_fname = f\"{output_path}/traindata_{fname}.pt\"\n eval_fname = f\"{output_path}/evaldata_{fname}.pt\"\n\n cache_found = os.path.isfile(train_fname) and os.path.isfile(eval_fname)\n buf_create_cache = torch.ByteTensor([not cache_found]).cuda()\n torch.distributed.all_reduce(buf_create_cache)\n if local_rank <= 0 and (buf_create_cache.item() != 0 or reload):\n if len(data_path) == 1: # Single dataset.\n train_dataset, eval_dataset = create_dataset(\n local_rank, data_path[0], data_split, output_path, train_phase,\n seed, tokenizer, end_of_conversation_token, max_seq_len)\n else: # Blending datasets.\n train_datasets = []\n eval_datasets = []\n train_size = 0\n eval_size = 0\n for d_path in data_path:\n train_dataset, eval_dataset = create_dataset(\n local_rank, d_path, data_split, output_path, train_phase,\n seed, tokenizer, end_of_conversation_token, max_seq_len)\n train_datasets.append(train_dataset)\n eval_datasets.append(eval_dataset)\n train_size += len(train_dataset)\n eval_size += len(eval_dataset)\n train_dataset = ConcatDataset(train_datasets)\n shuffle_idx = get_shuffle_idx(seed, train_size)\n train_dataset = Subset(train_dataset, shuffle_idx.tolist())\n eval_dataset = ConcatDataset(eval_datasets)\n shuffle_idx = get_shuffle_idx(seed, eval_size)\n eval_dataset = Subset(eval_dataset, shuffle_idx.tolist())\n\n # Append the SFT-only dataset if it exists, and current phase is 1(SFT).\n if train_phase == 1 and sft_only_data_path:\n sft_train_datasets = []\n sft_eval_datasets = []\n sft_train_size = 0\n sft_eval_size = 0\n for sft_path in sft_only_data_path:\n sft_train_dataset, sft_eval_dataset = create_dataset(\n local_rank,\n sft_path,\n \"10,0,0\",\n output_path,\n train_phase,\n seed,\n tokenizer,\n end_of_conversation_token,\n max_seq_len,\n )\n sft_train_datasets.append(sft_train_dataset)\n sft_eval_datasets.append(sft_eval_dataset)\n sft_train_size += len(sft_train_dataset)\n sft_eval_size += len(sft_eval_dataset)\n if sft_train_datasets: # Check if sft_train_datasets is not empty\n sft_train_dataset = ConcatDataset(sft_train_datasets)\n train_dataset = ConcatDataset(\n [train_dataset, sft_train_dataset])\n shuffle_idx = get_shuffle_idx(seed, len(train_dataset))\n train_dataset = Subset(train_dataset, shuffle_idx.tolist())\n if sft_eval_datasets: # Check if sft_eval_datasets is not empty\n sft_eval_dataset = ConcatDataset(sft_eval_datasets)\n eval_dataset = ConcatDataset([eval_dataset, sft_eval_dataset])\n shuffle_idx = get_shuffle_idx(seed, len(eval_dataset))\n eval_dataset = Subset(eval_dataset, shuffle_idx.tolist())\n torch.save(train_dataset, train_fname)\n torch.save(eval_dataset, eval_fname)\n torch.distributed.barrier()\n return 
torch.load(train_fname), torch.load(eval_fname)" }, { "identifier": "print_rank_0", "path": "utils/utils.py", "snippet": "def print_rank_0(msg, rank=0):\n if rank <= 0:\n print(msg)" }, { "identifier": "to_device", "path": "utils/utils.py", "snippet": "def to_device(batch, device):\n output = {}\n for k, v in batch.items():\n try:\n output[k] = v.to(device)\n except:\n output[k] = v\n return output" }, { "identifier": "save_hf_format", "path": "utils/utils.py", "snippet": "def save_hf_format(model, tokenizer, args, sub_folder=\"\"):\n # used to save huggingface format, so we can use it for hf.from_pretrained\n model_to_save = model.module if hasattr(model, 'module') else model\n CONFIG_NAME = \"config.json\"\n WEIGHTS_NAME = \"pytorch_model.bin\"\n output_dir = os.path.join(args.output_dir, sub_folder)\n os.makedirs(output_dir, exist_ok=True)\n output_model_file = os.path.join(output_dir, WEIGHTS_NAME)\n output_config_file = os.path.join(output_dir, CONFIG_NAME)\n save_dict = model_to_save.state_dict()\n for key in list(save_dict.keys()):\n if \"lora\" in key:\n del save_dict[key]\n torch.save(save_dict, output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(output_dir)" }, { "identifier": "set_random_seed", "path": "utils/utils.py", "snippet": "def set_random_seed(seed):\n if seed is not None:\n set_seed(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)" }, { "identifier": "get_all_reduce_mean", "path": "utils/utils.py", "snippet": "def get_all_reduce_mean(tensor):\n torch.distributed.all_reduce(tensor, op=torch.distributed.ReduceOp.SUM)\n tensor = tensor / torch.distributed.get_world_size()\n return tensor" }, { "identifier": "get_optimizer_grouped_parameters", "path": "utils/utils.py", "snippet": "def get_optimizer_grouped_parameters(model,\n weight_decay,\n no_decay_name_list=[\n \"bias\", \"LayerNorm.weight\"\n ]):\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p for n, p in model.named_parameters()\n if (not any(nd in n\n for nd in no_decay_name_list) and p.requires_grad)\n ],\n \"weight_decay\":\n weight_decay,\n },\n {\n \"params\": [\n p for n, p in model.named_parameters()\n if (any(nd in n\n for nd in no_decay_name_list) and p.requires_grad)\n ],\n \"weight_decay\":\n 0.0,\n },\n ]\n return optimizer_grouped_parameters" }, { "identifier": "save_zero_three_model", "path": "utils/utils.py", "snippet": "def save_zero_three_model(model_ema, tokenizer, global_rank, save_dir, zero_stage=0):\n zero_stage_3 = (zero_stage == 3)\n os.makedirs(save_dir, exist_ok=True)\n WEIGHTS_NAME = \"pytorch_model.bin\"\n CONFIG_NAME = 'config.json'\n output_model_file = os.path.join(save_dir, WEIGHTS_NAME)\n output_config_file = os.path.join(save_dir, CONFIG_NAME)\n \n model_to_save = model_ema.module if hasattr(model_ema,\n 'module') else model_ema\n if not zero_stage_3:\n if global_rank == 0:\n torch.save(model_to_save.state_dict(), output_model_file)\n else:\n output_state_dict = {}\n for k, v in model_to_save.named_parameters():\n\n if hasattr(v, 'ds_id'):\n with deepspeed.zero.GatheredParameters(_z3_params_to_fetch([v\n ]),\n enabled=zero_stage_3):\n v_p = v.data.cpu()\n else:\n v_p = v.cpu()\n if global_rank == 0 and \"lora\" not in k:\n output_state_dict[k] = v_p\n if global_rank == 0:\n \n torch.save(output_state_dict, output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_pretrained(save_dir)\n del output_state_dict" }, { "identifier": 
"load_hf_tokenizer", "path": "utils/utils.py", "snippet": "def load_hf_tokenizer(model_name_or_path, fast_tokenizer=True):\n # if os.path.exists(model_name_or_path):\n # # Locally tokenizer loading has some issue, so we need to force download\n # model_json = os.path.join(model_name_or_path, \"config.json\")\n # if os.path.exists(model_json):\n # model_json_file = json.load(open(model_json))\n # model_name = model_json_file[\"_name_or_path\"]\n # # tokenizer = AutoTokenizer.from_pretrained(model_name,\n # # fast_tokenizer=True)\n # tokenizer = LlamaTokenizer.from_pretrained(model_name,\n # padding_side = 'left',\n # fast_tokenizer=True)\n # print('i am loading here')\n # else:\n tokenizer = LlamaTokenizer.from_pretrained(model_name_or_path,\n padding_side = 'left',\n fast_tokenizer=True)\n \n # tokenizer = AutoTokenizer.from_pretrained(model_name_or_path,\n # fast_tokenizer=True)\n return tokenizer" }, { "identifier": "get_train_ds_config", "path": "utils/ds_utils.py", "snippet": "def get_train_ds_config(offload,\n stage=2,\n enable_hybrid_engine=False,\n inference_tp_size=1,\n release_inference_cache=False,\n pin_parameters=True,\n tp_gather_partition_size=8,\n max_out_tokens=512):\n\n device = \"cpu\" if offload else \"none\"\n zero_opt_dict = {\n \"stage\": stage,\n \"offload_param\": {\n \"device\": device\n },\n \"offload_optimizer\": {\n \"device\": device\n },\n \"stage3_param_persistence_threshold\": 1e4,\n \"stage3_max_live_parameters\": 3e7,\n \"stage3_prefetch_bucket_size\": 3e7,\n \"memory_efficient_linear\": False\n }\n return {\n \"train_batch_size\": GLOBAL_BATCH_SIZE,\n \"train_micro_batch_size_per_gpu\": MICRO_BATCH_SIZE,\n \"steps_per_print\": 10,\n \"zero_optimization\": zero_opt_dict,\n \"bf16\": {\n \"enabled\": True,\n \"loss_scale_window\": 50,\n \"min_loss_scale\": 1e-10,\n },\n \"gradient_clipping\": 1.0,\n \"prescale_gradients\": False,\n \"wall_clock_breakdown\": False,\n \"hybrid_engine\": {\n \"enabled\": enable_hybrid_engine,\n \"max_out_tokens\": max_out_tokens,\n \"inference_tp_size\": inference_tp_size,\n \"release_inference_cache\": release_inference_cache,\n \"pin_parameters\": pin_parameters,\n \"tp_gather_partition_size\": tp_gather_partition_size,\n }\n }" }, { "identifier": "convert_linear_layer_to_lora", "path": "utils/module/lora.py", "snippet": "def convert_linear_layer_to_lora(model,\n part_module_name,\n lora_dim=0,\n lora_scaling=1,\n lora_droppout=0):\n repalce_name = []\n for name, module in model.named_modules():\n if isinstance(module, nn.Linear) and part_module_name in name:\n repalce_name.append(name)\n for name in repalce_name:\n module = recursive_getattr(model, name)\n tmp = LinearLayer_LoRA(\n module.weight, lora_dim, lora_scaling, lora_droppout,\n module.bias).to(module.weight.device).to(module.weight.dtype)\n recursive_setattr(model, name, tmp)\n return model" }, { "identifier": "convert_lora_to_linear_layer", "path": "utils/module/lora.py", "snippet": "def convert_lora_to_linear_layer(model):\n repalce_name = []\n for name, module in model.named_modules():\n if isinstance(module, LinearLayer_LoRA):\n repalce_name.append(name)\n for name in repalce_name:\n module = recursive_getattr(model, name)\n zero_stage_3 = hasattr(module.weight, 'ds_id')\n with deepspeed.zero.GatheredParameters(_z3_params_to_fetch([\n module.weight, module.bias, module.lora_left_weight,\n module.lora_right_weight\n ]),\n modifier_rank=0,\n enabled=zero_stage_3):\n module.fuse_lora_weight()\n return model" }, { "identifier": "only_optimize_lora_parameters", 
"path": "utils/module/lora.py", "snippet": "def only_optimize_lora_parameters(model):\n # turn off the gradient of all the parameters except the LoRA parameters\n for name, param in model.named_parameters():\n if \"lora_right_weight\" in name or \"lora_left_weight\" in name:\n param.requires_grad = True\n else:\n param.requires_grad = False\n return model" }, { "identifier": "create_hf_model", "path": "utils/model/model_utils.py", "snippet": "def create_hf_model(model_class,\n model_name_or_path,\n tokenizer,\n ds_config=None,\n rlhf_training=False,\n disable_dropout=False,\n bf16 = True):\n model_config = AutoConfig.from_pretrained(model_name_or_path)\n if disable_dropout:\n model_config.dropout = 0.0\n # Note: dschf is defined in function scope to avoid global effects\n # https://huggingface.co/docs/transformers/main_classes/deepspeed#nontrainer-deepspeed-integration\n if ds_config is not None and ds_config[\"zero_optimization\"][\"stage\"] == 3:\n dschf = HfDeepSpeedConfig(ds_config)\n else:\n dschf = None\n if rlhf_training:\n # the weight loading is handled by create critic model\n model = model_class.from_config(model_config)\n else:\n if not bf16:\n model = model_class.from_pretrained(\n model_name_or_path,\n from_tf=bool(\".ckpt\" in model_name_or_path),\n config=model_config)\n else:\n model = model_class.from_pretrained(\n model_name_or_path,\n from_tf=bool(\".ckpt\" in model_name_or_path),\n config=model_config,\n torch_dtype=torch.bfloat16) \n \n\n model.config.end_token_id = tokenizer.eos_token_id\n model.config.pad_token_id = model.config.eos_token_id\n # model.resize_token_embeddings(len(tokenizer))\n model.resize_token_embeddings(int(\n 8 *\n math.ceil(len(tokenizer) / 8.0))) # make the vocab size multiple of 8\n\n return model" } ]
import argparse import os import math import sys import torch import transformers import deepspeed from torch.utils import tensorboard from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from typing import Optional, Dict, Sequence from transformers import ( AutoModelForCausalLM, SchedulerType, default_data_collator, get_scheduler, LlamaTokenizer ) from deepspeed.accelerator import get_accelerator from deepspeed.ops.adam import DeepSpeedCPUAdam, FusedAdam from utils.data.data_utils import create_prompt_dataset from utils.utils import print_rank_0, to_device, save_hf_format, set_random_seed, get_all_reduce_mean, get_optimizer_grouped_parameters, save_zero_three_model, load_hf_tokenizer from utils.ds_utils import get_train_ds_config from utils.module.lora import convert_linear_layer_to_lora, convert_lora_to_linear_layer, only_optimize_lora_parameters from utils.model.model_utils import create_hf_model
5,807
# tokenizer.pad_token_id = 0 # make sure tokenizer is right pad in our logic tokenizer.padding_side = 'right' if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token # special_tokens_dict = dict() # if tokenizer.pad_token is None: # tokenizer.pad_token = tokenizer.unk_token # special_tokens_dict["pad_token"] = "[PAD]" # tokenizer.add_special_tokens(special_tokens_dict) # print_rank_0(special_tokens_dict, args.global_rank) # tokenizer.pad_token_id = 0 # tokenizer.bos_token_id = 1 # tokenizer.eos_token_id = 2 model = create_hf_model(AutoModelForCausalLM, args.model_name_or_path, tokenizer, ds_config, disable_dropout=args.disable_dropout) # smart_tokenizer_and_embedding_resize( # special_tokens_dict=special_tokens_dict, # tokenizer=tokenizer, # model=model, # ) # model.resize_token_embeddings(len(tokenizer)) if args.lora_dim > 0: model = convert_linear_layer_to_lora(model, args.lora_module_name, args.lora_dim) if args.only_optimize_lora: model = only_optimize_lora_parameters(model) # Prepare the data train_phase = 1 train_dataset, eval_dataset = create_prompt_dataset( args.local_rank, args.data_path, args.data_split, args.data_output_path, train_phase, args.seed, tokenizer, args.max_seq_len, end_of_conversation_token = tokenizer.eos_token, sft_only_data_path=args.sft_only_data_path) # DataLoaders creation: if args.local_rank == -1: train_sampler = RandomSampler(train_dataset) eval_sampler = SequentialSampler(eval_dataset) else: train_sampler = DistributedSampler(train_dataset) eval_sampler = DistributedSampler(eval_dataset) train_dataloader = DataLoader(train_dataset, collate_fn=default_data_collator, sampler=train_sampler, batch_size=args.per_device_train_batch_size) eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, sampler=eval_sampler, batch_size=args.per_device_eval_batch_size) def evaluation(model, eval_dataloader): model.eval() losses = 0 for step, batch in enumerate(eval_dataloader): batch = to_device(batch, device) with torch.no_grad(): outputs = model(**batch) loss = outputs.loss losses += loss.float() losses = losses / (step + 1) try: perplexity = torch.exp(losses) except OverflowError: perplexity = float("inf") try: perplexity = get_all_reduce_mean(perplexity).item() except: pass try: loss = get_all_reduce_mean(losses).item() except: loss = float("inf") return loss, perplexity # Split weights in two groups, one with weight decay and the other not. optimizer_grouped_parameters = get_optimizer_grouped_parameters( model, args.weight_decay) AdamOptimizer = DeepSpeedCPUAdam if args.offload else FusedAdam optimizer = AdamOptimizer(optimizer_grouped_parameters, lr=args.learning_rate, betas=(0.9, 0.95)) num_update_steps_per_epoch = math.ceil( len(train_dataloader) / args.gradient_accumulation_steps) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.num_train_epochs * num_update_steps_per_epoch, ) model, optimizer, _, lr_scheduler = deepspeed.initialize( model=model, optimizer=optimizer, args=args, config=ds_config, lr_scheduler=lr_scheduler, dist_init_required=True) if args.gradient_checkpointing: model.gradient_checkpointing_enable() # Train!
#!/usr/bin/env python # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team # import matplotlib.pyplot as plt sys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))) def parse_args(): parser = argparse.ArgumentParser( description= "Finetune a transformers model on a causal language modeling task") parser.add_argument('--data_path', nargs='*', default=['Dahoas/rm-static'], help='Path to the training dataset. Accepted format:' '1) a single data path, 2) multiple datasets in the' 'form: dataset1-path dataset2-path ...') parser.add_argument('--data_split', type=str, default='2,4,4', help='Comma-separated list of proportions for training' 'phase 1, 2, and 3 data. For example the split `6,2,2`' 'will use 60% of data for phase 1, 20% for phase 2' 'and 20% for phase 3.') parser.add_argument( '--sft_only_data_path', nargs='*', default=[], help='Path to the dataset for only using in SFT phase.') parser.add_argument( '--data_output_path', type=str, default='/tmp/data_files/', help= 'Where to store the data-related files such as shuffle index. This needs to be on a local storage of a node (not on a shared storage)' ) parser.add_argument( "--model_name_or_path", type=str, help= "Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--per_device_train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=16, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--max_seq_len", type=int, default=512, help="The maximum sequence length.", ) parser.add_argument( "--learning_rate", type=float, default=1e-3, help= "Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0., help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=1, help="Total number of training epochs to perform.") parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="cosine", help="The scheduler type to use.", choices=[ "linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup" ], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler.") parser.add_argument("--output_dir", type=str, default=None, help="Where to store the model.") parser.add_argument("--seed", type=int, default=1234, help="A seed for reproducible training.") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--gradient_checkpointing', action='store_true', help='Enable HF gradient checkpointing for model.') parser.add_argument('--disable_dropout', action='store_true', help='Disable the dropout of the model.') # deepspeed features parser.add_argument('--offload', action='store_true', help='Enable ZeRO Offload techniques.') parser.add_argument( '--zero_stage', type=int, default=0, help='ZeRO optimization stage for Actor model (and clones).') ## LoRA for efficient training setting parser.add_argument("--lora_dim", type=int, default=0, help="If > 0, use LoRA for efficient training.") parser.add_argument("--lora_module_name", type=str, 
default="decoder.layers.", help="The scope of LoRA.") parser.add_argument('--only_optimize_lora', action='store_true', help='Only optimize the LoRA parameters.') parser = deepspeed.add_config_arguments(parser) args = parser.parse_args() # Validate settings if args.gradient_checkpointing and args.lora_dim > 0: assert ( not args.only_optimize_lora ), "--gradient_checkpointing and --only_optimize_lora cannot be enabled at the same time." return args def smart_tokenizer_and_embedding_resize( special_tokens_dict: Dict, tokenizer: transformers.PreTrainedTokenizer, model: transformers.PreTrainedModel, ): """Resize tokenizer and embedding. Note: This is the unoptimized version that may make your embedding size not be divisible by 64. """ num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict) model.resize_token_embeddings(len(tokenizer)) if num_new_tokens > 0: input_embeddings = model.get_input_embeddings().weight.data output_embeddings = model.get_output_embeddings().weight.data input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True) output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True) input_embeddings[-num_new_tokens:] = input_embeddings_avg output_embeddings[-num_new_tokens:] = output_embeddings_avg def main(): # if args.tensorboard_path != "": args = parse_args() writer = tensorboard.SummaryWriter(f'{args.output_dir}/runs') if args.local_rank == -1: device = torch.device("cuda") else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) # Initializes the distributed backend which will take care of sychronizing nodes/GPUs # torch.distributed.init_process_group(backend='nccl') deepspeed.init_distributed() args.global_rank = torch.distributed.get_rank() ds_config = get_train_ds_config(offload=args.offload, stage=args.zero_stage) ds_config[ 'train_micro_batch_size_per_gpu'] = args.per_device_train_batch_size ds_config[ 'train_batch_size'] = args.per_device_train_batch_size * torch.distributed.get_world_size( ) * args.gradient_accumulation_steps # If passed along, set the training seed now. 
set_random_seed(args.seed) torch.distributed.barrier() print('loading from ...', args.model_name_or_path) tokenizer = LlamaTokenizer.from_pretrained(args.model_name_or_path, fast_tokenizer=True) # tokenizer = load_hf_tokenizer(args.model_name_or_path, fast_tokenizer=True) # tokenizer.pad_token_id = 0 # make sure tokenizer is right pad in our logic tokenizer.padding_side = 'right' if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token # special_tokens_dict = dict() # if tokenizer.pad_token is None: # tokenizer.pad_token = tokenizer.unk_token # special_tokens_dict["pad_token"] = "[PAD]" # tokenizer.add_special_tokens(special_tokens_dict) # print_rank_0(special_tokens_dict, args.global_rank) # tokenizer.pad_token_id = 0 # tokenizer.bos_token_id = 1 # tokenizer.eos_token_id = 2 model = create_hf_model(AutoModelForCausalLM, args.model_name_or_path, tokenizer, ds_config, disable_dropout=args.disable_dropout) # smart_tokenizer_and_embedding_resize( # special_tokens_dict=special_tokens_dict, # tokenizer=tokenizer, # model=model, # ) # model.resize_token_embeddings(len(tokenizer)) if args.lora_dim > 0: model = convert_linear_layer_to_lora(model, args.lora_module_name, args.lora_dim) if args.only_optimize_lora: model = only_optimize_lora_parameters(model) # Prepare the data train_phase = 1 train_dataset, eval_dataset = create_prompt_dataset( args.local_rank, args.data_path, args.data_split, args.data_output_path, train_phase, args.seed, tokenizer, args.max_seq_len, end_of_conversation_token = tokenizer.eos_token, sft_only_data_path=args.sft_only_data_path) # DataLoaders creation: if args.local_rank == -1: train_sampler = RandomSampler(train_dataset) eval_sampler = SequentialSampler(eval_dataset) else: train_sampler = DistributedSampler(train_dataset) eval_sampler = DistributedSampler(eval_dataset) train_dataloader = DataLoader(train_dataset, collate_fn=default_data_collator, sampler=train_sampler, batch_size=args.per_device_train_batch_size) eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, sampler=eval_sampler, batch_size=args.per_device_eval_batch_size) def evaluation(model, eval_dataloader): model.eval() losses = 0 for step, batch in enumerate(eval_dataloader): batch = to_device(batch, device) with torch.no_grad(): outputs = model(**batch) loss = outputs.loss losses += loss.float() losses = losses / (step + 1) try: perplexity = torch.exp(losses) except OverflowError: perplexity = float("inf") try: perplexity = get_all_reduce_mean(perplexity).item() except: pass try: loss = get_all_reduce_mean(losses).item() except: loss = float("inf") return loss, perplexity # Split weights in two groups, one with weight decay and the other not. optimizer_grouped_parameters = get_optimizer_grouped_parameters( model, args.weight_decay) AdamOptimizer = DeepSpeedCPUAdam if args.offload else FusedAdam optimizer = AdamOptimizer(optimizer_grouped_parameters, lr=args.learning_rate, betas=(0.9, 0.95)) num_update_steps_per_epoch = math.ceil( len(train_dataloader) / args.gradient_accumulation_steps) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.num_train_epochs * num_update_steps_per_epoch, ) model, optimizer, _, lr_scheduler = deepspeed.initialize( model=model, optimizer=optimizer, args=args, config=ds_config, lr_scheduler=lr_scheduler, dist_init_required=True) if args.gradient_checkpointing: model.gradient_checkpointing_enable() # Train!
print_rank_0("***** Running training *****", args.global_rank)
1
2023-10-25 05:36:54+00:00
8k
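The get_optimizer_grouped_parameters snippet in the record above splits trainable weights into a weight-decayed group and a non-decayed group (biases and LayerNorm scales). A minimal sketch of that grouping with a vanilla PyTorch optimizer is shown below; the tiny module, hyperparameters, and the use of AdamW in place of FusedAdam/DeepSpeedCPUAdam are illustrative assumptions, not the repository's setup.

import torch
from torch import nn

def group_parameters(model: nn.Module, weight_decay: float,
                     no_decay_names=("bias", "LayerNorm.weight")):
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # biases and LayerNorm scales are conventionally excluded from weight decay
        (no_decay if any(nd in name for nd in no_decay_names) else decay).append(param)
    return [
        {"params": decay, "weight_decay": weight_decay},
        {"params": no_decay, "weight_decay": 0.0},
    ]

class TinyBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(16, 16)
        self.LayerNorm = nn.LayerNorm(16)   # attribute named to match the HF-style name filter

model = TinyBlock()
optimizer = torch.optim.AdamW(group_parameters(model, weight_decay=0.1),
                              lr=1e-3, betas=(0.9, 0.95))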
ATR-DBI/CityRefer
lib/solver.py
[ { "identifier": "get_loss", "path": "lib/loss_helper.py", "snippet": "def get_loss(args, data_dict, config, reference=True, use_lang_classifier=False):\n if args.model == 'cityrefer':\n data_dict = get_ref_loss(args, data_dict, config, reference, use_lang_classifier)\n elif args.model == 'refnet':\n data_dict = get_ref_loss(args, data_dict, config, reference, use_lang_classifier)\n else:\n raise NotImplementedError \n return data_dict" }, { "identifier": "get_eval", "path": "lib/eval_helper.py", "snippet": "def get_eval(args, data_dict, config):\n \"\"\" Loss functions\n Args:\n data_dict: dict\n config: dataset config instance\n reference: flag (False/True)\n post_processing: config dict\n Returns:\n loss: pytorch scalar tensor\n data_dict: dict\n \"\"\"\n lang_scores = data_dict[\"lang_scores\"]\n lang_cls_pred = torch.argmax(lang_scores, dim=1)\n batch_size = lang_scores.shape[0]\n\n data_dict[\"lang_acc\"] = (lang_cls_pred == data_dict[\"object_cat\"]).float().mean()\n\n object_scores = data_dict['object_scores']\n pred_obb_batch = data_dict['pred_obb_batch']\n cluster_labels = data_dict['cluster_label']\n\n ref_center_label = data_dict[\"ref_center_label\"].detach().cpu().numpy()\n ref_heading_class_label = data_dict[\"ref_heading_class_label\"].detach().cpu().numpy()\n ref_heading_residual_label = data_dict[\"ref_heading_residual_label\"].detach().cpu().numpy()\n ref_size_class_label = data_dict[\"ref_size_class_label\"].detach().cpu().numpy()\n ref_size_residual_label = data_dict[\"ref_size_residual_label\"].detach().cpu().numpy()\n ref_gt_obb = config.param2obb_batch(ref_center_label, ref_heading_class_label, ref_heading_residual_label,\n ref_size_class_label, ref_size_residual_label)\n\n ious = []\n pred_bboxes = []\n gt_bboxes = []\n ref_acc = []\n multiple = []\n others = []\n num_missed = 0\n for i in range(batch_size):\n pred_obb = pred_obb_batch[i] # (num, 7)\n num_filtered_obj = pred_obb.shape[0] \n if num_filtered_obj == 0:\n pred_obb = np.zeros(7)\n num_missed += 1\n elif num_filtered_obj == 1:\n pred_obb = pred_obb[0]\n else:\n # object_scores: batch, MAX_NUM_OBJ\n score = object_scores[i][:num_filtered_obj]\n\n cluster_pred = torch.argmax(score, dim=0)\n target = torch.argmax(cluster_labels[i], dim=0)\n if target == cluster_pred:\n ref_acc.append(1.)\n else:\n ref_acc.append(0.)\n\n pred_obb = pred_obb_batch[i][cluster_pred]\n\n gt_obb = ref_gt_obb[i]\n pred_bbox = get_3d_box(pred_obb[3:6], pred_obb[6], pred_obb[0:3])\n gt_bbox = get_3d_box(gt_obb[3:6], gt_obb[6], gt_obb[0:3])\n iou = box3d_iou(pred_bbox, gt_bbox)\n ious.append(iou)\n\n # NOTE: get_3d_box() will return problematic bboxes\n pred_bbox = construct_bbox_corners(pred_obb[0:3], pred_obb[3:6])\n gt_bbox = construct_bbox_corners(gt_obb[0:3], gt_obb[3:6])\n\n if num_filtered_obj <= 1:\n if iou > 0.25:\n ref_acc.append(1.)\n else:\n ref_acc.append(0.)\n\n pred_bboxes.append(pred_bbox)\n gt_bboxes.append(gt_bbox)\n\n # construct the multiple mask\n multiple.append(data_dict[\"unique_multiple\"][i].item())\n\n # construct the others mask\n flag = 1 if data_dict[\"object_cat\"][i] == -1 else 0\n others.append(flag)\n \n data_dict['ref_acc'] = ref_acc\n data_dict[\"ref_iou\"] = ious\n data_dict[\"ref_iou_rate_0.25\"] = np.array(ious)[np.array(ious) >= 0.25].shape[0] / np.array(ious).shape[0]\n data_dict[\"ref_iou_rate_0.5\"] = np.array(ious)[np.array(ious) >= 0.5].shape[0] / np.array(ious).shape[0]\n\n data_dict[\"ref_multiple_mask\"] = multiple\n data_dict[\"ref_others_mask\"] = others\n data_dict[\"pred_bboxes\"] 
= pred_bboxes\n data_dict[\"gt_bboxes\"] = gt_bboxes\n\n return data_dict" }, { "identifier": "BNMomentumScheduler", "path": "lib/scheduler_helper.py", "snippet": "class BNMomentumScheduler(object):\n\n def __init__(\n self, model, bn_lambda, last_epoch=-1,\n setter=set_bn_momentum_default\n ):\n if not isinstance(model, nn.Module):\n raise RuntimeError(\n \"Class '{}' is not a PyTorch nn Module\".format(\n type(model).__name__\n )\n )\n\n self.model = model\n self.setter = setter\n self.lmbd = bn_lambda\n\n self.step(last_epoch + 1)\n self.last_epoch = last_epoch\n\n def step(self, epoch=None):\n if epoch is None:\n epoch = self.last_epoch + 1\n\n self.last_epoch = epoch\n self.model.apply(self.setter(self.lmbd(epoch)))" }, { "identifier": "decode_eta", "path": "utils/eta.py", "snippet": "def decode_eta(eta_sec):\n eta = {'h': 0, 'm': 0, 's': 0}\n if eta_sec < 60:\n eta['s'] = int(eta_sec)\n elif eta_sec >= 60 and eta_sec < 3600:\n eta['m'] = int(eta_sec / 60)\n eta['s'] = int(eta_sec % 60)\n else:\n eta['h'] = int(eta_sec / (60 * 60))\n eta['m'] = int(eta_sec % (60 * 60) / 60)\n eta['s'] = int(eta_sec % (60 * 60) % 60)\n\n return eta" }, { "identifier": "mask_tokens", "path": "models/util.py", "snippet": "def mask_tokens(inputs, tokenizer, mlm_probability):\n \"\"\"\n Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.\n \"\"\"\n\n if tokenizer.mask_token is None:\n raise ValueError(\n \"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.\"\n )\n\n labels = inputs.clone()\n # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)\n probability_matrix = torch.full(labels.shape, mlm_probability)\n special_tokens_mask = [\n tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)\n for val in labels.tolist()\n ]\n probability_matrix.masked_fill_(\n torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0\n )\n if tokenizer._pad_token is not None:\n padding_mask = labels.eq(tokenizer.pad_token_id)\n probability_matrix.masked_fill_(padding_mask, value=0.0)\n masked_indices = torch.bernoulli(probability_matrix).bool()\n labels[~masked_indices] = -100 # We only compute loss on masked tokens\n\n # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])\n indices_replaced = (\n torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices\n )\n inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = (\n torch.bernoulli(torch.full(labels.shape, 0.5)).bool()\n & masked_indices\n & ~indices_replaced\n )\n random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)\n inputs[indices_random] = random_words[indices_random]\n\n # The rest of the time (10% of the time) we keep the masked input tokens unchanged\n return inputs, labels" }, { "identifier": "get_mask", "path": "models/util.py", "snippet": "def get_mask(lengths, max_length):\n \"\"\"Computes a batch of padding masks given batched lengths\"\"\"\n mask = 1 * (\n torch.arange(max_length).unsqueeze(1).to(lengths.device) < lengths\n ).transpose(0, 1)\n return mask" } ]
import os import sys import time import torch import numpy as np import importlib import lib from tqdm import tqdm from tensorboardX import SummaryWriter from torch.optim.lr_scheduler import StepLR, MultiStepLR from lib.loss_helper import get_loss from lib.eval_helper import get_eval from lib.scheduler_helper import BNMomentumScheduler from utils.eta import decode_eta from models.util import mask_tokens from models.util import get_mask
4,364
self.bn_scheduler = None def __call__(self, epoch, verbose): # setting self.epoch = epoch self.verbose = verbose self._total_iter["train"] = len(self.dataloader["train"]) * epoch self._total_iter["val"] = len(self.dataloader["val"]) * self.val_step for epoch_id in range(epoch): try: self._log("epoch {} starting...".format(epoch_id + 1)) # feed self._feed(self.dataloader["train"], "train", epoch_id) # save model self._log("saving last models...\n") model_root = os.path.join(self.CONF.PATH.OUTPUT, self.stamp) torch.save(self.model.state_dict(), os.path.join(model_root, "model_last.pth")) print("evaluating...") self.init_log() # val self._feed(self.dataloader["val"], "val", epoch_id) # update lr scheduler if self.lr_scheduler: self.lr_scheduler.step() self._log("update learning rate --> {}\n".format(self.lr_scheduler.get_last_lr())) # update bn scheduler if self.bn_scheduler: self.bn_scheduler.step() self._log("update batch normalization momentum --> {}\n".format( self.bn_scheduler.lmbd(self.bn_scheduler.last_epoch))) except KeyboardInterrupt: # finish training self._finish(epoch_id) exit() # finish training self._finish(epoch_id) def _log(self, info_str): self.log_fout.write(info_str + "\n") self.log_fout.flush() print(info_str) def _set_phase(self, phase): if phase == "train": self.model.train() elif phase == "val": self.model.eval() else: raise ValueError("invalid phase") def _forward(self, data_dict): data_dict = self.model(data_dict) return data_dict def _backward(self): # optimize self.optimizer.zero_grad() self.scaler.scale(self._running_log["loss"]).backward() self.scaler.step(self.optimizer) self.scaler.update() def _compute_loss(self, data_dict): data_dict = get_loss( args=self.args, data_dict=data_dict, config=self.DC, ) # dump self._running_log["ref_loss"] = data_dict["ref_loss"] self._running_log["lang_loss"] = data_dict["lang_loss"] self._running_log["mlm_loss"] = data_dict["mlm_loss"] self._running_log["loss"] = data_dict["loss"] def _eval(self, data_dict): data_dict = get_eval( args=self.args, data_dict=data_dict, config=self.DC, ) # dump self._running_log["lang_acc"] = data_dict["lang_acc"].item() self._running_log["ref_acc"] = np.mean(data_dict["ref_acc"]) self._running_log['ref_iou'] = data_dict['ref_iou'] def _feed(self, dataloader, phase, epoch_id): # switch mode self._set_phase(phase) # change dataloader dataloader = dataloader if phase == "train" else tqdm(dataloader) fetch_time_start = time.time() for data_dict in dataloader: # move to cuda for key in data_dict: if key in ['object_cat', 'lidar', 'point_min', 'point_max', 'mlm_label', 'ref_center_label', 'ref_size_residual_label']: data_dict[key] = data_dict[key].cuda() # text encoding query = data_dict["query"] encoded_query = self.tokenizer( query, add_special_tokens=True, max_length=self.max_desc_len, padding="longest", truncation=True, return_tensors="pt", ) # mlm for description
sys.path.append(os.path.join(os.getcwd(), "lib")) # HACK add the lib folder sys.path.append(os.path.join(os.getcwd(), "utils")) # HACK add the lib folder importlib.reload(lib) ITER_REPORT_TEMPLATE = """ -------------------------------iter: [{epoch_id}: {iter_id}/{total_iter}]------------------------------- [loss] train_loss: {train_loss} [loss] train_ref_loss: {train_ref_loss} [loss] train_lang_loss: {train_lang_loss} [loss] train_mlm_loss: {train_mlm_loss} [loss] train_lang_acc: {train_lang_acc} [sco.] train_ref_acc: {train_ref_acc} [sco.] train_iou_rate_0.25: {train_iou_rate_25}, train_iou_rate_0.5: {train_iou_rate_5} [info] mean_fetch_time: {mean_fetch_time}s [info] mean_forward_time: {mean_forward_time}s [info] mean_backward_time: {mean_backward_time}s [info] mean_eval_time: {mean_eval_time}s [info] mean_iter_time: {mean_iter_time}s [info] ETA: {eta_h}h {eta_m}m {eta_s}s """ EPOCH_REPORT_TEMPLATE = """ ---------------------------------summary--------------------------------- [val] val_loss: {val_loss} [val] val_lang_loss: {val_lang_loss} [val] val_lang_acc: {val_lang_acc} [val] val_ref_acc: {val_ref_acc} [val] val_iou_rate_0.25: {val_iou_rate_25}, val_iou_rate_0.5: {val_iou_rate_5} """ BEST_REPORT_TEMPLATE = """ --------------------------------------best-------------------------------------- [best] epoch: {epoch} [loss] loss: {loss} [loss] ref_loss: {ref_loss} [loss] lang_loss: {lang_loss} [loss] mlm_loss: {mlm_loss} [sco.] ref_acc: {ref_acc} [sco.] lang_acc: {lang_acc} [sco.] iou_rate_0.25: {iou_rate_25}, iou_rate_0.5: {iou_rate_5} """ class Solver(): def __init__(self, args, model, DC, CONF, dataloader, optimizer, scaler, stamp, tokenizer=None, val_step=10, reference=True, use_lang_classifier=True, lr_decay_step=None, lr_decay_rate=None, bn_decay_step=None, bn_decay_rate=None, use_amp=False): self.epoch = 0 # set in __call__ self.verbose = 0 # set in __call__ self.model = model self.tokenizer = tokenizer self.args = args self.CONF = CONF self.DC = DC self.dataloader = dataloader self.optimizer = optimizer self.scaler = scaler self.stamp = stamp self.val_step = val_step self.use_amp = use_amp self.reference = reference self.use_lang_classifier = use_lang_classifier self.max_desc_len = args.max_desc_len self.max_land_len = args.max_land_len self.max_num_object = args.max_num_object if args.num_cands < 0 else args.num_cands self.max_num_landmark = args.max_num_landmark self.mlm_prob = args.mlm_prob self.lr_decay_step = lr_decay_step self.lr_decay_rate = lr_decay_rate self.bn_decay_step = bn_decay_step self.bn_decay_rate = bn_decay_rate self.best = { "epoch": 0, "loss": float("inf"), "ref_loss": float("inf"), "lang_loss": float("inf"), "lang_acc": -float("inf"), "ref_acc": -float("inf"), "iou_rate_0.25": -float("inf"), "iou_rate_0.5": -float("inf") } # log self.init_log() # tensorboard os.makedirs(os.path.join(self.CONF.PATH.OUTPUT, stamp, "tensorboard/train"), exist_ok=True) os.makedirs(os.path.join(self.CONF.PATH.OUTPUT, stamp, "tensorboard/val"), exist_ok=True) self._log_writer = { "train": SummaryWriter(os.path.join(self.CONF.PATH.OUTPUT, stamp, "tensorboard/train")), "val": SummaryWriter(os.path.join(self.CONF.PATH.OUTPUT, stamp, "tensorboard/val")) } # training log log_path = os.path.join(self.CONF.PATH.OUTPUT, stamp, "log.txt") self.log_fout = open(log_path, "a") # private # only for internal access and temporary results self._running_log = {} self._global_iter_id = 0 self._total_iter = {} # set in __call__ # templates self.__iter_report_template = ITER_REPORT_TEMPLATE 
self.__epoch_report_template = EPOCH_REPORT_TEMPLATE self.__best_report_template = BEST_REPORT_TEMPLATE # lr scheduler if lr_decay_step and lr_decay_rate: if isinstance(lr_decay_step, list): self.lr_scheduler = MultiStepLR(optimizer, lr_decay_step, lr_decay_rate) else: self.lr_scheduler = StepLR(optimizer, lr_decay_step, lr_decay_rate) else: self.lr_scheduler = None # bn scheduler if bn_decay_step and bn_decay_rate: it = -1 start_epoch = 0 BN_MOMENTUM_INIT = 0.5 BN_MOMENTUM_MAX = 0.001 bn_lbmd = lambda it: max(BN_MOMENTUM_INIT * bn_decay_rate ** (int(it / bn_decay_step)), BN_MOMENTUM_MAX) self.bn_scheduler = BNMomentumScheduler(model, bn_lambda=bn_lbmd, last_epoch=start_epoch - 1) else: self.bn_scheduler = None def __call__(self, epoch, verbose): # setting self.epoch = epoch self.verbose = verbose self._total_iter["train"] = len(self.dataloader["train"]) * epoch self._total_iter["val"] = len(self.dataloader["val"]) * self.val_step for epoch_id in range(epoch): try: self._log("epoch {} starting...".format(epoch_id + 1)) # feed self._feed(self.dataloader["train"], "train", epoch_id) # save model self._log("saving last models...\n") model_root = os.path.join(self.CONF.PATH.OUTPUT, self.stamp) torch.save(self.model.state_dict(), os.path.join(model_root, "model_last.pth")) print("evaluating...") self.init_log() # val self._feed(self.dataloader["val"], "val", epoch_id) # update lr scheduler if self.lr_scheduler: self.lr_scheduler.step() self._log("update learning rate --> {}\n".format(self.lr_scheduler.get_last_lr())) # update bn scheduler if self.bn_scheduler: self.bn_scheduler.step() self._log("update batch normalization momentum --> {}\n".format( self.bn_scheduler.lmbd(self.bn_scheduler.last_epoch))) except KeyboardInterrupt: # finish training self._finish(epoch_id) exit() # finish training self._finish(epoch_id) def _log(self, info_str): self.log_fout.write(info_str + "\n") self.log_fout.flush() print(info_str) def _set_phase(self, phase): if phase == "train": self.model.train() elif phase == "val": self.model.eval() else: raise ValueError("invalid phase") def _forward(self, data_dict): data_dict = self.model(data_dict) return data_dict def _backward(self): # optimize self.optimizer.zero_grad() self.scaler.scale(self._running_log["loss"]).backward() self.scaler.step(self.optimizer) self.scaler.update() def _compute_loss(self, data_dict): data_dict = get_loss( args=self.args, data_dict=data_dict, config=self.DC, ) # dump self._running_log["ref_loss"] = data_dict["ref_loss"] self._running_log["lang_loss"] = data_dict["lang_loss"] self._running_log["mlm_loss"] = data_dict["mlm_loss"] self._running_log["loss"] = data_dict["loss"] def _eval(self, data_dict): data_dict = get_eval( args=self.args, data_dict=data_dict, config=self.DC, ) # dump self._running_log["lang_acc"] = data_dict["lang_acc"].item() self._running_log["ref_acc"] = np.mean(data_dict["ref_acc"]) self._running_log['ref_iou'] = data_dict['ref_iou'] def _feed(self, dataloader, phase, epoch_id): # switch mode self._set_phase(phase) # change dataloader dataloader = dataloader if phase == "train" else tqdm(dataloader) fetch_time_start = time.time() for data_dict in dataloader: # move to cuda for key in data_dict: if key in ['object_cat', 'lidar', 'point_min', 'point_max', 'mlm_label', 'ref_center_label', 'ref_size_residual_label']: data_dict[key] = data_dict[key].cuda() # text encoding query = data_dict["query"] encoded_query = self.tokenizer( query, add_special_tokens=True, max_length=self.max_desc_len, padding="longest", 
truncation=True, return_tensors="pt", ) # mlm for description
inputs, labels = mask_tokens(encoded_query["input_ids"], self.tokenizer, mlm_probability=self.mlm_prob)
4
2023-10-25 10:02:28+00:00
8k
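The mask_tokens helper in the record above applies standard BERT-style masked-language-model corruption: sampled positions get labels, 80% of them are replaced with the mask token, 10% with a random token, 10% are left unchanged, and all other positions receive label -100. A compact sketch of the same scheme is given below, assuming a HuggingFace tokenizer; the checkpoint name and example sentence are placeholders, not data from the record.

import torch
from transformers import AutoTokenizer

def mlm_mask(input_ids: torch.Tensor, tokenizer, mlm_probability: float = 0.15):
    labels = input_ids.clone()
    # choose positions to corrupt, never touching special tokens (padding included)
    prob = torch.full(labels.shape, mlm_probability)
    special = torch.tensor(
        [tokenizer.get_special_tokens_mask(ids, already_has_special_tokens=True)
         for ids in labels.tolist()], dtype=torch.bool)
    prob.masked_fill_(special, 0.0)
    masked = torch.bernoulli(prob).bool()
    labels[~masked] = -100                         # loss is only computed on masked positions

    # 80% of masked positions -> [MASK]
    replace = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked
    input_ids[replace] = tokenizer.mask_token_id
    # 10% -> a random vocabulary token (half of the remaining 20%)
    random_pos = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked & ~replace
    input_ids[random_pos] = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)[random_pos]
    # the final 10% keep their original token
    return input_ids, labels

tok = AutoTokenizer.from_pretrained("bert-base-uncased")
batch = tok(["a red car parked near the tall building"], return_tensors="pt")
inputs, labels = mlm_mask(batch["input_ids"], tok)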
OATML-Markslab/ProteinNPT
utils/tranception/model_pytorch.py
[ { "identifier": "tranception_ACT2FN", "path": "utils/tranception/activations.py", "snippet": "def _gelu_python(x):\ndef gelu_new(x):\ndef gelu_fast(x):\ndef quick_gelu(x):\ndef _silu_python(x):\ndef _mish_python(x):\ndef linear_act(x):\ndef squared_relu(x):\ndef squared_relu_xla(x):\ndef get_activation(activation_string):" }, { "identifier": "TranceptionConfig", "path": "utils/tranception/config.py", "snippet": "class TranceptionConfig(GPT2Config):\n \"\"\"\n Config subclass for Tranception model architecture.\n \"\"\"\n def __init__(\n self,\n attention_mode=\"tranception\",\n position_embedding=\"grouped_alibi\",\n tokenizer=None,\n retrieval_aggregation_mode=None,\n retrieval_inference_weight=0.6,\n MSA_filename=None,\n MSA_weight_file_name=None,\n MSA_start=None,\n MSA_end=None,\n full_protein_length=None,\n clustal_omega_location=None,\n scoring_window=\"optimal\",\n **kwargs\n ):\n super().__init__(**kwargs)\n self.model_type=\"tranception\"\n self.attention_mode=attention_mode\n self.position_embedding=position_embedding\n self.tokenizer = tokenizer\n self.retrieval_aggregation_mode = retrieval_aggregation_mode\n self.retrieval_inference_weight = retrieval_inference_weight\n self.MSA_filename = MSA_filename\n self.MSA_weight_file_name = MSA_weight_file_name\n self.MSA_start=MSA_start\n self.MSA_end=MSA_end\n self.full_protein_length = full_protein_length\n self.clustal_omega_location = clustal_omega_location\n self.scoring_window=scoring_window" }, { "identifier": "TranceptionCausalLMOutputWithCrossAttentions", "path": "utils/tranception/outputs.py", "snippet": "class TranceptionCausalLMOutputWithCrossAttentions(ModelOutput):\n \"\"\"\n Class for Tranception causal language model (or autoregressive) outputs.\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Language modeling loss (for next-token prediction).\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n Cross attentions weights after the attention softmax, used to compute the weighted average in the\n cross-attention heads.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `torch.FloatTensor` tuples of length `config.n_layers`, with each tuple containing the cached key,\n value 
states of the self-attention and the cross-attention layers if model is used in encoder-decoder\n setting. Only relevant if `config.is_decoder = True`.\n Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see\n `past_key_values` input) to speed up sequential decoding.\n fused_shift_log_probas (`torch.FloatTensor` of shape (batch_size, sequence_length, config.vocab_size), *optional*, returned when config.retrieval_aggregation_mode is not None.\n log_probas for each residue position after aggregating autoregressive logits and retrieval logits.\n \n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n fused_shift_log_probas: Optional[torch.FloatTensor] = None" }, { "identifier": "msa_utils", "path": "utils/tranception/utils/msa_utils.py", "snippet": "def filter_msa(msa_data, num_sequences_kept=3):\ndef process_msa_data(MSA_data_file):\ndef get_one_hot_sequences_dict(msa_data,MSA_start,MSA_end,vocab):\ndef one_hot(sequence_string,vocab):\ndef get_msa_prior(MSA_data_file, MSA_weight_file_name, MSA_start, MSA_end, len_target_seq, vocab, retrieval_aggregation_mode=\"aggregate_substitution\", filter_MSA=True, verbose=False):\ndef update_retrieved_MSA_log_prior_indel(model, MSA_log_prior, MSA_start, MSA_end, mutated_sequence):\n def __init__(self,\n MSA_location=\"\",\n theta=0.2,\n use_weights=True,\n weights_location=\"./data/weights\",\n preprocess_MSA=True,\n threshold_sequence_frac_gaps=0.5,\n threshold_focus_cols_frac_gaps=1.0,\n remove_sequences_with_indeterminate_AA_in_focus_cols=True\n ):\n def gen_alignment(self, verbose=False):\n def compute_weight(seq):\n MSA_EVE = MSA_processing(\n MSA_location=MSA_data_file,\n use_weights=True,\n weights_location=MSA_weight_file_name\n )\nclass MSA_processing:" }, { "identifier": "scoring_utils", "path": "utils/tranception/utils/scoring_utils.py", "snippet": "def get_mutated_sequence(focus_seq, mutant, start_idx=1, AA_vocab=AA_vocab):\ndef nanmean(v, *args, inplace=False, **kwargs):\ndef nansum(v, *args, inplace=False, **kwargs):\ndef get_optimal_window(mutation_position_relative, seq_len_wo_special, model_window):\ndef sequence_replace_single(sequence, char_to_replace, char_replacements):\ndef sequence_replace(sequences, char_to_replace, char_replacements):\ndef get_tranception_scores_mutated_sequences(model, mutated_sequence_df, batch_size_inference, score_var_name, target_seq, num_workers=10, reverse=False, indel_mode=False):\ndef get_sequence_slices(df, target_seq, model_context_len, start_idx=1, scoring_window=\"optimal\", indel_mode=False):" } ]
from dataclasses import dataclass from typing import Optional, Tuple from torch import nn from torch.nn import CrossEntropyLoss, NLLLoss from transformers import GPT2PreTrainedModel from transformers import PreTrainedTokenizerFast from transformers.modeling_utils import ( Conv1D, PreTrainedModel, SequenceSummary, find_pruneable_heads_and_indices, prune_conv1d_layer, ) from transformers.file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings ) from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, SequenceClassifierOutputWithPast, TokenClassifierOutput ) from transformers.utils.model_parallel_utils import assert_device_map, get_device_map from .activations import tranception_ACT2FN from .config import TranceptionConfig from .outputs import ( TranceptionCausalLMOutputWithCrossAttentions, ) from .utils import msa_utils from .utils import scoring_utils import math import os import pandas as pd import torch import torch.nn.functional as F import utils
4851
past_key_values=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, flip=None, start_slice=None, end_slice=None, mutated_sequence=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]`` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) hidden_states = transformer_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.transformer.first_device) hidden_states = hidden_states.to(self.lm_head.weight.device) self.MSA_log_prior = self.MSA_log_prior.to(self.lm_head.weight.device) lm_logits = self.lm_head(hidden_states) loss = None fused_shift_log_probas = None if labels is not None: # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() if self.retrieval_aggregation_mode is not None: batch_size = input_ids.size(0) if self.retrieval_aggregation_mode=="aggregate_indel": assert batch_size==1, "Aggregate indel is only supported for batch size of 1" truncated_sequence_text = mutated_sequence[0][start_slice[0]:end_slice[0]] if len(truncated_sequence_text)!=shift_logits.shape[1]-1: # shift_logits only has one extra token compared to truncated_sequence_text (the BOS token) print("Tokenization error -- seq length: {} and shift_logits length - 1 : {}".format(len(mutated_sequence),shift_logits.shape[1]-1)) MSA_log_prior, MSA_start, MSA_end = msa_utils.update_retrieved_MSA_log_prior_indel(self, self.MSA_log_prior, self.MSA_start, self.MSA_end, mutated_sequence[0]) elif self.retrieval_aggregation_mode=="aggregate_substitution": MSA_log_prior=self.MSA_log_prior MSA_start=self.MSA_start MSA_end=self.MSA_end shift_log_probas = torch.log_softmax(shift_logits,dim=-1) fused_shift_log_probas = shift_log_probas.clone() if flip is None: flip = torch.zeros(batch_size).to(fused_shift_log_probas.device) flip = flip > 0 for seq_index in range(batch_size): min_prior_slice = max(start_slice[seq_index], MSA_start) max_prior_slice = min(end_slice[seq_index], MSA_end) if max_prior_slice <= min_prior_slice: print("Non overlapping region detected: min_prior_slice {} and max_prior_slice {}".format(min_prior_slice,max_prior_slice)) continue slice_prior = MSA_log_prior[min_prior_slice:max_prior_slice,:].to(fused_shift_log_probas.device) if flip[seq_index]: slice_prior = torch.flip(slice_prior,dims=(0,)) min_logits_slice = max(0,end_slice[seq_index]-MSA_end) max_logits_slice = min_logits_slice + (max_prior_slice-min_prior_slice) 
fused_shift_log_probas[seq_index,min_logits_slice:max_logits_slice,:] = (1-self.retrieval_inference_weight_RL)*shift_log_probas[seq_index,min_logits_slice:max_logits_slice,:] + self.retrieval_inference_weight_RL*slice_prior else: min_logits_slice = max(0, MSA_start-start_slice[seq_index]) max_logits_slice = min_logits_slice + (max_prior_slice-min_prior_slice) fused_shift_log_probas[seq_index,min_logits_slice:max_logits_slice,:] = (1-self.retrieval_inference_weight_LR)*shift_log_probas[seq_index,min_logits_slice:max_logits_slice,:] + self.retrieval_inference_weight_LR*slice_prior if self.retrieval_aggregation_mode=="aggregate_indel": try: # If a given residue colume is an added zero-column, then we overwrite prior fusion and only predict based on the autoregressive transformer inference mode. inserted_retrieval_positions = [True if slice_prior[i].sum()==0 else False for i in range(len(slice_prior))]+[True] #Last True is for the end of sentence token fused_shift_log_probas[:,inserted_retrieval_positions,:]=shift_log_probas[:,inserted_retrieval_positions,:] except: print("Error when adding zero column(s) to account for insertion mutations.") loss_fct = NLLLoss(reduction='none') loss = loss_fct(input=fused_shift_log_probas.view(-1, fused_shift_log_probas.size(-1)), target=shift_labels.view(-1)).view(fused_shift_log_probas.shape[0],fused_shift_log_probas.shape[1]) mask = attention_mask[..., 1:].float() mask[mask==0]=float('nan') loss *= mask loss = nanmean(loss, dim=1).mean() else: loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output
def nanmean(v, *args, inplace=False, **kwargs): if not inplace: v = v.clone() is_nan = torch.isnan(v) v[is_nan] = 0 return v.sum(*args, **kwargs) / (~is_nan).float().sum(*args, **kwargs) def get_slopes(n, mode="standard_alibi", verbose=False): """ Function to compute the m constant for each attention head. Code has been adapted from the official ALiBi codebase at: https://github.com/ofirpress/attention_with_linear_biases/blob/master/fairseq/models/transformer.py """ def get_slopes_power_of_2(n): start = (2**(-2**-(math.log2(n)-3))) ratio = start return [start*ratio**i for i in range(n)] if mode=="grouped_alibi": n = n // 4 if math.log2(n).is_integer(): result = get_slopes_power_of_2(n) else: #Workaround when the number of heads is not a power of 2 closest_power_of_2 = 2**math.floor(math.log2(n)) result = get_slopes_power_of_2(closest_power_of_2) + get_slopes(2*closest_power_of_2)[0::2][:n-closest_power_of_2] if mode=="grouped_alibi": result = result * 4 if verbose: print("ALiBi slopes: {}".format(result)) return result class SpatialDepthWiseConvolution(nn.Module): def __init__(self, head_dim: int, kernel_size: int = 3): super().__init__() self.kernel_size = kernel_size self.conv = nn.Conv1d(in_channels=head_dim, out_channels=head_dim, kernel_size=(kernel_size,), padding=(kernel_size - 1,), groups=head_dim) def forward(self, x: torch.Tensor): batch_size, heads, seq_len, head_dim = x.shape x = x.permute(0, 1, 3, 2).contiguous() x = x.view(batch_size * heads, head_dim, seq_len) x = self.conv(x) if self.kernel_size>1: x = x[:, :, :-(self.kernel_size - 1)] x = x.view(batch_size, heads, head_dim, seq_len) x = x.permute(0, 1, 3, 2) return x class TranceptionBlockAttention(nn.Module): def __init__(self, config, is_cross_attention=False, SDWC_kernel_size=None): super().__init__() max_positions = config.max_position_embeddings self.register_buffer( "bias", torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view( 1, 1, max_positions, max_positions ), ) self.register_buffer("masked_bias", torch.tensor(-1e4)) self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.split_size = self.embed_dim if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})." ) self.scale_attn_weights = config.scale_attn_weights self.is_cross_attention = is_cross_attention if self.is_cross_attention: self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim) self.q_attn = Conv1D(self.embed_dim, self.embed_dim) else: self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim) self.c_proj = Conv1D(self.embed_dim, self.embed_dim) self.attn_dropout = nn.Dropout(config.attn_pdrop) self.resid_dropout = nn.Dropout(config.resid_pdrop) self.pruned_heads = set() self.attention_mode=config.attention_mode if self.attention_mode=="tranception": assert self.num_heads%4==0, "Invalid number of heads. Tranception requires the number of heads to be a multiple of 4." 
self.num_heads_per_kernel_size = self.num_heads // 4 self.query_depthwiseconv = nn.ModuleDict() self.key_depthwiseconv = nn.ModuleDict() self.value_depthwiseconv = nn.ModuleDict() for kernel_idx, kernel in enumerate([3,5,7]): self.query_depthwiseconv[str(kernel_idx)] = SpatialDepthWiseConvolution(self.head_dim,kernel) self.key_depthwiseconv[str(kernel_idx)] = SpatialDepthWiseConvolution(self.head_dim,kernel) self.value_depthwiseconv[str(kernel_idx)] = SpatialDepthWiseConvolution(self.head_dim,kernel) def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads) index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)]) # Prune conv1d layers self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) # Update hyper params self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads)) self.num_heads = self.num_heads - len(heads) self.pruned_heads = self.pruned_heads.union(heads) def _attn(self, query, key, value, attention_mask=None, head_mask=None, alibi_bias=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / (float(value.size(-1)) ** 0.5) if not self.is_cross_attention: # if only "normal" attention layer implements causal mask query_length, key_length = query.size(-2), key.size(-2) causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool() attn_weights = torch.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype)) if alibi_bias is not None: attn_weights = attn_weights + alibi_bias[:,:,:attn_weights.size(-1)] if attention_mask is not None: # Apply the attention mask attn_weights = attn_weights + attention_mask attn_weights = nn.Softmax(dim=-1)(attn_weights) attn_weights = self.attn_dropout(attn_weights) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return attn_output, attn_weights def _split_heads(self, tensor, num_heads, attn_head_size): """ Splits hidden_size dim into attn_head_size and num_heads """ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(*new_shape) return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) def _merge_heads(self, tensor, num_heads, attn_head_size): """ Merges attn_head_size dim and num_attn_heads dim into hidden_size """ tensor = tensor.permute(0, 2, 1, 3).contiguous() new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,) return tensor.view(new_shape) def forward( self, hidden_states, layer_past=None, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, use_cache=False, output_attentions=False, alibi_bias=None, ): if encoder_hidden_states is not None: if not hasattr(self, "q_attn"): raise ValueError( "If class is used as cross attention, the weights `q_attn` have to be defined. " "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." 
) query = self.q_attn(hidden_states) key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) attention_mask = encoder_attention_mask else: query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if layer_past is not None: past_key, past_value = layer_past key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) if use_cache is True: present = (key, value) else: present = None if self.attention_mode=="tranception": # We do not do anything on the first self.num_heads_per_kernel_size heads (kernel =1) query_list=[query[:,:self.num_heads_per_kernel_size,:,:]] key_list=[key[:,:self.num_heads_per_kernel_size,:,:]] value_list=[value[:,:self.num_heads_per_kernel_size,:,:]] for kernel_idx in range(3): query_list.append(self.query_depthwiseconv[str(kernel_idx)](query[:,(kernel_idx+1)*self.num_heads_per_kernel_size:(kernel_idx+2)*self.num_heads_per_kernel_size,:,:])) key_list.append(self.key_depthwiseconv[str(kernel_idx)](key[:,(kernel_idx+1)*self.num_heads_per_kernel_size:(kernel_idx+2)*self.num_heads_per_kernel_size,:,:])) value_list.append(self.value_depthwiseconv[str(kernel_idx)](value[:,(kernel_idx+1)*self.num_heads_per_kernel_size:(kernel_idx+2)*self.num_heads_per_kernel_size,:,:])) query=torch.cat(query_list, dim=1) key=torch.cat(key_list, dim=1) value=torch.cat(value_list, dim=1) attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask, alibi_bias=alibi_bias) attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present) if output_attentions: outputs += (attn_weights,) return outputs # a, present, (attentions) class TranceptionBlockMLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.hidden_size self.c_fc = Conv1D(intermediate_size, embed_dim) self.c_proj = Conv1D(embed_dim, intermediate_size) self.act = tranception_ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class TranceptionBlock(nn.Module): def __init__(self, config, SDWC_kernel_size=None): super().__init__() hidden_size = config.hidden_size inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = TranceptionBlockAttention(config, SDWC_kernel_size=SDWC_kernel_size) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) if config.add_cross_attention: self.crossattention = TranceptionBlockAttention(config, is_cross_attention=True, SDWC_kernel_size=SDWC_kernel_size) self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = TranceptionBlockMLP(inner_dim, config) def forward( self, hidden_states, layer_past=None, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, use_cache=False, output_attentions=False, alibi_bias=None, ): residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn( hidden_states, 
layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, alibi_bias=alibi_bias, ) attn_output = attn_outputs[0] # output_attn: a, present, (attentions) outputs = attn_outputs[1:] # residual connection hidden_states = attn_output + residual if encoder_hidden_states is not None: # add one self-attention block for cross-attention if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " "cross-attention layers by setting `config.add_cross_attention=True`" ) residual = hidden_states hidden_states = self.ln_cross_attn(hidden_states) cross_attn_outputs = self.crossattention( hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) attn_output = cross_attn_outputs[0] # residual connection hidden_states = residual + attn_output outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) # residual connection hidden_states = residual + feed_forward_hidden_states if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs # hidden_states, present, (attentions, cross_attentions) class TranceptionModel(GPT2PreTrainedModel): _keys_to_ignore_on_load_missing = ["attn.masked_bias"] def __init__(self, config): super().__init__(config) self.embed_dim = config.hidden_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.position_embedding = config.position_embedding if hasattr(config, "position_embedding") else "learned" if self.position_embedding=="learned": self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.alibi = None elif self.position_embedding=="grouped_alibi": maxpos = config.n_positions attn_heads = config.n_head self.slopes = torch.Tensor(get_slopes(attn_heads, mode=self.position_embedding)) #The softmax operation is invariant to translation, and bias functions used are always linear. 
alibi = self.slopes.unsqueeze(1).unsqueeze(1) * torch.arange(maxpos).unsqueeze(0).unsqueeze(0).expand(attn_heads, -1, -1) alibi = alibi.view(attn_heads, 1, maxpos) self.register_buffer('alibi',alibi) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([TranceptionBlock(config) for _ in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) self.init_weights() # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False def parallelize(self, device_map=None, num_cores=None): self.device_map = ( get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map ) device_prefix="cuda:" assert_device_map(self.device_map, len(self.h)) self.model_parallel = True self.first_device = "cpu" if "cpu" in self.device_map.keys() else device_prefix + str(min(self.device_map.keys())) self.last_device = device_prefix + str(max(self.device_map.keys())) self.wte = self.wte.to(self.first_device) if self.position_embedding=="learned": self.wpe = self.wpe.to(self.first_device) for k, v in self.device_map.items(): print("k,v :"+str(k)+","+str(v)) for block in v: cuda_device = device_prefix + str(k) self.h[block] = self.h[block].to(cuda_device) self.ln_f = self.ln_f.to(self.last_device) def deparallelize(self): self.model_parallel = False self.device_map = None self.first_device = "cpu" self.last_device = "cpu" self.wte = self.wte.to("cpu") if self.position_embedding=="learned": self.wpe = self.wpe.to("cpu") for index in range(len(self.h)): self.h[index] = self.h[index].to("cpu") self.ln_f = self.ln_f.to("cpu") torch.cuda.empty_cache() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ for layer, heads in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) def forward( self, input_ids=None, past_key_values=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if position_ids is not None: position_ids = position_ids.view(-1, input_shape[-1]) if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.h)) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) # GPT2Attention mask. if attention_mask is not None: if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") attention_mask = attention_mask.view(batch_size, -1) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask = attention_mask[:, None, None, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility attention_mask = (1.0 - attention_mask) * -10000.0 # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.add_cross_attention and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # head_mask has shape n_layer x batch x n_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.n_layer) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) if self.position_embedding=="learned": position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds else: hidden_states = inputs_embeds if token_type_ids is not None: token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = input_shape + (hidden_states.size(-1),) presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): # Model parallel if self.model_parallel: torch.cuda.set_device(hidden_states.device) # Ensure layer_past is on same device as hidden_states (might not be correct) if layer_past is not None: layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past) # Ensure that attention_mask is always on the same device as hidden_states if attention_mask is not None: attention_mask = attention_mask.to(hidden_states.device) if isinstance(head_mask, torch.Tensor): head_mask = head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: print("`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...") use_cache = False def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, use_cache, output_attentions) return custom_forward outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, None, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, ) else: outputs = block( hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, alibi_bias=self.alibi if hasattr(self, "alibi") else None ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) if self.model_parallel: device_prefix="cuda:" for k, v in self.device_map.items(): if i == v[-1] and device_prefix + str(k) != self.last_device: hidden_states = hidden_states.to(device_prefix + str(k + 1)) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(*output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class TranceptionLMHeadModel(GPT2PreTrainedModel): _keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"] def __init__(self, config): super().__init__(config) self.transformer = TranceptionModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.config = config self.init_weights() self.default_model_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # Model parallel self.model_parallel = False self.device_map = None self.retrieval_aggregation_mode = config.retrieval_aggregation_mode if hasattr(config, "retrieval_aggregation_mode") else None if self.retrieval_aggregation_mode is not None: print("Model leverages both autoregressive and retrieval inference") self.MSA_filename = config.MSA_filename if hasattr(config, "MSA_filename") else False self.MSA_folder = '/'.join(self.MSA_filename.split(os.sep)[:-1]) self.MSA_name = self.MSA_filename.split(os.sep)[-1] self.retrieval_inference_weight_LR = config.retrieval_inference_weight if hasattr(config, "retrieval_inference_weight") else 0.6 self.retrieval_inference_weight_RL = config.retrieval_inference_weight if hasattr(config, "retrieval_inference_weight") else 0.6 self.MSA_start=config.MSA_start self.MSA_end=config.MSA_end self.full_protein_length = config.full_protein_length if hasattr(config, "full_protein_length") else -1 self.MSA_log_prior = torch.log(torch.tensor( msa_utils.get_msa_prior( MSA_data_file=self.MSA_filename, MSA_weight_file_name=config.MSA_weight_file_name, retrieval_aggregation_mode=self.retrieval_aggregation_mode, MSA_start=self.MSA_start, MSA_end=self.MSA_end, len_target_seq=self.full_protein_length, vocab=config.tokenizer.get_vocab(), verbose=False ) 
).float().to(self.default_model_device)) else: print("Model only uses autoregressive inference") def parallelize(self, device_map=None, num_cores=None, num_pipelines=1): self.num_pipelines=num_pipelines self.device_map = ( get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.transformer.h)) self.transformer.parallelize(self.device_map, num_cores=num_cores) self.lm_head = self.lm_head.to(self.transformer.first_device) self.model_parallel = True def deparallelize(self): self.transformer.deparallelize() self.transformer = self.transformer.to("cpu") self.lm_head = self.lm_head.to("cpu") self.model_parallel = False torch.cuda.empty_cache() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs): token_type_ids = kwargs.get("token_type_ids", None) # only last token for inputs_ids if past is defined in kwargs if past: input_ids = input_ids[:, -1].unsqueeze(-1) if token_type_ids is not None: token_type_ids = token_type_ids[:, -1].unsqueeze(-1) attention_mask = kwargs.get("attention_mask", None) position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past: position_ids = position_ids[:, -1].unsqueeze(-1) else: position_ids = None return { "input_ids": input_ids, "past_key_values": past, "use_cache": kwargs.get("use_cache"), "position_ids": position_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, "flip": kwargs.get("flip", None), } def forward( self, input_ids=None, past_key_values=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, flip=None, start_slice=None, end_slice=None, mutated_sequence=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]`` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) hidden_states = transformer_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.transformer.first_device) hidden_states = hidden_states.to(self.lm_head.weight.device) self.MSA_log_prior = self.MSA_log_prior.to(self.lm_head.weight.device) lm_logits = self.lm_head(hidden_states) loss = None fused_shift_log_probas = None if labels is not None: # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() if self.retrieval_aggregation_mode is not None: batch_size = input_ids.size(0) if self.retrieval_aggregation_mode=="aggregate_indel": assert batch_size==1, "Aggregate indel is only supported for batch size of 1" truncated_sequence_text = mutated_sequence[0][start_slice[0]:end_slice[0]] if len(truncated_sequence_text)!=shift_logits.shape[1]-1: # shift_logits only has one extra token compared to truncated_sequence_text (the BOS token) print("Tokenization error -- seq length: {} and shift_logits length - 1 : {}".format(len(mutated_sequence),shift_logits.shape[1]-1)) MSA_log_prior, MSA_start, MSA_end = msa_utils.update_retrieved_MSA_log_prior_indel(self, self.MSA_log_prior, self.MSA_start, self.MSA_end, mutated_sequence[0]) elif self.retrieval_aggregation_mode=="aggregate_substitution": MSA_log_prior=self.MSA_log_prior MSA_start=self.MSA_start MSA_end=self.MSA_end shift_log_probas = torch.log_softmax(shift_logits,dim=-1) fused_shift_log_probas = shift_log_probas.clone() if flip is None: flip = torch.zeros(batch_size).to(fused_shift_log_probas.device) flip = flip > 0 for seq_index in range(batch_size): min_prior_slice = max(start_slice[seq_index], MSA_start) max_prior_slice = min(end_slice[seq_index], MSA_end) if max_prior_slice <= min_prior_slice: print("Non overlapping region detected: min_prior_slice {} and max_prior_slice {}".format(min_prior_slice,max_prior_slice)) continue slice_prior = MSA_log_prior[min_prior_slice:max_prior_slice,:].to(fused_shift_log_probas.device) if flip[seq_index]: slice_prior = torch.flip(slice_prior,dims=(0,)) min_logits_slice = max(0,end_slice[seq_index]-MSA_end) max_logits_slice = min_logits_slice + (max_prior_slice-min_prior_slice) fused_shift_log_probas[seq_index,min_logits_slice:max_logits_slice,:] = (1-self.retrieval_inference_weight_RL)*shift_log_probas[seq_index,min_logits_slice:max_logits_slice,:] + self.retrieval_inference_weight_RL*slice_prior else: min_logits_slice = max(0, MSA_start-start_slice[seq_index]) max_logits_slice = min_logits_slice + (max_prior_slice-min_prior_slice) fused_shift_log_probas[seq_index,min_logits_slice:max_logits_slice,:] = (1-self.retrieval_inference_weight_LR)*shift_log_probas[seq_index,min_logits_slice:max_logits_slice,:] + self.retrieval_inference_weight_LR*slice_prior if 
self.retrieval_aggregation_mode=="aggregate_indel": try: # If a given residue colume is an added zero-column, then we overwrite prior fusion and only predict based on the autoregressive transformer inference mode. inserted_retrieval_positions = [True if slice_prior[i].sum()==0 else False for i in range(len(slice_prior))]+[True] #Last True is for the end of sentence token fused_shift_log_probas[:,inserted_retrieval_positions,:]=shift_log_probas[:,inserted_retrieval_positions,:] except: print("Error when adding zero column(s) to account for insertion mutations.") loss_fct = NLLLoss(reduction='none') loss = loss_fct(input=fused_shift_log_probas.view(-1, fused_shift_log_probas.size(-1)), target=shift_labels.view(-1)).view(fused_shift_log_probas.shape[0],fused_shift_log_probas.shape[1]) mask = attention_mask[..., 1:].float() mask[mask==0]=float('nan') loss *= mask loss = nanmean(loss, dim=1).mean() else: loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output
return TranceptionCausalLMOutputWithCrossAttentions(
2
2023-10-28 11:41:05+00:00
8k
CVHub520/yolov5_obb
models/yolo.py
[ { "identifier": "check_anchor_order", "path": "utils/autoanchor.py", "snippet": "def check_anchor_order(m):\n # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary\n a = m.anchors.prod(-1).view(-1) # anchor area\n da = a[-1] - a[0] # delta a\n ds = m.stride[-1] - m.stride[0] # delta s\n if da.sign() != ds.sign(): # same order\n LOGGER.info(f'{PREFIX}Reversing anchor order')\n m.anchors[:] = m.anchors.flip(0)" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "check_version", "path": "utils/general.py", "snippet": "def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\n # Check version vs. required version\n current, minimum = (pkg.parse_version(x) for x in (current, minimum))\n result = (current == minimum) if pinned else (current >= minimum) # bool\n s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string\n if hard:\n assert result, s # assert min requirements met\n if verbose and not result:\n LOGGER.warning(s)\n return result" }, { "identifier": "check_yaml", "path": "utils/general.py", "snippet": "def check_yaml(file, suffix=('.yaml', '.yml')):\n # Search/download YAML file (if necessary) and return path, checking suffix\n return check_file(file, suffix)" }, { "identifier": "make_divisible", "path": "utils/general.py", "snippet": "def make_divisible(x, divisor):\n # Returns nearest x divisible by divisor\n if isinstance(divisor, torch.Tensor):\n divisor = int(divisor.max()) # to int\n return math.ceil(x / divisor) * divisor" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(name, opt):\n # Print argparser arguments\n LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))" }, { "identifier": "feature_visualization", "path": "utils/plots.py", "snippet": "def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):\n \"\"\"\n x: Features to be visualized\n module_type: Module type\n stage: Module stage within model\n n: Maximum number of feature maps to plot\n save_dir: Directory to save results\n \"\"\"\n if 'Detect' not in module_type:\n batch, channels, height, width = x.shape # batch, channels, height, width\n if height > 1 and width > 1:\n f = save_dir / f\"stage{stage}_{module_type.split('.')[-1]}_features.png\" # filename\n\n blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels\n n = min(n, channels) # number of plots\n fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols\n ax = ax.ravel()\n plt.subplots_adjust(wspace=0.05, hspace=0.05)\n for i in range(n):\n ax[i].imshow(blocks[i].squeeze()) # cmap='gray'\n ax[i].axis('off')\n\n print(f'Saving {f}... 
({n}/{channels})')\n plt.savefig(f, dpi=300, bbox_inches='tight')\n plt.close()\n np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save" }, { "identifier": "fuse_conv_and_bn", "path": "utils/torch_utils.py", "snippet": "def fuse_conv_and_bn(conv, bn):\n # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n fusedconv = nn.Conv2d(conv.in_channels,\n conv.out_channels,\n kernel_size=conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n groups=conv.groups,\n bias=True).requires_grad_(False).to(conv.weight.device)\n\n # prepare filters\n w_conv = conv.weight.clone().view(conv.out_channels, -1)\n w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))\n\n # prepare spatial bias\n b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias\n b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n return fusedconv" }, { "identifier": "initialize_weights", "path": "utils/torch_utils.py", "snippet": "def initialize_weights(model):\n for m in model.modules():\n t = type(m)\n if t is nn.Conv2d:\n pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif t is nn.BatchNorm2d:\n m.eps = 1e-3\n m.momentum = 0.03\n elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True" }, { "identifier": "model_info", "path": "utils/torch_utils.py", "snippet": "def model_info(model, verbose=False, img_size=640):\n # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if verbose:\n print(f\"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}\")\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n\n try: # FLOPs\n from thop import profile\n stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32\n img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input\n flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs\n img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float\n fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs\n except (ImportError, Exception):\n fs = ''\n\n LOGGER.info(f\"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}\")" }, { "identifier": "scale_img", "path": "utils/torch_utils.py", "snippet": "def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)\n # scales img(bs,3,y,x) by ratio constrained to gs-multiple\n if ratio == 1.0:\n return img\n else:\n h, w = img.shape[2:]\n s = (int(h * ratio), int(w * ratio)) # new size\n img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize\n if not same_shape: # pad/crop img\n h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))\n return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet 
mean" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "time_sync", "path": "utils/torch_utils.py", "snippet": "def time_sync():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" } ]
import argparse import sys import thop # for FLOPs computation import yaml # for torch hub from copy import deepcopy from pathlib import Path from models.common import * from models.experimental import * from utils.autoanchor import check_anchor_order from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args from utils.plots import feature_visualization from utils.torch_utils import fuse_conv_and_bn, initialize_weights, model_info, scale_img, select_device, time_sync
5876
initialize_weights(self) self.info() LOGGER.info('') def forward(self, x, augment=False, profile=False, visualize=False): """ Args: x (tensor): (b, 3, height, width), RGB Return: if not augment: x (list[P3_out, ...]): tensor.Size(b, self.na, h_i, w_i, c), self.na means the number of anchors scales else: """ if augment: return self._forward_augment(x) # augmented inference, None return self._forward_once(x, profile, visualize) # single-scale inference, train def _forward_augment(self, x): img_size = x.shape[-2:] # height, width s = [1, 0.83, 0.67] # scales f = [None, 3, None] # flips (2-ud, 3-lr) y = [] # outputs for si, fi in zip(s, f): xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) yi = self._forward_once(xi)[0] # forward # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save yi = self._descale_pred(yi, fi, si, img_size) y.append(yi) y = self._clip_augmented(y) # clip augmented tails return torch.cat(y, 1), None # augmented inference, train def _forward_once(self, x, profile=False, visualize=False): """ Args: x (tensor): (b, 3, height, width), RGB Return: x (list[P3_out, ...]): tensor.Size(b, self.na, h_i, w_i, c), self.na means the number of anchors scales """ y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers if profile: self._profile_one_layer(m, x, dt) x = m(x) # run y.append(x if m.i in self.save else None) # save output if visualize: feature_visualization(x, m.type, m.i, save_dir=visualize) return x def _descale_pred(self, p, flips, scale, img_size): # de-scale predictions following augmented inference (inverse operation) if self.inplace: p[..., :4] /= scale # de-scale if flips == 2: p[..., 1] = img_size[0] - p[..., 1] # de-flip ud elif flips == 3: p[..., 0] = img_size[1] - p[..., 0] # de-flip lr else: x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale if flips == 2: y = img_size[0] - y # de-flip ud elif flips == 3: x = img_size[1] - x # de-flip lr p = torch.cat((x, y, wh, p[..., 4:]), -1) return p def _clip_augmented(self, y): # Clip YOLOv5 augmented inference tails nl = self.model[-1].nl # number of detection layers (P3-P5) g = sum(4 ** x for x in range(nl)) # grid points e = 1 # exclude layer count i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices y[0] = y[0][:, :-i] # large i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices y[-1] = y[-1][:, i:] # small return y def _profile_one_layer(self, m, x, dt): c = isinstance(m, Detect) # is final layer, copy input as inplace fix o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs t = time_sync() for _ in range(10): m(x.copy() if c else x) dt.append((time_sync() - t) * 100) if m == self.model[0]: LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') if c: LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency # https://arxiv.org/abs/1708.02002 section 3.3 # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
m = self.model[-1] # Detect() module for mi, s in zip(m.m, m.stride): # from b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) b.data[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) def _print_biases(self): m = self.model[-1] # Detect() module for mi in m.m: # from b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) LOGGER.info( ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) # def _print_weights(self): # for m in self.model.modules(): # if type(m) is Bottleneck: # LOGGER.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers LOGGER.info('Fusing layers... ') for m in self.model.modules(): if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ YOLO-specific modules Usage: $ python path/to/models/yolo.py --cfg yolov5s.yaml """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH # ROOT = ROOT.relative_to(Path.cwd()) # relative try: except ImportError: thop = None class Detect(nn.Module): stride = None # strides computed during build onnx_dynamic = False # ONNX export parameter export = False # export mode def __init__(self, nc=80, anchors=(), ch=(), inplace=False): # detection layer super().__init__() self.nc = nc # number of classes self.no = nc + 5 + 180 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors self.grid = [torch.zeros(1)] * self.nl # init grid self.anchor_grid = [torch.zeros(1)] * self.nl # init anchor grid self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.inplace = inplace # use in-place ops (e.g. slice assignment) def forward(self, x): """ Args: x (list[P3_in,...]): torch.Size(b, c_i, h_i, w_i) Return: if train: x (list[P3_out,...]): torch.Size(b, self.na, h_i, w_i, self.no), self.na means the number of anchors scales else: inference (tensor): (b, n_all_anchors, self.no) x (list[P3_in,...]): torch.Size(b, c_i, h_i, w_i) """ z = [] # inference output for i in range(self.nl): x[i] = self.m[i](x[i]) # conv bs, _, ny, nx = x[i].shape # x[i](bs,self.no * self.na,20,20) to x[i](bs,self.na,20,20,self.no) x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() if not self.training: # inference if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) y = x[i].sigmoid() # (tensor): (b, self.na, h, w, self.no) if self.inplace: y[..., 0:2] = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, y[..., 4:]), -1) z.append(y.view(bs, -1, self.no)) # z (list[P3_pred]): Torch.Size(b, n_anchors, self.no) return x if self.training else (torch.cat(z, 1), ) if self.export else (torch.cat(z, 1), x) def _make_grid(self, nx=20, ny=20, i=0): d = self.anchors[i].device if check_version(torch.__version__, '1.10.0'): # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)], indexing='ij') else: yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)]) grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float() anchor_grid = (self.anchors[i].clone() * self.stride[i]) \ .view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float() return grid, anchor_grid class Model(nn.Module): def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes super().__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict else: # is *.yaml self.yaml_file = Path(cfg).name with open(cfg, encoding='ascii', errors='ignore') as f: self.yaml = yaml.safe_load(f) # model dict # Define model ch = self.yaml['ch'] = 
self.yaml.get('ch', ch)  # input channels
        if nc and nc != self.yaml['nc']:
            LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
            self.yaml['nc'] = nc  # override yaml value
        if anchors:
            LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
            self.yaml['anchors'] = round(anchors)  # override yaml value
        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
        self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
        self.inplace = self.yaml.get('inplace', True)

        # Build strides, anchors
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):
            s = 256  # 2x min stride
            m.inplace = self.inplace
            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
            m.anchors /= m.stride.view(-1, 1, 1)  # featuremap pixel
            check_anchor_order(m)
            self.stride = m.stride
            self._initialize_biases()  # only run once

        # Init weights, biases
        initialize_weights(self)
        self.info()
        LOGGER.info('')

    def forward(self, x, augment=False, profile=False, visualize=False):
        """
        Args:
            x (tensor): (b, 3, height, width), RGB
        Return:
            if not augment:
                x (list[P3_out, ...]): tensor.Size(b, self.na, h_i, w_i, c), self.na means the number of anchors scales
            else:
        """
        if augment:
            return self._forward_augment(x)  # augmented inference, None
        return self._forward_once(x, profile, visualize)  # single-scale inference, train

    def _forward_augment(self, x):
        img_size = x.shape[-2:]  # height, width
        s = [1, 0.83, 0.67]  # scales
        f = [None, 3, None]  # flips (2-ud, 3-lr)
        y = []  # outputs
        for si, fi in zip(s, f):
            xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
            yi = self._forward_once(xi)[0]  # forward
            # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
            yi = self._descale_pred(yi, fi, si, img_size)
            y.append(yi)
        y = self._clip_augmented(y)  # clip augmented tails
        return torch.cat(y, 1), None  # augmented inference, train

    def _forward_once(self, x, profile=False, visualize=False):
        """
        Args:
            x (tensor): (b, 3, height, width), RGB
        Return:
            x (list[P3_out, ...]): tensor.Size(b, self.na, h_i, w_i, c), self.na means the number of anchors scales
        """
        y, dt = [], []  # outputs
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
            if profile:
                self._profile_one_layer(m, x, dt)
            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output
            if visualize:
                feature_visualization(x, m.type, m.i, save_dir=visualize)
        return x

    def _descale_pred(self, p, flips, scale, img_size):
        # de-scale predictions following augmented inference (inverse operation)
        if self.inplace:
            p[..., :4] /= scale  # de-scale
            if flips == 2:
                p[..., 1] = img_size[0] - p[..., 1]  # de-flip ud
            elif flips == 3:
                p[..., 0] = img_size[1] - p[..., 0]  # de-flip lr
        else:
            x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale  # de-scale
            if flips == 2:
                y = img_size[0] - y  # de-flip ud
            elif flips == 3:
                x = img_size[1] - x  # de-flip lr
            p = torch.cat((x, y, wh, p[..., 4:]), -1)
        return p

    def _clip_augmented(self, y):
        # Clip YOLOv5 augmented inference tails
        nl = self.model[-1].nl  # number of detection layers (P3-P5)
        g = sum(4 ** x for x in range(nl))  # grid points
        e = 1  # exclude layer count
        i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e))  # indices
        y[0] = y[0][:, :-i]  # large
        i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e))  # indices
        y[-1] = y[-1][:, i:]  # small
        return y

    def _profile_one_layer(self, m, x, dt):
        c = isinstance(m, Detect)  # is final layer, copy input as inplace fix
        o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPs
        t = time_sync()
        for _ in range(10):
            m(x.copy() if c else x)
        dt.append((time_sync() - t) * 100)
        if m == self.model[0]:
            LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}")
        LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
        if c:
            LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total")

    def _initialize_biases(self, cf=None):
        # initialize biases into Detect(), cf is class frequency
        # https://arxiv.org/abs/1708.02002 section 3.3
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
        m = self.model[-1]  # Detect() module
        for mi, s in zip(m.m, m.stride):  # from
            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
            b.data[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum())  # cls
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)

    def _print_biases(self):
        m = self.model[-1]  # Detect() module
        for mi in m.m:  # from
            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
            LOGGER.info(
                ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))

    # def _print_weights(self):
    #     for m in self.model.modules():
    #         if type(m) is Bottleneck:
    #             LOGGER.info('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights

    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
        LOGGER.info('Fusing layers... ')
        for m in self.model.modules():
            if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
7
2023-10-31 06:06:41+00:00
8k
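The `_descale_pred` logic in the row above inverts the scale-and-flip test-time augmentation before the per-scale predictions are concatenated. A minimal sketch of that inverse mapping on a dummy prediction tensor, assuming an (x, y, w, h, conf, cls) layout as in YOLOv5-style heads; the standalone helper below is hypothetical and not part of the sampled repository:

```python
import torch

def descale_pred(p, flip, scale, img_size):
    # Hypothetical standalone version of the de-scaling step:
    # undo the resize, then mirror the flipped coordinate back.
    p = p.clone()
    p[..., :4] /= scale                      # undo the resize applied before inference
    if flip == 2:                            # a vertical flip was applied
        p[..., 1] = img_size[0] - p[..., 1]  # de-flip y
    elif flip == 3:                          # a horizontal flip was applied
        p[..., 0] = img_size[1] - p[..., 0]  # de-flip x
    return p

# One fake prediction (x, y, w, h, obj, one class score) on a 640x640 image
pred = torch.tensor([[100., 200., 50., 80., 0.9, 0.7]])
restored = descale_pred(pred, flip=3, scale=0.83, img_size=(640, 640))
print(restored[0, :4])  # x mirrored back, box rescaled to the original resolution
```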
hyw-dev/AFI-ForwardDeduplicate
models/model_pg104/RIFE.py
[ { "identifier": "GMFlow", "path": "models/gmflow/gmflow.py", "snippet": "class GMFlow(nn.Module):\r\n def __init__(self,\r\n num_scales=2,\r\n upsample_factor=4,\r\n feature_channels=128,\r\n attention_type='swin',\r\n num_transformer_layers=6,\r\n ffn_dim_expansion=4,\r\n num_head=1,\r\n **kwargs,\r\n ):\r\n super(GMFlow, self).__init__()\r\n\r\n self.num_scales = num_scales\r\n self.feature_channels = feature_channels\r\n self.upsample_factor = upsample_factor\r\n self.attention_type = attention_type\r\n self.num_transformer_layers = num_transformer_layers\r\n\r\n # CNN backbone\r\n self.backbone = CNNEncoder(output_dim=feature_channels, num_output_scales=num_scales)\r\n\r\n # Transformer\r\n self.transformer = FeatureTransformer(num_layers=num_transformer_layers,\r\n d_model=feature_channels,\r\n nhead=num_head,\r\n attention_type=attention_type,\r\n ffn_dim_expansion=ffn_dim_expansion,\r\n )\r\n\r\n # flow propagation with self-attn\r\n self.feature_flow_attn = FeatureFlowAttention(in_channels=feature_channels)\r\n\r\n # convex upsampling: concat feature0 and flow as input\r\n self.upsampler = nn.Sequential(nn.Conv2d(2 + feature_channels, 256, 3, 1, 1),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(256, upsample_factor ** 2 * 9, 1, 1, 0))\r\n\r\n def extract_feature(self, img0, img1):\r\n concat = torch.cat((img0, img1), dim=0) # [2B, C, H, W]\r\n features = self.backbone(concat) # list of [2B, C, H, W], resolution from high to low\r\n\r\n # reverse: resolution from low to high\r\n features = features[::-1]\r\n\r\n feature0, feature1 = [], []\r\n\r\n for i in range(len(features)):\r\n feature = features[i]\r\n chunks = torch.chunk(feature, 2, 0) # tuple\r\n feature0.append(chunks[0])\r\n feature1.append(chunks[1])\r\n\r\n return feature0, feature1\r\n\r\n def upsample_flow(self, flow, feature, bilinear=False, upsample_factor=8,\r\n ):\r\n if bilinear:\r\n up_flow = F.interpolate(flow, scale_factor=upsample_factor,\r\n mode='bilinear', align_corners=True) * upsample_factor\r\n\r\n else:\r\n # convex upsampling\r\n concat = torch.cat((flow, feature), dim=1)\r\n\r\n mask = self.upsampler(concat)\r\n b, flow_channel, h, w = flow.shape\r\n mask = mask.view(b, 1, 9, self.upsample_factor, self.upsample_factor, h, w) # [B, 1, 9, K, K, H, W]\r\n mask = torch.softmax(mask, dim=2)\r\n\r\n up_flow = F.unfold(self.upsample_factor * flow, [3, 3], padding=1)\r\n up_flow = up_flow.view(b, flow_channel, 9, 1, 1, h, w) # [B, 2, 9, 1, 1, H, W]\r\n\r\n up_flow = torch.sum(mask * up_flow, dim=2) # [B, 2, K, K, H, W]\r\n up_flow = up_flow.permute(0, 1, 4, 2, 5, 3) # [B, 2, K, H, K, W]\r\n up_flow = up_flow.reshape(b, flow_channel, self.upsample_factor * h,\r\n self.upsample_factor * w) # [B, 2, K*H, K*W]\r\n\r\n return up_flow\r\n\r\n def forward(self, img0, img1,\r\n attn_splits_list=[2, 8],\r\n corr_radius_list=[-1, 4],\r\n prop_radius_list=[-1, 1],\r\n pred_bidir_flow=False,\r\n **kwargs,\r\n ):\r\n\r\n img0, img1 = normalize_img(img0, img1) # [B, 3, H, W]\r\n\r\n # resolution low to high\r\n feature0_list, feature1_list = self.extract_feature(img0, img1) # list of features\r\n\r\n flow = None\r\n\r\n assert len(attn_splits_list) == len(corr_radius_list) == len(prop_radius_list) == self.num_scales\r\n\r\n for scale_idx in range(self.num_scales):\r\n feature0, feature1 = feature0_list[scale_idx], feature1_list[scale_idx]\r\n\r\n if pred_bidir_flow and scale_idx > 0:\r\n # predicting bidirectional flow with refinement\r\n feature0, feature1 = torch.cat((feature0, feature1), dim=0), torch.cat((feature1, 
feature0), dim=0)\r\n\r\n upsample_factor = self.upsample_factor * (2 ** (self.num_scales - 1 - scale_idx))\r\n\r\n if scale_idx > 0:\r\n flow = F.interpolate(flow, scale_factor=2, mode='bilinear', align_corners=True) * 2\r\n\r\n if flow is not None:\r\n flow = flow.detach()\r\n feature1 = flow_warp(feature1, flow) # [B, C, H, W]\r\n\r\n attn_splits = attn_splits_list[scale_idx]\r\n corr_radius = corr_radius_list[scale_idx]\r\n prop_radius = prop_radius_list[scale_idx]\r\n\r\n # add position to features\r\n feature0, feature1 = feature_add_position(feature0, feature1, attn_splits, self.feature_channels)\r\n\r\n # Transformer\r\n feature0, feature1 = self.transformer(feature0, feature1, attn_num_splits=attn_splits)\r\n\r\n # correlation and softmax\r\n if corr_radius == -1: # global matching\r\n flow_pred = global_correlation_softmax(feature0, feature1, pred_bidir_flow)[0]\r\n else: # local matching\r\n flow_pred = local_correlation_softmax(feature0, feature1, corr_radius)[0]\r\n\r\n # flow or residual flow\r\n flow = flow + flow_pred if flow is not None else flow_pred\r\n\r\n # upsample to the original resolution for supervison\r\n if self.training: # only need to upsample intermediate flow predictions at training time\r\n flow_bilinear = self.upsample_flow(flow, None, bilinear=True, upsample_factor=upsample_factor)\r\n\r\n # flow propagation with self-attn\r\n if pred_bidir_flow and scale_idx == 0:\r\n feature0 = torch.cat((feature0, feature1), dim=0) # [2*B, C, H, W] for propagation\r\n flow = self.feature_flow_attn(feature0, flow.detach(),\r\n local_window_attn=prop_radius > 0,\r\n local_window_radius=prop_radius)\r\n\r\n # bilinear upsampling at training time except the last one\r\n if self.training and scale_idx < self.num_scales - 1:\r\n flow_up = self.upsample_flow(flow, feature0, bilinear=True, upsample_factor=upsample_factor)\r\n\r\n if scale_idx == self.num_scales - 1:\r\n flow_up = self.upsample_flow(flow, feature0)\r\n\r\n return flow_up\r" }, { "identifier": "IFNet", "path": "models/model_pg104/IFNet_HDv3.py", "snippet": "class IFNet(nn.Module):\r\n def __init__(self):\r\n super(IFNet, self).__init__()\r\n self.block0 = IFBlock(7, c=192)\r\n self.block1 = IFBlock(8+4, c=128)\r\n self.block2 = IFBlock(8+4, c=96)\r\n self.block3 = IFBlock(8+4, c=64)\r\n\r\n def forward( self, x, timestep=0.5, scale_list=[8, 4, 2, 1], training=False, fastmode=True, ensemble=False):\r\n if training == False:\r\n channel = x.shape[1] // 2\r\n img0 = x[:, :channel]\r\n img1 = x[:, channel:]\r\n if not torch.is_tensor(timestep):\r\n timestep = (x[:, :1].clone() * 0 + 1) * timestep\r\n else:\r\n timestep = timestep.repeat(1, 1, img0.shape[2], img0.shape[3])\r\n flow = None\r\n block = [self.block0, self.block1, self.block2, self.block3]\r\n for i in range(4):\r\n if flow is None:\r\n flow, mask = block[i](torch.cat((img0[:, :3], img1[:, :3], timestep), 1), None, scale=scale_list[i])\r\n if ensemble:\r\n f1, m1 = block[i](torch.cat((img1[:, :3], img0[:, :3], 1-timestep), 1), None, scale=scale_list[i])\r\n flow = (flow + torch.cat((f1[:, 2:4], f1[:, :2]), 1)) / 2\r\n mask = (mask + (-m1)) / 2\r\n else:\r\n f0, m0 = block[i](torch.cat((warped_img0[:, :3], warped_img1[:, :3], timestep, mask), 1), flow, scale=scale_list[i])\r\n if ensemble:\r\n f1, m1 = block[i](torch.cat((warped_img1[:, :3], warped_img0[:, :3], 1-timestep, -mask), 1), torch.cat((flow[:, 2:4], flow[:, :2]), 1), scale=scale_list[i])\r\n f0 = (f0 + torch.cat((f1[:, 2:4], f1[:, :2]), 1)) / 2\r\n m0 = (m0 + (-m1)) / 2\r\n flow = flow + f0\r\n 
mask = mask + m0\r\n warped_img0 = warp(img0, flow[:, :2])\r\n warped_img1 = warp(img1, flow[:, 2:4])\r\n mask = torch.sigmoid(mask)\r\n merged = warped_img0 * mask + warped_img1 * (1 - mask)\r\n return merged\r" }, { "identifier": "MetricNet", "path": "models/model_pg104/MetricNet.py", "snippet": "class MetricNet(nn.Module):\n def __init__(self):\n super(MetricNet, self).__init__()\n self.metric_in = nn.Conv2d(14, 64, 3, 1, 1)\n self.metric_net1 = nn.Sequential(\n nn.PReLU(),\n nn.Conv2d(64, 64, 3, 1, 1)\n )\n self.metric_net2 = nn.Sequential(\n nn.PReLU(),\n nn.Conv2d(64, 64, 3, 1, 1)\n )\n self.metric_net3 = nn.Sequential(\n nn.PReLU(),\n nn.Conv2d(64, 64, 3, 1, 1)\n )\n self.metric_out = nn.Sequential(\n nn.PReLU(),\n nn.Conv2d(64, 2, 3, 1, 1),\n nn.Tanh()\n )\n\n def forward(self, img0, img1, flow01, flow10):\n metric0 = F.l1_loss(img0, backwarp(img1, flow01), reduction='none').mean([1], True)\n metric1 = F.l1_loss(img1, backwarp(img0, flow10), reduction='none').mean([1], True)\n\n fwd_occ, bwd_occ = forward_backward_consistency_check(flow01, flow10)\n\n flow01 = torch.cat([flow01[:, 0:1, :, :] / ((flow01.shape[3] - 1.0) / 2.0), flow01[:, 1:2, :, :] / ((flow01.shape[2] - 1.0) / 2.0)], 1)\n flow10 = torch.cat([flow10[:, 0:1, :, :] / ((flow10.shape[3] - 1.0) / 2.0), flow10[:, 1:2, :, :] / ((flow10.shape[2] - 1.0) / 2.0)], 1)\n \n img = torch.cat((img0, img1), 1)\n metric = torch.cat((-metric0, -metric1), 1)\n flow = torch.cat((flow01, flow10), 1)\n occ = torch.cat((fwd_occ.unsqueeze(1), bwd_occ.unsqueeze(1)), 1)\n\n feat = self.metric_in(torch.cat((img, metric, flow, occ), 1))\n feat = self.metric_net1(feat) + feat\n feat = self.metric_net2(feat) + feat\n feat = self.metric_net3(feat) + feat\n metric = self.metric_out(feat) * 10\n\n return metric[:, :1], metric[:, 1:2]" }, { "identifier": "FeatureNet", "path": "models/model_pg104/FeatureNet.py", "snippet": "class FeatureNet(nn.Module):\n \"\"\"The quadratic model\"\"\"\n def __init__(self):\n super(FeatureNet, self).__init__()\n self.block1 = nn.Sequential(\n nn.PReLU(),\n nn.Conv2d(3, 64, 3, 2, 1),\n nn.PReLU(),\n nn.Conv2d(64, 64, 3, 1, 1),\n )\n self.block2 = nn.Sequential(\n nn.PReLU(),\n nn.Conv2d(64, 128, 3, 2, 1),\n nn.PReLU(),\n nn.Conv2d(128, 128, 3, 1, 1),\n )\n self.block3 = nn.Sequential(\n nn.PReLU(),\n nn.Conv2d(128, 192, 3, 2, 1),\n nn.PReLU(),\n nn.Conv2d(192, 192, 3, 1, 1),\n )\n\n def forward(self, x):\n x1 = self.block1(x)\n x2 = self.block2(x1)\n x3 = self.block3(x2)\n\n return x1, x2, x3" }, { "identifier": "GridNet", "path": "models/model_pg104/FusionNet.py", "snippet": "class GridNet(nn.Module):\n def __init__(self, in_channels, in_channels1, in_channels2, in_channels3, out_channels):\n super(GridNet, self).__init__()\n\n self.residual_model_head0 = ResidualBlock(in_channels, 64)\n self.residual_model_head1 = ResidualBlock(in_channels1, 64)\n self.residual_model_head2 = ResidualBlock(in_channels2, 128)\n self.residual_model_head3 = ResidualBlock(in_channels3, 192)\n\n self.residual_model_01=ResidualBlock(64, 64)\n #self.residual_model_02=ResidualBlock(64, 64)\n #self.residual_model_03=ResidualBlock(64, 64)\n self.residual_model_04=ResidualBlock(64, 64)\n self.residual_model_05=ResidualBlock(64, 64)\n self.residual_model_tail=PixelShuffleBlcok(64, 64, out_channels)\n\n\n self.residual_model_11=ResidualBlock(128, 128)\n #self.residual_model_12=ResidualBlock(128, 128)\n #self.residual_model_13=ResidualBlock(128, 128)\n self.residual_model_14=ResidualBlock(128, 128)\n self.residual_model_15=ResidualBlock(128, 128)\n\n 
self.residual_model_21=ResidualBlock(192, 192)\n #self.residual_model_22=ResidualBlock(192, 192)\n #self.residual_model_23=ResidualBlock(192, 192)\n self.residual_model_24=ResidualBlock(192, 192)\n self.residual_model_25=ResidualBlock(192, 192)\n\n #\n\n self.downsample_model_10=DownsampleBlock(64, 128)\n self.downsample_model_20=DownsampleBlock(128, 192)\n\n self.downsample_model_11=DownsampleBlock(64, 128)\n self.downsample_model_21=DownsampleBlock(128, 192)\n\n #self.downsample_model_12=DownsampleBlock(64, 128)\n #self.downsample_model_22=DownsampleBlock(128, 192)\n\n #\n\n #self.upsample_model_03=UpsampleBlock(128, 64)\n #self.upsample_model_13=UpsampleBlock(192, 128)\n\n self.upsample_model_04=UpsampleBlock(128, 64)\n self.upsample_model_14=UpsampleBlock(192, 128)\n\n self.upsample_model_05=UpsampleBlock(128, 64)\n self.upsample_model_15=UpsampleBlock(192, 128)\n\n def forward(self, x, x1, x2, x3):\n X00=self.residual_model_head0(x) + self.residual_model_head1(x1) #--- 182 ~ 185\n # X10 = self.residual_model_head1(x1)\n \n X01=self.residual_model_01(X00) + X00#--- 208 ~ 211 ,AddBackward1213\n\n X10=self.downsample_model_10(X00) + self.residual_model_head2(x2) #--- 186 ~ 189\n X20=self.downsample_model_20(X10) + self.residual_model_head3(x3) #--- 190 ~ 193\n\n residual_11=self.residual_model_11(X10) + X10 #201 ~ 204 , sum AddBackward1206 \n downsample_11=self.downsample_model_11(X01) #214 ~ 217\n X11=residual_11 + downsample_11 #--- AddBackward1218 \n\n residual_21=self.residual_model_21(X20) + X20 #194 ~ 197 , sum AddBackward1199\n downsample_21=self.downsample_model_21(X11) #219 ~ 222\n X21=residual_21 + downsample_21 # AddBackward1223\n\n \n X24=self.residual_model_24(X21) + X21 #--- 224 ~ 227 , AddBackward1229 \n X25=self.residual_model_25(X24) + X24 #--- 230 ~ 233 , AddBackward1235 \n\n\n upsample_14=self.upsample_model_14(X24) #242 ~ 246\n residual_14=self.residual_model_14(X11) + X11 #248 ~ 251, AddBackward1253\n X14=upsample_14 + residual_14 #--- AddBackward1254 \n\n upsample_04=self.upsample_model_04(X14) #268 ~ 272\n residual_04=self.residual_model_04(X01) + X01 #274 ~ 277, AddBackward1279\n X04=upsample_04 + residual_04 #--- AddBackward1280 \n\n upsample_15=self.upsample_model_15(X25) #236 ~ 240\n residual_15=self.residual_model_15(X14) + X14 #255 ~ 258, AddBackward1260\n X15=upsample_15 + residual_15 # AddBackward1261\n\n upsample_05=self.upsample_model_05(X15) # 262 ~ 266\n residual_05=self.residual_model_05(X04) + X04 #281 ~ 284,AddBackward1286\n X05=upsample_05 + residual_05 # AddBackward1287\n\n X_tail=self.residual_model_tail(X05) #288 ~ 291\n\n return X_tail" }, { "identifier": "softsplat", "path": "models/model_pg104/softsplat.py", "snippet": "def softsplat(tenIn:torch.Tensor, tenFlow:torch.Tensor, tenMetric:torch.Tensor, strMode:str):\n\n def float(x):\n return x.float()\n\n tenIn, tenFlow, tenMetric = map(float, [tenIn, tenFlow, tenMetric])\n\n assert(strMode.split('-')[0] in ['sum', 'avg', 'linear', 'soft'])\n\n if strMode == 'sum': assert(tenMetric is None)\n if strMode == 'avg': assert(tenMetric is None)\n if strMode.split('-')[0] == 'linear': assert(tenMetric is not None)\n if strMode.split('-')[0] == 'soft': assert(tenMetric is not None)\n\n if strMode == 'avg':\n tenIn = torch.cat([tenIn, tenIn.new_ones([tenIn.shape[0], 1, tenIn.shape[2], tenIn.shape[3]])], 1)\n\n elif strMode.split('-')[0] == 'linear':\n tenIn = torch.cat([tenIn * tenMetric, tenMetric], 1)\n\n elif strMode.split('-')[0] == 'soft':\n tenIn = torch.cat([tenIn * tenMetric.exp(), 
tenMetric.exp()], 1)\n\n # end\n\n tenOut = softsplat_func.apply(tenIn, tenFlow)\n\n if strMode.split('-')[0] in ['avg', 'linear', 'soft']:\n tenNormalize = tenOut[:, -1:, :, :]\n\n if len(strMode.split('-')) == 1:\n tenNormalize = tenNormalize + 0.0000001\n\n elif strMode.split('-')[1] == 'addeps':\n tenNormalize = tenNormalize + 0.0000001\n\n elif strMode.split('-')[1] == 'zeroeps':\n tenNormalize[tenNormalize == 0.0] = 1.0\n\n elif strMode.split('-')[1] == 'clipeps':\n tenNormalize = tenNormalize.clip(0.0000001, None)\n\n # end\n\n tenOut = tenOut[:, :-1, :, :] / tenNormalize\n # end\n\n return tenOut.half()" } ]
import torch
import torch.nn.functional as F
from models.gmflow.gmflow import GMFlow
from models.model_pg104.IFNet_HDv3 import IFNet
from models.model_pg104.MetricNet import MetricNet
from models.model_pg104.FeatureNet import FeatureNet
from models.model_pg104.FusionNet import GridNet
from models.model_pg104.softsplat import softsplat as warp
5,500
device = torch.device("cuda")

class Model:
    def __init__(self):
        self.flownet = GMFlow().half()
        self.ifnet = IFNet().half()
        self.metricnet = MetricNet().half()
        self.feat_ext = FeatureNet().half()
device = torch.device("cuda")

class Model:
    def __init__(self):
        self.flownet = GMFlow().half()
        self.ifnet = IFNet().half()
        self.metricnet = MetricNet().half()
        self.feat_ext = FeatureNet().half()
self.fusionnet = GridNet(9, 64 * 2, 128 * 2, 192 * 2, 3).half()
4
2023-10-29 18:25:36+00:00
8k
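The `softsplat` helper quoted in the context above implements softmax splatting: features are multiplied by exp(metric), forward-warped along the flow, and renormalised by the warped weights. A minimal 1D NumPy sketch of that weighting-and-normalisation idea, using nearest-neighbour splatting only; this is purely illustrative and not the CUDA kernel the repository actually uses:

```python
import numpy as np

def soft_splat_1d(values, flow, metric, length):
    # Splat each source sample to round(i + flow[i]) with weight exp(metric[i]),
    # then divide by the accumulated weight (the 'soft' normalisation).
    num = np.zeros(length)
    den = np.zeros(length)
    for i, (v, f, m) in enumerate(zip(values, flow, metric)):
        j = int(round(i + f))
        if 0 <= j < length:
            w = np.exp(m)
            num[j] += w * v
            den[j] += w
    return num / (den + 1e-7)

values = np.array([1.0, 2.0, 3.0, 4.0])
flow   = np.array([0.0, 1.0, 0.0, 0.0])   # sample 1 lands on index 2, colliding with sample 2
metric = np.array([0.0, 2.0, 0.0, 0.0])   # sample 1 carries a much larger weight
print(soft_splat_1d(values, flow, metric, 4))  # index 2 is pulled toward the value 2.0
```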
tencent-ailab/PCDMs
caculate_metrics_256.py
[ { "identifier": "FID", "path": "metrics.py", "snippet": "class FID():\n \"\"\"docstring for FID\n Calculates the Frechet Inception Distance (FID) to evalulate GANs\n The FID metric calculates the distance between two distributions of images.\n Typically, we have summary statistics (mean & covariance matrix) of one\n of these distributions, while the 2nd distribution is given by a GAN.\n When run as a stand-alone program, it compares the distribution of\n images that are stored as PNG/JPEG at a specified location with a\n distribution given by summary statistics (in pickle format).\n The FID is calculated by assuming that X_1 and X_2 are the activations of\n the pool_3 layer of the inception net for generated samples and real world\n samples respectivly.\n See --help to see further details.\n Code apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead\n of Tensorflow\n Copyright 2018 Institute of Bioinformatics, JKU Linz\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n \"\"\"\n def __init__(self):\n self.dims = 2048\n self.batch_size = 128\n self.cuda = True\n self.verbose=False\n\n block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[self.dims]\n self.model = InceptionV3([block_idx])\n if self.cuda:\n # TODO: put model into specific GPU\n self.model.cuda()\n\n def __call__(self, images, gt_path):\n \"\"\" images: list of the generated image. The values must lie between 0 and 1.\n gt_path: the path of the ground truth images. 
The values must lie between 0 and 1.\n \"\"\"\n if not os.path.exists(gt_path):\n raise RuntimeError('Invalid path: %s' % gt_path)\n\n\n print('calculate gt_path statistics...')\n m1, s1 = self.compute_statistics_of_path(gt_path, self.verbose)\n print('calculate generated_images statistics...')\n m2, s2 = self.calculate_activation_statistics(images, self.verbose)\n fid_value = self.calculate_frechet_distance(m1, s1, m2, s2)\n return fid_value\n\n\n def calculate_from_disk(self, generated_path, gt_path, img_size):\n \"\"\" \n \"\"\"\n if not os.path.exists(gt_path):\n raise RuntimeError('Invalid path: %s' % gt_path)\n if not os.path.exists(generated_path):\n raise RuntimeError('Invalid path: %s' % generated_path)\n\n print ('exp-path - '+generated_path)\n\n print('calculate gt_path statistics...')\n m1, s1 = self.compute_statistics_of_path(gt_path, self.verbose, img_size)\n print('calculate generated_path statistics...')\n m2, s2 = self.compute_statistics_of_path(generated_path, self.verbose, img_size)\n print('calculate frechet distance...')\n fid_value = self.calculate_frechet_distance(m1, s1, m2, s2)\n print('fid_distance %f' % (fid_value))\n return fid_value \n\n\n def compute_statistics_of_path(self, path , verbose, img_size):\n\n size_flag = '{}_{}'.format(img_size[0], img_size[1])\n npz_file = os.path.join(path, size_flag + '_statistics.npz')\n if os.path.exists(npz_file):\n f = np.load(npz_file)\n m, s = f['mu'][:], f['sigma'][:]\n f.close()\n\n else:\n\n path = pathlib.Path(path)\n files = list(path.glob('*.jpg')) + list(path.glob('*.png'))\n\n imgs = (np.array([(cv2.resize(imread(str(fn)).astype(np.float32),img_size,interpolation=cv2.INTER_CUBIC)) for fn in files]))/255.0\n # Bring images to shape (B, 3, H, W)\n imgs = imgs.transpose((0, 3, 1, 2))\n\n # Rescale images to be between 0 and 1\n\n\n m, s = self.calculate_activation_statistics(imgs, verbose)\n np.savez(npz_file, mu=m, sigma=s)\n\n return m, s \n\n def calculate_activation_statistics(self, images, verbose):\n \"\"\"Calculation of the statistics used by the FID.\n Params:\n -- images : Numpy array of dimension (n_images, 3, hi, wi). The values\n must lie between 0 and 1.\n -- model : Instance of inception model\n -- batch_size : The images numpy array is split into batches with\n batch size batch_size. A reasonable batch size\n depends on the hardware.\n -- dims : Dimensionality of features returned by Inception\n -- cuda : If set to True, use GPU\n -- verbose : If set to True and parameter out_step is given, the\n number of calculated batches is reported.\n Returns:\n -- mu : The mean over samples of the activations of the pool_3 layer of\n the inception model.\n -- sigma : The covariance matrix of the activations of the pool_3 layer of\n the inception model.\n \"\"\"\n act = self.get_activations(images, verbose)\n mu = np.mean(act, axis=0)\n sigma = np.cov(act, rowvar=False)\n return mu, sigma \n\n\n\n def get_activations(self, images, verbose=False):\n \"\"\"Calculates the activations of the pool_3 layer for all images.\n Params:\n -- images : Numpy array of dimension (n_images, 3, hi, wi). The values\n must lie between 0 and 1.\n -- model : Instance of inception model\n -- batch_size : the images numpy array is split into batches with\n batch size batch_size. 
A reasonable batch size depends\n on the hardware.\n -- dims : Dimensionality of features returned by Inception\n -- cuda : If set to True, use GPU\n -- verbose : If set to True and parameter out_step is given, the number\n of calculated batches is reported.\n Returns:\n -- A numpy array of dimension (num images, dims) that contains the\n activations of the given tensor when feeding inception with the\n query tensor.\n \"\"\"\n self.model.eval()\n\n d0 = images.shape[0]\n if self.batch_size > d0:\n print(('Warning: batch size is bigger than the data size. '\n 'Setting batch size to data size'))\n self.batch_size = d0\n\n n_batches = d0 // self.batch_size\n n_used_imgs = n_batches * self.batch_size\n\n pred_arr = np.empty((n_used_imgs, self.dims))\n for i in range(n_batches):\n if verbose:\n print('\\rPropagating batch %d/%d' % (i + 1, n_batches))\n # end='', flush=True)\n start = i * self.batch_size\n end = start + self.batch_size\n\n batch = torch.from_numpy(images[start:end]).type(torch.FloatTensor)\n # batch = Variable(batch, volatile=True)\n if self.cuda:\n batch = batch.cuda()\n\n pred = self.model(batch)[0]\n\n # If model output is not scalar, apply global spatial average pooling.\n # This happens if you choose a dimensionality not equal 2048.\n if pred.shape[2] != 1 or pred.shape[3] != 1:\n pred = adaptive_avg_pool2d(pred, output_size=(1, 1))\n\n pred_arr[start:end] = pred.cpu().data.numpy().reshape(self.batch_size, -1)\n\n if verbose:\n print(' done')\n\n return pred_arr\n\n\n def calculate_frechet_distance(self, mu1, sigma1, mu2, sigma2, eps=1e-6):\n \"\"\"Numpy implementation of the Frechet Distance.\n The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)\n and X_2 ~ N(mu_2, C_2) is\n d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).\n Stable version by Dougal J. 
Sutherland.\n Params:\n -- mu1 : Numpy array containing the activations of a layer of the\n inception net (like returned by the function 'get_predictions')\n for generated samples.\n -- mu2 : The sample mean over activations, precalculated on an \n representive data set.\n -- sigma1: The covariance matrix over activations for generated samples.\n -- sigma2: The covariance matrix over activations, precalculated on an \n representive data set.\n Returns:\n -- : The Frechet Distance.\n \"\"\"\n\n mu1 = np.atleast_1d(mu1)\n mu2 = np.atleast_1d(mu2)\n\n sigma1 = np.atleast_2d(sigma1)\n sigma2 = np.atleast_2d(sigma2)\n\n assert mu1.shape == mu2.shape, \\\n 'Training and test mean vectors have different lengths'\n assert sigma1.shape == sigma2.shape, \\\n 'Training and test covariances have different dimensions'\n\n diff = mu1 - mu2\n\n # Product might be almost singular\n covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)\n if not np.isfinite(covmean).all():\n msg = ('fid calculation produces singular product; '\n 'adding %s to diagonal of cov estimates') % eps\n print(msg)\n offset = np.eye(sigma1.shape[0]) * eps\n covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))\n\n # Numerical error might give slight imaginary component\n if np.iscomplexobj(covmean):\n if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):\n m = np.max(np.abs(covmean.imag))\n raise ValueError('Imaginary component {}'.format(m))\n covmean = covmean.real\n\n tr_covmean = np.trace(covmean)\n\n return (diff.dot(diff) + np.trace(sigma1) +\n np.trace(sigma2) - 2 * tr_covmean)" }, { "identifier": "LPIPS", "path": "metrics.py", "snippet": "class LPIPS():\n def __init__(self, use_gpu=True):\n\n self.model = lpips.LPIPS(net='alex').eval().cuda()\n self.use_gpu=use_gpu\n\n def __call__(self, image_1, image_2):\n \"\"\"\n image_1: images with size (n, 3, w, h) with value [-1, 1]\n image_2: images with size (n, 3, w, h) with value [-1, 1]\n \"\"\"\n result = self.model.forward(image_1, image_2)\n return result\n\n def calculate_from_disk(self, path_1, path_2,img_size, batch_size=64, verbose=False, sort=True):\n\n if sort:\n files_1 = sorted(get_image_list(path_1))\n files_2 = sorted(get_image_list(path_2))\n else:\n files_1 = get_image_list(path_1)\n files_2 = get_image_list(path_2)\n\n\n results=[]\n\n\n d0 = len(files_1)\n if batch_size > d0:\n print(('Warning: batch size is bigger than the data size. 
'\n 'Setting batch size to data size'))\n batch_size = d0\n\n n_batches = d0 // batch_size\n\n\n for i in range(n_batches):\n if verbose:\n print('\\rPropagating batch %d/%d' % (i + 1, n_batches))\n # end='', flush=True)\n start = i * batch_size\n end = start + batch_size\n\n imgs_1 = np.array([cv2.resize(imread(str(fn)).astype(np.float32),img_size,interpolation=cv2.INTER_CUBIC)/255.0 for fn in files_1[start:end]])\n imgs_2 = np.array([cv2.resize(imread(str(fn)).astype(np.float32),img_size,interpolation=cv2.INTER_CUBIC)/255.0 for fn in files_2[start:end]])\n\n imgs_1 = imgs_1.transpose((0, 3, 1, 2))\n imgs_2 = imgs_2.transpose((0, 3, 1, 2))\n\n img_1_batch = torch.from_numpy(imgs_1).type(torch.FloatTensor)\n img_2_batch = torch.from_numpy(imgs_2).type(torch.FloatTensor)\n\n if self.use_gpu:\n img_1_batch = img_1_batch.cuda()\n img_2_batch = img_2_batch.cuda()\n\n with torch.no_grad():\n result = self.model.forward(img_1_batch, img_2_batch)\n\n results.append(result)\n\n\n distance = torch.cat(results,0)[:,0,0,0].mean()\n\n print('lpips: %.3f'%distance)\n return distance" }, { "identifier": "Reconstruction_Metrics", "path": "metrics.py", "snippet": "class Reconstruction_Metrics():\n def __init__(self, metric_list=['ssim', 'psnr', 'l1', 'mae'], data_range=1, win_size=51, multichannel=True):\n self.data_range = data_range\n self.win_size = win_size\n self.multichannel = multichannel\n for metric in metric_list:\n if metric in ['ssim', 'psnr', 'l1', 'mae']:\n setattr(self, metric, True)\n else:\n print('unsupport reconstruction metric: %s'%metric)\n\n\n def __call__(self, inputs, gts):\n \"\"\"\n inputs: the generated image, size (b,c,w,h), data range(0, data_range)\n gts: the ground-truth image, size (b,c,w,h), data range(0, data_range)\n \"\"\"\n result = dict() \n [b,n,w,h] = inputs.size()\n inputs = inputs.view(b*n, w, h).detach().cpu().numpy().astype(np.float32).transpose(1,2,0)\n gts = gts.view(b*n, w, h).detach().cpu().numpy().astype(np.float32).transpose(1,2,0)\n\n if hasattr(self, 'ssim'):\n ssim_value = compare_ssim(inputs, gts, data_range=self.data_range, \n win_size=self.win_size, multichannel=self.multichannel) \n result['ssim'] = ssim_value\n\n\n if hasattr(self, 'psnr'):\n psnr_value = compare_psnr(inputs, gts, self.data_range)\n result['psnr'] = psnr_value\n\n if hasattr(self, 'l1'):\n l1_value = compare_l1(inputs, gts)\n result['l1'] = l1_value \n\n if hasattr(self, 'mae'):\n mae_value = compare_mae(inputs, gts)\n result['mae'] = mae_value \n return result\n\n\n def calculate_from_disk(self, inputs, gts, save_path=None, img_size=(176,256), sort=True, debug=0):\n \"\"\"\n inputs: .txt files, floders, image files (string), image files (list)\n gts: .txt files, floders, image files (string), image files (list)\n \"\"\"\n if sort:\n input_image_list = sorted(get_image_list(inputs))\n gt_image_list = sorted(get_image_list(gts))\n else:\n input_image_list = get_image_list(inputs)\n gt_image_list = get_image_list(gts)\n\n size_flag = '{}_{}'.format(img_size[0], img_size[1])\n npz_file = os.path.join(save_path, size_flag + '_metrics.npz')\n if os.path.exists(npz_file):\n f = np.load(npz_file)\n psnr,ssim,ssim_256,mae,l1=f['psnr'],f['ssim'],f['ssim_256'],f['mae'],f['l1']\n else:\n psnr = []\n ssim = []\n ssim_256 = []\n mae = []\n l1 = []\n names = []\n\n for index in range(len(input_image_list)):\n name = os.path.basename(input_image_list[index])\n names.append(name)\n\n\n img_gt = (cv2.resize(imread(str(gt_image_list[index])).astype(np.float32), 
img_size,interpolation=cv2.INTER_CUBIC)) /255.0\n img_pred = (cv2.resize(imread(str(input_image_list[index])).astype(np.float32), img_size,interpolation=cv2.INTER_CUBIC)) / 255.0\n\n\n if debug != 0:\n plt.subplot('121')\n plt.imshow(img_gt)\n plt.title('Groud truth')\n plt.subplot('122')\n plt.imshow(img_pred)\n plt.title('Output')\n plt.show()\n\n psnr.append(compare_psnr(img_gt, img_pred, data_range=self.data_range))\n ssim.append(compare_ssim(img_gt, img_pred, data_range=self.data_range,\n win_size=self.win_size,multichannel=self.multichannel, channel_axis=2))\n mae.append(compare_mae(img_gt, img_pred))\n l1.append(compare_l1(img_gt, img_pred))\n\n img_gt_256 = img_gt*255.0\n img_pred_256 = img_pred*255.0\n ssim_256.append(compare_ssim(img_gt_256, img_pred_256, gaussian_weights=True, sigma=1.2,\n use_sample_covariance=False, multichannel=True, channel_axis=2,\n data_range=img_pred_256.max() - img_pred_256.min()))\n\n if np.mod(index, 200) == 0:\n print(\n str(index) + ' images processed',\n \"PSNR: %.4f\" % round(np.mean(psnr), 4),\n \"SSIM_256: %.4f\" % round(np.mean(ssim_256), 4),\n \"MAE: %.4f\" % round(np.mean(mae), 4),\n \"l1: %.4f\" % round(np.mean(l1), 4),\n )\n \n if save_path:\n np.savez(save_path + '/' + size_flag + '_metrics.npz', psnr=psnr, ssim=ssim, ssim_256=ssim_256, mae=mae, l1=l1, names=names)\n\n print(\n \"PSNR: %.4f\" % round(np.mean(psnr), 4),\n \"PSNR Variance: %.4f\" % round(np.var(psnr), 4),\n \"SSIM_256: %.4f\" % round(np.mean(ssim_256), 4),\n \"SSIM_256 Variance: %.4f\" % round(np.var(ssim_256), 4), \n \"MAE: %.4f\" % round(np.mean(mae), 4),\n \"MAE Variance: %.4f\" % round(np.var(mae), 4),\n \"l1: %.4f\" % round(np.mean(l1), 4),\n \"l1 Variance: %.4f\" % round(np.var(l1), 4) \n ) \n\n dic = {\"psnr\":[round(np.mean(psnr), 6)],\n \"psnr_variance\": [round(np.var(psnr), 6)],\n \"ssim_256\": [round(np.mean(ssim_256), 6)],\n \"ssim_256_variance\": [round(np.var(ssim_256), 6)],\n \"mae\": [round(np.mean(mae), 6)],\n \"mae_variance\": [round(np.var(mae), 6)],\n \"l1\": [round(np.mean(l1), 6)],\n \"l1_variance\": [round(np.var(l1), 6)] } \n\n return dic" }, { "identifier": "preprocess_path_for_deform_task", "path": "metrics.py", "snippet": "def preprocess_path_for_deform_task(gt_path, distorted_path):\n distorted_image_list = sorted(get_image_list(distorted_path))\n gt_list=[]\n distorated_list=[]\n\n for distorted_image in distorted_image_list:\n image = os.path.basename(distorted_image)[1:]\n image = image.split('_to_')[-1]\n gt_image = gt_path + '/' + image.replace('jpg', 'png')\n if not os.path.isfile(gt_image):\n print(distorted_image, gt_image)\n print('=====')\n continue\n gt_list.append(gt_image)\n distorated_list.append(distorted_image) \n\n return gt_list, distorated_list" } ]
from metrics import FID, LPIPS, Reconstruction_Metrics, preprocess_path_for_deform_task
import torch
5,253
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

fid = FID()
lpips_obj = LPIPS()
rec = Reconstruction_Metrics()

real_path = './datasets/deepfashing/train_lst_256_png'
gt_path = '/datasets/deepfashing/test_lst_256_png'
distorated_path = './PCDMs_Results/stage3_256_results'
results_save_path = distorated_path + '_results.txt'  # save path
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

fid = FID()
lpips_obj = LPIPS()
rec = Reconstruction_Metrics()

real_path = './datasets/deepfashing/train_lst_256_png'
gt_path = '/datasets/deepfashing/test_lst_256_png'
distorated_path = './PCDMs_Results/stage3_256_results'
results_save_path = distorated_path + '_results.txt'  # save path
gt_list, distorated_list = preprocess_path_for_deform_task(gt_path, distorated_path)
3
2023-10-26 13:30:44+00:00
8k
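The FID class in the context above ultimately computes the Frechet distance between two Gaussians fitted to Inception activations, d^2 = ||mu1 - mu2||^2 + Tr(Sigma1 + Sigma2 - 2 (Sigma1 Sigma2)^(1/2)). A small self-contained sketch of that final formula on random feature matrices; the Inception feature extraction itself is omitted and the stand-in features are made up:

```python
import numpy as np
from scipy import linalg

def frechet_distance(feats1, feats2, eps=1e-6):
    # Fit a Gaussian (mean, covariance) to each activation set and compare them.
    mu1, sigma1 = feats1.mean(axis=0), np.cov(feats1, rowvar=False)
    mu2, sigma2 = feats2.mean(axis=0), np.cov(feats2, rowvar=False)
    diff = mu1 - mu2
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():          # numerical safety, as in the snippet above
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
    if np.iscomplexobj(covmean):
        covmean = covmean.real
    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)

rng = np.random.default_rng(0)
a = rng.normal(0.0, 1.0, size=(512, 16))   # stand-in for pool_3 activations of real images
b = rng.normal(0.3, 1.0, size=(512, 16))   # stand-in for activations of generated images
print(frechet_distance(a, b))              # small positive value; 0 would mean identical statistics
```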
Kiteretsu77/VCISR-official
train_code/train_master.py
[ { "identifier": "GANLoss", "path": "loss/gan_loss.py", "snippet": "class GANLoss(nn.Module):\n \"\"\"Define GAN loss.\n From Real-ESRGAN code\n Args:\n gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge'.\n real_label_val (float): The value for real label. Default: 1.0.\n fake_label_val (float): The value for fake label. Default: 0.0.\n loss_weight (float): Loss weight. Default: 1.0.\n Note that loss_weight is only for generators; and it is always 1.0\n for discriminators.\n \"\"\"\n\n def __init__(self, real_label_val=1.0, fake_label_val=0.0, loss_weight=1.0):\n super(GANLoss, self).__init__()\n self.loss_weight = loss_weight\n self.real_label_val = real_label_val\n self.fake_label_val = fake_label_val\n\n # gan type 就是vallina\n self.loss = nn.BCEWithLogitsLoss()\n\n\n def get_target_label(self, input, target_is_real):\n \"\"\"Get target label.\n\n Args:\n input (Tensor): Input tensor.\n target_is_real (bool): Whether the target is real or fake.\n\n Returns:\n (bool | Tensor): Target tensor. Return bool for wgan, otherwise,\n return Tensor.\n \"\"\"\n\n\n target_val = (self.real_label_val if target_is_real else self.fake_label_val)\n return input.new_ones(input.size()) * target_val\n\n def forward(self, input, target_is_real, is_disc=False):\n \"\"\"\n Args:\n input (Tensor): The input for the loss module, i.e., the network\n prediction.\n target_is_real (bool): Whether the targe is real or fake.\n is_disc (bool): Whether the loss for discriminators or not.\n Default: False.\n\n Returns:\n Tensor: GAN loss value.\n \"\"\"\n target_label = self.get_target_label(input, target_is_real)\n\n loss = self.loss(input, target_label)\n\n # loss_weight is always 1.0 for discriminators\n return loss if is_disc else loss * self.loss_weight" }, { "identifier": "PixelLoss", "path": "loss/pixel_loss.py", "snippet": "class PixelLoss(nn.Module):\n def __init__(self) -> None:\n super(PixelLoss, self).__init__()\n\n self.criterion = torch.nn.L1Loss().cuda() # its default will take the mean of this batch\n\n def forward(self, gen_hr, org_hr, batch_idx):\n\n # Calculate general PSNR\n pixel_loss = self.criterion(gen_hr, org_hr)\n\n return pixel_loss" }, { "identifier": "L1_Charbonnier_loss", "path": "loss/pixel_loss.py", "snippet": "class L1_Charbonnier_loss(nn.Module):\n \"\"\"L1 Charbonnierloss.\"\"\"\n def __init__(self):\n super(L1_Charbonnier_loss, self).__init__()\n self.eps = 1e-6 # already use square root\n\n def forward(self, X, Y, batch_idx):\n diff = torch.add(X, -Y)\n error = torch.sqrt(diff * diff + self.eps)\n loss = torch.mean(error)\n return loss" }, { "identifier": "MS_SSIM_L1_LOSS", "path": "loss/pixel_loss.py", "snippet": "class MS_SSIM_L1_LOSS(nn.Module):\n # Have to use cuda, otherwise the speed is too slow.\n def __init__(self, alpha,\n gaussian_sigmas=[0.5, 1.0, 2.0, 4.0, 8.0],\n data_range = 1.0,\n K=(0.01, 0.4),\n compensation=1.0,\n cuda_dev=0,):\n super(MS_SSIM_L1_LOSS, self).__init__()\n self.DR = data_range\n self.C1 = (K[0] * data_range) ** 2\n self.C2 = (K[1] * data_range) ** 2\n self.pad = int(2 * gaussian_sigmas[-1])\n self.alpha = alpha\n self.compensation=compensation\n filter_size = int(4 * gaussian_sigmas[-1] + 1)\n g_masks = torch.zeros((3*len(gaussian_sigmas), 1, filter_size, filter_size))\n for idx, sigma in enumerate(gaussian_sigmas):\n # r0,g0,b0,r1,g1,b1,...,rM,gM,bM\n g_masks[3*idx+0, 0, :, :] = self._fspecial_gauss_2d(filter_size, sigma)\n g_masks[3*idx+1, 0, :, :] = self._fspecial_gauss_2d(filter_size, sigma)\n g_masks[3*idx+2, 0, :, :] = 
self._fspecial_gauss_2d(filter_size, sigma)\n self.g_masks = g_masks.cuda(cuda_dev)\n\n from torch.utils.tensorboard import SummaryWriter\n self.writer = SummaryWriter() \n\n def _fspecial_gauss_1d(self, size, sigma):\n \"\"\"Create 1-D gauss kernel\n Args:\n size (int): the size of gauss kernel\n sigma (float): sigma of normal distribution\n\n Returns:\n torch.Tensor: 1D kernel (size)\n \"\"\"\n coords = torch.arange(size).to(dtype=torch.float)\n coords -= size // 2\n g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))\n g /= g.sum()\n return g.reshape(-1)\n\n def _fspecial_gauss_2d(self, size, sigma):\n \"\"\"Create 2-D gauss kernel\n Args:\n size (int): the size of gauss kernel\n sigma (float): sigma of normal distribution\n\n Returns:\n torch.Tensor: 2D kernel (size x size)\n \"\"\"\n gaussian_vec = self._fspecial_gauss_1d(size, sigma)\n return torch.outer(gaussian_vec, gaussian_vec)\n\n def forward(self, x, y, batch_idx):\n '''\n Args:\n x (tensor): the input for a tensor\n y (tensor): the input for another tensor\n batch_idx (int): the iteration now\n Returns:\n combined_loss (torch): loss value of L1 with MS-SSIM loss\n '''\n\n # b, c, h, w = x.shape\n mux = F.conv2d(x, self.g_masks, groups=3, padding=self.pad)\n muy = F.conv2d(y, self.g_masks, groups=3, padding=self.pad)\n\n mux2 = mux * mux\n muy2 = muy * muy\n muxy = mux * muy\n\n sigmax2 = F.conv2d(x * x, self.g_masks, groups=3, padding=self.pad) - mux2\n sigmay2 = F.conv2d(y * y, self.g_masks, groups=3, padding=self.pad) - muy2\n sigmaxy = F.conv2d(x * y, self.g_masks, groups=3, padding=self.pad) - muxy\n\n # l(j), cs(j) in MS-SSIM\n l = (2 * muxy + self.C1) / (mux2 + muy2 + self.C1) # [B, 15, H, W]\n cs = (2 * sigmaxy + self.C2) / (sigmax2 + sigmay2 + self.C2)\n\n lM = l[:, -1, :, :] * l[:, -2, :, :] * l[:, -3, :, :]\n PIcs = cs.prod(dim=1)\n\n loss_ms_ssim = 1 - lM*PIcs # [B, H, W]\n\n loss_l1 = F.l1_loss(x, y, reduction='none') # [B, 3, H, W]\n # average l1 loss in 3 channels\n gaussian_l1 = F.conv2d(loss_l1, self.g_masks.narrow(dim=0, start=-3, length=3),\n groups=3, padding=self.pad).mean(1) # [B, H, W]\n\n loss_mix = self.alpha * loss_ms_ssim + (1 - self.alpha) * gaussian_l1 / self.DR\n loss_mix = self.compensation*loss_mix # Currently, we set compensation to 1.0\n\n combined_loss = loss_mix.mean()\n \n self.writer.add_scalar('Loss/ms_ssim_loss-iteration', loss_ms_ssim.mean(), batch_idx)\n self.writer.add_scalar('Loss/l1_loss-iteration', gaussian_l1.mean(), batch_idx)\n\n return combined_loss" }, { "identifier": "PerceptualLoss", "path": "loss/perceptual_loss.py", "snippet": "class PerceptualLoss(nn.Module):\n \"\"\"Perceptual loss with commonly used style loss.\n\n Args:\n layer_weights (dict): The weight for each layer of vgg feature.\n Here is an example: {'conv5_4': 1.}, which means the conv5_4\n feature layer (before relu5_4) will be extracted with weight\n 1.0 in calculating losses.\n vgg_type (str): The type of vgg network used as feature extractor.\n Default: 'vgg19'.\n use_input_norm (bool): If True, normalize the input image in vgg.\n Default: True.\n range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].\n Default: False.\n perceptual_weight (float): If `perceptual_weight > 0`, the perceptual\n loss will be calculated and the loss will multiplied by the\n weight. Default: 1.0.\n style_weight (float): If `style_weight > 0`, the style loss will be\n calculated and the loss will multiplied by the weight.\n Default: 0.\n criterion (str): Criterion used for perceptual loss. 
Default: 'l1'.\n \"\"\"\n\n def __init__(self,\n layer_weights,\n vgg_type,\n use_input_norm=True,\n range_norm=False,\n perceptual_weight=1.0,\n style_weight=0.,\n criterion='l1'):\n super(PerceptualLoss, self).__init__()\n self.perceptual_weight = perceptual_weight\n self.layer_weights = layer_weights\n self.vgg = VGGFeatureExtractor(\n layer_name_list=list(layer_weights.keys()),\n vgg_type=vgg_type,\n use_input_norm=use_input_norm,\n range_norm=range_norm).cuda()\n\n self.criterion_type = criterion\n self.criterion = torch.nn.L1Loss()\n self.vgg_type = vgg_type\n\n def forward(self, x, gt):\n \"\"\"Forward function.\n\n Args:\n x (Tensor): Input tensor with shape (n, c, h, w).\n gt (Tensor): Ground-truth tensor with shape (n, c, h, w).\n\n Returns:\n Tensor: Forward results.\n \"\"\"\n # extract vgg features\n x_features = self.vgg(x)\n gt_features = self.vgg(gt.detach())\n\n # calculate perceptual loss\n if self.perceptual_weight > 0:\n percep_loss = 0\n for k in x_features.keys():\n\n percep_loss += self.criterion(x_features[k], gt_features[k]) * self.layer_weights[k]\n percep_loss *= self.perceptual_weight\n else:\n percep_loss = None\n\n # style_loss 被省略了,因为style_weight默认为0\n\n return percep_loss\n\n\n def get_params_num(self):\n self.vgg.get_params_num()" }, { "identifier": "ImageDataset", "path": "architecture/dataset.py", "snippet": "class ImageDataset(Dataset):\n @torch.no_grad()\n def __init__(self, train_lr_paths, degrade_hr_paths, train_hr_paths):\n # print(\"low_res path sample is \", train_lr_paths[0])\n # print(train_hr_paths[0])\n # hr_height, hr_width = hr_shape\n self.transform = transforms.Compose(\n [\n transforms.ToTensor(),\n ]\n )\n\n self.files_lr = train_lr_paths\n self.files_degrade_hr = degrade_hr_paths\n self.files_hr = train_hr_paths\n\n assert(len(self.files_lr) == len(self.files_hr))\n assert(len(self.files_lr) == len(self.files_degrade_hr))\n\n\n def augment(self, imgs, hflip=True, rotation=True):\n \"\"\"Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees).\n\n All the images in the list use the same augmentation.\n\n Args:\n imgs (list[ndarray] | ndarray): Images to be augmented. If the input\n is an ndarray, it will be transformed to a list.\n hflip (bool): Horizontal flip. Default: True.\n rotation (bool): Rotation. Default: True.\n\n Returns:\n imgs (list[ndarray] | ndarray): Augmented images and flows. 
If returned\n results only have one element, just return ndarray.\n\n \"\"\"\n hflip = hflip and random.random() < 0.5\n vflip = rotation and random.random() < 0.5\n rot90 = rotation and random.random() < 0.5\n\n def _augment(img):\n if hflip: # horizontal\n cv2.flip(img, 1, img)\n if vflip: # vertical\n cv2.flip(img, 0, img)\n if rot90:\n img = img.transpose(1, 0, 2)\n return img\n\n\n if not isinstance(imgs, list):\n imgs = [imgs]\n \n imgs = [_augment(img) for img in imgs]\n if len(imgs) == 1:\n imgs = imgs[0]\n\n\n return imgs\n \n\n def __getitem__(self, index):\n \n # Read File\n img_lr = cv2.imread(self.files_lr[index % len(self.files_lr)]) \n img_degrade_hr = cv2.imread(self.files_degrade_hr[index % len(self.files_degrade_hr)]) \n img_hr = cv2.imread(self.files_hr[index % len(self.files_hr)])\n\n # Augmentation\n if random.random() < opt[\"augment_prob\"]:\n img_lr, img_degrade_hr, img_hr = self.augment([img_lr, img_degrade_hr, img_hr])\n \n # Transform to Tensor\n img_lr = self.transform(img_lr)\n img_degrade_hr = self.transform(img_degrade_hr)\n img_hr = self.transform(img_hr) # ToTensor() \n\n\n return {\"lr\": img_lr, \"degrade_hr\": img_degrade_hr, \"hr\": img_hr}\n \n def __len__(self):\n assert(len(self.files_hr) == len(self.files_lr))\n return len(self.files_hr)" }, { "identifier": "generate_low_res_esr", "path": "scripts/generate_lr_esr.py", "snippet": "@torch.no_grad()\ndef generate_low_res_esr(org_opt, verbose=False):\n ''' Generate LR dataset from HR ones by ESR degradation\n Args:\n org_opt (dict): The setting we will use\n verbose (bool): Whether we print out some information\n '''\n\n # Prepare folders\n input_folder = org_opt['input_folder']\n save_folder = org_opt['save_folder']\n if osp.exists(save_folder):\n shutil.rmtree(save_folder)\n os.makedirs(save_folder)\n\n # Scan all images\n input_img_lists, output_img_lists = [], []\n for file in sorted(os.listdir(input_folder)): \n input_img_lists.append(osp.join(input_folder, file))\n output_img_lists.append(osp.join(save_folder, file))\n \n\n # Setting\n batch_size = org_opt[\"degradation_batch_size\"] \n img_length = len(input_img_lists)\n\n obj_img = degradation_v1()\n obj_vc = degradation_v2()\n \n\n # Remove log file\n if os.path.exists(\"datasets/degradation_log.txt\"):\n os.remove(\"datasets/degradation_log.txt\")\n\n\n # Extract image to torch batch\n iter_lists = []\n first_iter_length = min(random.randint(batch_size // 4, batch_size-1), img_length)\n iter_lists.append(first_iter_length)\n\n middle_batches_num = (img_length - first_iter_length) // batch_size\n for _ in range(middle_batches_num):\n iter_lists.append(batch_size)\n\n last_iter_length = img_length - first_iter_length - middle_batches_num * batch_size\n if last_iter_length == 0:\n total_range = middle_batches_num + 1\n else:\n total_range = middle_batches_num + 2\n iter_lists.append(last_iter_length)\n \n assert(sum(iter_lists) == len(input_img_lists))\n \n \n \n\n # Iterate all batches\n for batch_idx in tqdm(range(0, total_range), desc=\"Degradation\"):\n # Make a copy of the org_opt hyperparameter\n opt = copy.deepcopy(org_opt)\n\n\n # Reset kernels in every degradation batch for ESR\n obj_img.reset_kernels(opt)\n obj_vc.reset_kernels(opt)\n\n # Find the needed img lists\n iter_length = iter_lists.pop(0)\n needed_img_lists = []\n store_img_lists = []\n for _ in range(iter_length):\n needed_img_lists.append(input_img_lists.pop(0))\n store_img_lists.append(output_img_lists.pop(0))\n\n\n # Read all images and transform them to tensor\n out = 
None\n for idx in range(len(needed_img_lists)):\n\n input_path = needed_img_lists[idx]\n\n img_bgr = cv2.imread(input_path)\n\n\n if out is None:\n out = np2tensor(img_bgr) # tensor\n else:\n out = torch.cat((out, np2tensor(img_bgr)), 0)\n try:\n _, _, _, _ = out.size()\n except Exception:\n print(batch_idx, first_iter_length, last_iter_length, total_range)\n print(out)\n os._exit(0)\n \n \n\n if opt['degradation_mode'] == 'V1':\n # ESR V1 execuation\n obj_img.degradate_process(out, opt, store_img_lists, verbose = False, use_shuffled=False)\n elif opt['degradation_mode'] == 'V2':\n if random.random() < opt['v1_proportion']:\n # V1 skip mode\n obj_img.degradate_process(out, opt, store_img_lists, verbose = False, use_shuffled=False)\n else:\n obj_vc.degradate_process(out, opt, store_img_lists, verbose = False, use_shuffled=False)\n else:\n raise NotImplementedError\n\n # I think that we need to clean memory here\n del out\n gc.collect()\n if batch_idx != 0 and batch_idx%4 == 0:\n # empty the torch cache at certain iteration of each epoch\n torch.cuda.empty_cache()\n\n assert(len(input_img_lists) == 0)" } ]
import os, sys
import torch
import glob
import time, shutil
import math
import gc
from tqdm import tqdm
from collections import defaultdict
from torch.multiprocessing import Pool, Process, set_start_method
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from loss.gan_loss import GANLoss
from loss.pixel_loss import PixelLoss, L1_Charbonnier_loss, MS_SSIM_L1_LOSS
from loss.perceptual_loss import PerceptualLoss
from architecture.dataset import ImageDataset
from scripts.generate_lr_esr import generate_low_res_esr
5,504
# -*- coding: utf-8 -*-

# torch module import
try:
    set_start_method('spawn')
except RuntimeError:
    pass

# import files from local folder
root_path = os.path.abspath('.')
sys.path.append(root_path)

# Mixed precision training
scaler = torch.cuda.amp.GradScaler()


class train_master(object):
    def __init__(self, options, args, model_name, has_discriminator=False) -> None:
        # General specs setup
        self.args = args
        self.model_name = model_name
        self.options = options
        self.has_discriminator = has_discriminator
        self.loss_init()

        # Generator
        self.call_model()  # generator + discriminator...

        # Optimizer
        self.learning_rate = options['start_learning_rate']
        self.optimizer_g = torch.optim.Adam(self.generator.parameters(), lr=self.learning_rate, betas=(options["adam_beta1"], options["adam_beta2"]))
        if self.has_discriminator:
            self.optimizer_d = torch.optim.Adam(self.discriminator.parameters(), lr=self.learning_rate, betas=(self.options["adam_beta1"], self.options["adam_beta2"]))

        # Train specs
        self.start_iteration = 0
        self.lowest_generator_loss = float("inf")

        # Other auxiliary function
        self.writer = SummaryWriter()
        self.weight_store = defaultdict(int)

        # Options setting
        self.n_iterations = options['train_iterations']
        self.batch_size = options['train_batch_size']
        self.n_cpu = options['train_dataloader_workers']
        self.dataset_path = options['dataset_path']

    def adjust_learning_rate(self, iteration_idx):
        self.learning_rate = self.options['start_learning_rate']
        end_iteration = self.options['train_iterations'] - 2*self.options['decay_iteration']

        # Caclulate a learning rate we need in real-time based on the iteration_idx
        for idx in range(min(end_iteration, iteration_idx)//self.options['decay_iteration']):
            idx = idx+1
            if idx * self.options['decay_iteration'] in self.options['double_milestones']:
                # double the learning rate in milestones
                self.learning_rate = self.learning_rate * 2
            else:
                # else, try to multiply decay_gamma (when we decay, we won't upscale)
                self.learning_rate = self.learning_rate * self.options['decay_gamma']  # should be divisible in all cases

        for param_group in self.optimizer_g.param_groups:
            param_group['lr'] = self.learning_rate

        if self.has_discriminator:
            print("For the Learning Rate Decay, we didn't yet handle discriminator, but we think that it should be necessary")

        assert(self.learning_rate == self.optimizer_g.param_groups[0]['lr'])

    def pixel_loss_load(self):
        if self.options['pixel_loss'] == "L1":
            self.cri_pix = PixelLoss().cuda()
        elif self.options['pixel_loss'] == "L1_Charbonnier":
            self.cri_pix = L1_Charbonnier_loss().cuda()
        elif self.options['pixel_loss'] == "L1_MS-SSIM_loss":
            self.cri_pix = MS_SSIM_L1_LOSS(alpha=self.options['MS-SSIM_alpha']).cuda()

        print("We are using {} loss".format(self.options['pixel_loss']))

    def GAN_loss_load(self):
        # parameter init
        gan_loss_weight = self.options["gan_loss_weight"]
        vgg_type = self.options['train_perceptual_vgg_type']
        layer_weights = self.options['train_perceptual_layer_weights']

        # Preceptual Loss and GAN Loss
        self.cri_pix = torch.nn.L1Loss().cuda()
# -*- coding: utf-8 -*-

# torch module import
try:
    set_start_method('spawn')
except RuntimeError:
    pass

# import files from local folder
root_path = os.path.abspath('.')
sys.path.append(root_path)

# Mixed precision training
scaler = torch.cuda.amp.GradScaler()


class train_master(object):
    def __init__(self, options, args, model_name, has_discriminator=False) -> None:
        # General specs setup
        self.args = args
        self.model_name = model_name
        self.options = options
        self.has_discriminator = has_discriminator
        self.loss_init()

        # Generator
        self.call_model()  # generator + discriminator...

        # Optimizer
        self.learning_rate = options['start_learning_rate']
        self.optimizer_g = torch.optim.Adam(self.generator.parameters(), lr=self.learning_rate, betas=(options["adam_beta1"], options["adam_beta2"]))
        if self.has_discriminator:
            self.optimizer_d = torch.optim.Adam(self.discriminator.parameters(), lr=self.learning_rate, betas=(self.options["adam_beta1"], self.options["adam_beta2"]))

        # Train specs
        self.start_iteration = 0
        self.lowest_generator_loss = float("inf")

        # Other auxiliary function
        self.writer = SummaryWriter()
        self.weight_store = defaultdict(int)

        # Options setting
        self.n_iterations = options['train_iterations']
        self.batch_size = options['train_batch_size']
        self.n_cpu = options['train_dataloader_workers']
        self.dataset_path = options['dataset_path']

    def adjust_learning_rate(self, iteration_idx):
        self.learning_rate = self.options['start_learning_rate']
        end_iteration = self.options['train_iterations'] - 2*self.options['decay_iteration']

        # Caclulate a learning rate we need in real-time based on the iteration_idx
        for idx in range(min(end_iteration, iteration_idx)//self.options['decay_iteration']):
            idx = idx+1
            if idx * self.options['decay_iteration'] in self.options['double_milestones']:
                # double the learning rate in milestones
                self.learning_rate = self.learning_rate * 2
            else:
                # else, try to multiply decay_gamma (when we decay, we won't upscale)
                self.learning_rate = self.learning_rate * self.options['decay_gamma']  # should be divisible in all cases

        for param_group in self.optimizer_g.param_groups:
            param_group['lr'] = self.learning_rate

        if self.has_discriminator:
            print("For the Learning Rate Decay, we didn't yet handle discriminator, but we think that it should be necessary")

        assert(self.learning_rate == self.optimizer_g.param_groups[0]['lr'])

    def pixel_loss_load(self):
        if self.options['pixel_loss'] == "L1":
            self.cri_pix = PixelLoss().cuda()
        elif self.options['pixel_loss'] == "L1_Charbonnier":
            self.cri_pix = L1_Charbonnier_loss().cuda()
        elif self.options['pixel_loss'] == "L1_MS-SSIM_loss":
            self.cri_pix = MS_SSIM_L1_LOSS(alpha=self.options['MS-SSIM_alpha']).cuda()

        print("We are using {} loss".format(self.options['pixel_loss']))

    def GAN_loss_load(self):
        # parameter init
        gan_loss_weight = self.options["gan_loss_weight"]
        vgg_type = self.options['train_perceptual_vgg_type']
        layer_weights = self.options['train_perceptual_layer_weights']

        # Preceptual Loss and GAN Loss
        self.cri_pix = torch.nn.L1Loss().cuda()
self.cri_perceptual = PerceptualLoss(layer_weights, vgg_type, perceptual_weight=self.options["perceptual_loss_weight"]).cuda()
4
2023-10-29 04:33:38+00:00
8k
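The `adjust_learning_rate` method in the row above recomputes the learning rate from scratch at every call: each completed `decay_iteration` block multiplies the rate by `decay_gamma`, except at the listed `double_milestones`, where it is doubled instead. A standalone sketch of that schedule with made-up option values; the option names mirror the snippet, but the numbers are hypothetical and not the repository's defaults:

```python
def lr_at_iteration(iteration_idx, opts):
    # Recompute the learning rate for a given iteration, mirroring the recipe above.
    lr = opts['start_learning_rate']
    end_iteration = opts['train_iterations'] - 2 * opts['decay_iteration']
    steps = min(end_iteration, iteration_idx) // opts['decay_iteration']
    for idx in range(1, steps + 1):
        if idx * opts['decay_iteration'] in opts['double_milestones']:
            lr *= 2                      # bump the rate at a milestone
        else:
            lr *= opts['decay_gamma']    # otherwise decay it
    return lr

opts = {
    'start_learning_rate': 2e-4,   # hypothetical values
    'train_iterations': 100_000,
    'decay_iteration': 10_000,
    'double_milestones': [50_000],
    'decay_gamma': 0.5,
}
for it in (0, 10_000, 50_000, 80_000):
    print(it, lr_at_iteration(it, opts))
```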
DataCanvasIO/LMS
lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/adalora.py
[ { "identifier": "PeftType", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/config.py", "snippet": "class PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"" }, { "identifier": "TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/other.py", "snippet": "TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING = {\n \"t5\": [\"q\", \"k\", \"v\", \"o\", \"wi\", \"wo\"],\n \"mt5\": [\"q\", \"k\", \"v\", \"o\", \"wi_0\", \"wi_1\", \"wo\"],\n \"bart\": [\"q_proj\", \"k_proj\", \"v_proj\", \"out_proj\", \"fc1\", \"fc2\"],\n # \"gpt2\": [\"c_attn\"],\n # \"bloom\": [\"query_key_value\"],\n \"opt\": [\"q_proj\", \"k_proj\", \"v_proj\", \"out_proj\", \"fc1\", \"fc2\"],\n # \"gptj\": [\"q_proj\", \"v_proj\"],\n # \"gpt_neox\": [\"query_key_value\"],\n # \"gpt_neo\": [\"q_proj\", \"v_proj\"],\n # \"bert\": [\"query\", \"value\"],\n \"roberta\": [\"query\", \"key\", \"value\", \"dense\"],\n # \"xlm-roberta\": [\"query\", \"value\"],\n # \"electra\": [\"query\", \"value\"],\n \"deberta-v2\": [\"query_proj\", \"key_proj\", \"value_proj\", \"dense\"],\n # \"deberta\": [\"in_proj\"],\n # \"layoutlm\": [\"query\", \"value\"],\n}" }, { "identifier": "transpose", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/other.py", "snippet": "def transpose(weight, fan_in_fan_out):\n return weight.T if fan_in_fan_out else weight" }, { "identifier": "_get_submodules", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/other.py", "snippet": "def _get_submodules(model, key):\n parent = get_module_by_name(model, \".\".join(key.split(\".\")[:-1]))\n target_name = key.split(\".\")[-1]\n target = get_module_by_name(model, key)\n return parent, target, target_name" }, { "identifier": "_freeze_adapter", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/other.py", "snippet": "def _freeze_adapter(model, adapter_name):\n for n, p in model.named_parameters():\n if adapter_name in n:\n p.requires_grad = False" }, { "identifier": "LoraConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/lora.py", "snippet": "class LoraConfig(PeftConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`LoraModel`].\n\n Args:\n r (`int`): Lora attention dimension.\n target_modules (`Union[List[str],str]`): The names of the modules to apply Lora to.\n lora_alpha (`float`): The alpha parameter for Lora scaling.\n lora_dropout (`float`): The dropout probability for Lora layers.\n fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out).\n For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.:\n bias (`str`): Bias type for Lora. 
Can be 'none', 'all' or 'lora_only'\n modules_to_save (`List[str]`):List of modules apart from LoRA layers to be set as trainable\n and saved in the final checkpoint.\n \"\"\"\n\n r: int = field(default=8, metadata={\"help\": \"Lora attention dimension\"})\n target_modules: Optional[Union[List[str], str]] = field(\n default=None,\n metadata={\n \"help\": \"List of module names or regex expression of the module names to replace with Lora.\"\n \"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' \"\n },\n )\n lora_alpha: int = field(default=None, metadata={\"help\": \"Lora alpha\"})\n lora_dropout: float = field(default=None, metadata={\"help\": \"Lora dropout\"})\n fan_in_fan_out: bool = field(\n default=False,\n metadata={\"help\": \"Set this to True if the layer to replace stores weight like (fan_in, fan_out)\"},\n )\n bias: str = field(default=\"none\", metadata={\"help\": \"Bias type for Lora. Can be 'none', 'all' or 'lora_only'\"})\n modules_to_save: Optional[List[str]] = field(\n default=None,\n metadata={\n \"help\": \"List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. \"\n \"For example, in Sequence Classification or Token Classification tasks, \"\n \"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.\"\n },\n )\n init_lora_weights: bool = field(\n default=True,\n metadata={\"help\": \"Whether to initialize the weights of the Lora layers.\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.LORA" }, { "identifier": "LoraLayer", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/lora.py", "snippet": "class LoraLayer:\n def __init__(\n self,\n in_features: int,\n out_features: int,\n ):\n self.r = {}\n self.lora_alpha = {}\n self.scaling = {}\n self.lora_dropout = nn.ModuleDict({})\n self.lora_A = nn.ModuleDict({})\n self.lora_B = nn.ModuleDict({})\n # Mark the weight as unmerged\n self.merged = False\n self.disable_adapters = False\n self.in_features = in_features\n self.out_features = out_features\n\n def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights):\n self.r[adapter_name] = r\n self.lora_alpha[adapter_name] = lora_alpha\n if lora_dropout > 0.0:\n lora_dropout_layer = nn.Dropout(p=lora_dropout)\n else:\n lora_dropout_layer = nn.Identity()\n\n self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))\n # Actual trainable parameters\n if r > 0:\n self.lora_A.update(nn.ModuleDict({adapter_name: nn.Linear(self.in_features, r, bias=False)}))\n self.lora_B.update(nn.ModuleDict({adapter_name: nn.Linear(r, self.out_features, bias=False)}))\n self.scaling[adapter_name] = lora_alpha / r\n if init_lora_weights:\n self.reset_lora_parameters(adapter_name)\n self.to(self.weight.device)\n\n def reset_lora_parameters(self, adapter_name):\n if adapter_name in self.lora_A.keys():\n # initialize A the same way as the default for nn.Linear and B to zero\n nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5))\n nn.init.zeros_(self.lora_B[adapter_name].weight)" }, { "identifier": "LoraModel", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/lora.py", "snippet": "class LoraModel(torch.nn.Module):\n \"\"\"\n Creates Low Rank Adapter (Lora) model from a pretrained transformers model.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): The model to be adapted.\n config ([`LoraConfig`]): The configuration of the Lora model.\n\n Returns:\n `torch.nn.Module`: The Lora 
model.\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig\n >>> from peft import LoraModel, LoraConfig\n\n >>> config = LoraConfig(\n ... peft_type=\"LORA\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... r=8,\n ... lora_alpha=32,\n ... target_modules=[\"q\", \"v\"],\n ... lora_dropout=0.01,\n ... )\n\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\")\n >>> lora_model = LoraModel(config, model)\n ```\n\n **Attributes**:\n - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.\n - **peft_config** ([`LoraConfig`]): The configuration of the Lora model.\n \"\"\"\n\n def __init__(self, model, config, adapter_name):\n super().__init__()\n self.model = model\n self.forward = self.model.forward\n self.peft_config = config\n self.add_adapter(adapter_name, self.peft_config[adapter_name])\n\n def add_adapter(self, adapter_name, config=None):\n if config is not None:\n model_config = self.model.config.to_dict() if hasattr(self.model.config, \"to_dict\") else self.model.config\n config = self._prepare_lora_config(config, model_config)\n self.peft_config[adapter_name] = config\n self._find_and_replace(adapter_name)\n if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != \"none\":\n raise ValueError(\n \"LoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.\"\n )\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n if self.peft_config[adapter_name].inference_mode:\n _freeze_adapter(self.model, adapter_name)\n\n def _find_and_replace(self, adapter_name):\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. 
\"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n }\n key_list = [key for key, _ in self.model.named_modules()]\n for key in key_list:\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key)\n bias = target.bias is not None\n if isinstance(target, LoraLayer):\n target.update_layer(\n adapter_name,\n lora_config.r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else:\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n eightbit_kwargs = kwargs.copy()\n eightbit_kwargs.update(\n {\n \"has_fp16_weights\": target.state.has_fp16_weights,\n \"memory_efficient_backward\": target.state.memory_efficient_backward,\n \"threshold\": target.state.threshold,\n \"index\": target.index,\n }\n )\n new_module = Linear8bitLt(\n adapter_name, target.in_features, target.out_features, bias=bias, **eightbit_kwargs\n )\n else:\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = Linear(adapter_name, in_features, out_features, bias=bias, **kwargs)\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )\n\n def _replace_module(self, parent_module, child_name, new_module, old_module):\n setattr(parent_module, child_name, new_module)\n new_module.weight = old_module.weight\n if old_module.bias is not None:\n new_module.bias = old_module.bias\n if getattr(old_module, \"state\", None) is not None:\n new_module.state = old_module.state\n new_module.to(old_module.weight.device)\n\n # dispatch to correct device\n for name, module in new_module.named_modules():\n if \"lora_\" in name:\n module.to(old_module.weight.device)\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.model, name)\n\n def get_peft_config_as_dict(self, inference: bool = False):\n config_dict = {}\n for key, value in self.peft_config.items():\n config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}\n if inference:\n config[\"inference_mode\"] = True\n config_dict[key] = config\n return config\n\n def _set_adapter_layers(self, enabled=True):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.disable_adapters = False if enabled else True\n\n def enable_adapter_layers(self):\n self._set_adapter_layers(enabled=True)\n\n def disable_adapter_layers(self):\n self._set_adapter_layers(enabled=False)\n\n def set_adapter(self, adapter_name):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n if module.merged:\n warnings.warn(\"Adapter cannot be set when the model is merged. Unmerging the model first.\")\n module.unmerge()\n module.active_adapter = adapter_name\n\n def merge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.merge()\n\n def unmerge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.unmerge()\n\n @staticmethod\n def _prepare_lora_config(peft_config, model_config):\n if peft_config.target_modules is None:\n if model_config[\"model_type\"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:\n raise ValueError(\"Please specify `target_modules` in `peft_config`\")\n peft_config.target_modules = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config[\"model_type\"]]\n if peft_config.inference_mode:\n peft_config.merge_weights = True\n return peft_config\n\n def merge_and_unload(self):\n r\"\"\"\n This method merges the LoRa layers into the base model. 
This is needed if someone wants to use the base model\n as a standalone model.\n \"\"\"\n if getattr(self.config, \"model_type\", None) == \"gpt2\":\n raise ValueError(\"GPT2 models are not supported for merging LORA layers\")\n\n if getattr(self.model, \"is_loaded_in_8bit\", False):\n raise ValueError(\"Cannot merge LORA layers when the model is loaded in 8-bit mode\")\n\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n try:\n parent, target, target_name = _get_submodules(self.model, key)\n except AttributeError:\n continue\n if isinstance(target, LoraLayer):\n bias = target.bias is not None\n new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)\n target.merge()\n self._replace_module(parent, target_name, new_module, target)\n\n # save any additional trainable modules part of `modules_to_save`\n if isinstance(target, ModulesToSaveWrapper):\n setattr(parent, target_name, target.modules_to_save[target.active_adapter])\n\n return self.model\n\n def add_weighted_adapter(self, adapters, weights, adapter_name):\n if len({self.peft_config[adapter].r for adapter in adapters}) != 1:\n raise ValueError(\"All adapters must have the same r value\")\n self.peft_config[adapter_name] = self.peft_config[adapters[0]]\n self.peft_config[adapter_name].lora_alpha = self.peft_config[adapters[0]].r\n self._find_and_replace(adapter_name)\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n _freeze_adapter(self.model, adapter_name)\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n _, target, _ = _get_submodules(self.model, key)\n if isinstance(target, LoraLayer):\n target.lora_A[adapter_name].weight.data = target.lora_A[adapter_name].weight.data * 0.0\n target.lora_B[adapter_name].weight.data = target.lora_B[adapter_name].weight.data * 0.0\n for adapter, weight in zip(adapters, weights):\n if adapter not in target.lora_A:\n continue\n target.lora_A[adapter_name].weight.data += (\n target.lora_A[adapter].weight.data * weight * target.scaling[adapter]\n )\n target.lora_B[adapter_name].weight.data += target.lora_B[adapter].weight.data * weight" }, { "identifier": "mark_only_lora_as_trainable", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/lora.py", "snippet": "def mark_only_lora_as_trainable(model: nn.Module, bias: str = \"none\") -> None:\n for n, p in model.named_parameters():\n if \"lora_\" not in n:\n p.requires_grad = False\n if bias == \"none\":\n return\n elif bias == \"all\":\n for n, p in model.named_parameters():\n if \"bias\" in n:\n p.requires_grad = True\n elif bias == \"lora_only\":\n for m in model.modules():\n if isinstance(m, LoraLayer) and hasattr(m, \"bias\") and m.bias is not None:\n m.bias.requires_grad = True\n else:\n raise NotImplementedError" } ]
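The `LoraLayer.update_layer` snippet in the context above builds the low-rank update as two bias-free linears, `lora_A: in_features -> r` and `lora_B: r -> out_features`, scaled by `lora_alpha / r`, with A initialised Kaiming-uniform and B zeroed so the adapter starts as a no-op. A minimal self-contained sketch of that construction follows; the forward combination shown is the usual LoRA formulation and is an assumption here, since the excerpt only shows how the layer is built, not its forward pass.

```python
# Toy LoRA-style linear: frozen base weight plus trainable low-rank A/B factors.
import math
import torch
import torch.nn as nn

class TinyLoraLinear(nn.Module):
    def __init__(self, in_features, out_features, r=8, lora_alpha=16, lora_dropout=0.0):
        super().__init__()
        self.base = nn.Linear(in_features, out_features)
        self.base.weight.requires_grad_(False)          # base weights stay frozen
        self.base.bias.requires_grad_(False)
        self.lora_A = nn.Linear(in_features, r, bias=False)
        self.lora_B = nn.Linear(r, out_features, bias=False)
        self.dropout = nn.Dropout(lora_dropout) if lora_dropout > 0 else nn.Identity()
        self.scaling = lora_alpha / r
        # Same initialisation as reset_lora_parameters: A ~ Kaiming uniform, B = 0,
        # so the adapter contributes nothing until it is trained.
        nn.init.kaiming_uniform_(self.lora_A.weight, a=math.sqrt(5))
        nn.init.zeros_(self.lora_B.weight)

    def forward(self, x):
        # Assumed standard LoRA forward: base output plus scaled low-rank update.
        return self.base(x) + self.lora_B(self.lora_A(self.dropout(x))) * self.scaling

layer = TinyLoraLinear(32, 32)
x = torch.randn(4, 32)
print(layer(x).shape)                               # torch.Size([4, 32])
print(torch.allclose(layer(x), layer.base(x)))      # True: B starts at zero
```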
import importlib
import re
import warnings

import torch
import torch.nn as nn
import torch.nn.functional as F
import bitsandbytes as bnb
from dataclasses import dataclass, field
from typing import Optional
from transformers.pytorch_utils import Conv1D

from ..utils import (
    TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
    PeftType,
    _freeze_adapter,
    _get_submodules,
    transpose,
)
from .lora import (
    LoraConfig,
    LoraLayer,
    LoraModel,
    mark_only_lora_as_trainable,
)

6,011
def is_bnb_available(): return importlib.util.find_spec("bitsandbytes") is not None if is_bnb_available(): @dataclass class AdaLoraConfig(LoraConfig): """ This is the configuration class to store the configuration of a [`~peft.AdaLora`]. Args: target_r (`int`): The target average rank of incremental matrix. init_r (`int`): The initial rank for each incremental matrix. tinit (`int`): The steps of initial fine-tuning warmup. tfinal (`int`): The step of final fine-tuning. deltaT (`int`): The time internval between two budget allocations. beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing. beta2 (`float`): The hyperparameter of EMA for undertainty quantification. orth_reg_weight (`float`): The coefficient of orthogonal regularization. total_step (`int`): The total training steps that should be specified before training. rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator. """ target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."}) init_r: int = field(default=12, metadata={"help": "Intial Lora matrix dimension."}) tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."}) tfinal: int = field(default=0, metadata={"help": "The steps of final warmup."}) deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."}) beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."}) total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."}) rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."}) def __post_init__(self): self.peft_type = PeftType.ADALORA class AdaLoraModel(LoraModel): """ Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper: https://openreview.net/pdf?id=lq62uWRJjiY Args: model ([`transformers.PreTrainedModel`]): The model to be adapted. config ([`AdaLoraConfig`]): The configuration of the AdaLora model. Returns: `torch.nn.Module`: The AdaLora model. Example:: >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig >>> config = AdaLoraConfig( peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.01, ) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> model = AdaLoraModel(config, model) **Attributes**: - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted. - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model. """ def __init__(self, model, config, adapter_name): nn.Module.__init__(self) self.model = model self.peft_config = config self.add_adapter(adapter_name, self.peft_config[adapter_name]) def add_adapter(self, adapter_name, config=None): if config is not None: model_config = self.model.config.to_dict() if hasattr(self.model.config, "to_dict") else self.model.config config = self._prepare_adalora_config(config, model_config) self.peft_config[adapter_name] = config self._find_and_replace(adapter_name) if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != "none": raise ValueError( "AdaLoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters." 
) traininable_mode_counter = 0 for config in self.peft_config.values(): if not config.inference_mode: traininable_mode_counter += 1 if traininable_mode_counter > 1: raise ValueError( "AdaLoraModel supports only 1 trainable adapter. " "When using multiple adapters, set inference_mode to True for all adapters except the one you want to train." ) mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias) if self.peft_config[adapter_name].inference_mode:
def is_bnb_available(): return importlib.util.find_spec("bitsandbytes") is not None if is_bnb_available(): @dataclass class AdaLoraConfig(LoraConfig): """ This is the configuration class to store the configuration of a [`~peft.AdaLora`]. Args: target_r (`int`): The target average rank of incremental matrix. init_r (`int`): The initial rank for each incremental matrix. tinit (`int`): The steps of initial fine-tuning warmup. tfinal (`int`): The step of final fine-tuning. deltaT (`int`): The time internval between two budget allocations. beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing. beta2 (`float`): The hyperparameter of EMA for undertainty quantification. orth_reg_weight (`float`): The coefficient of orthogonal regularization. total_step (`int`): The total training steps that should be specified before training. rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator. """ target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."}) init_r: int = field(default=12, metadata={"help": "Intial Lora matrix dimension."}) tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."}) tfinal: int = field(default=0, metadata={"help": "The steps of final warmup."}) deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."}) beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."}) total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."}) rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."}) def __post_init__(self): self.peft_type = PeftType.ADALORA class AdaLoraModel(LoraModel): """ Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper: https://openreview.net/pdf?id=lq62uWRJjiY Args: model ([`transformers.PreTrainedModel`]): The model to be adapted. config ([`AdaLoraConfig`]): The configuration of the AdaLora model. Returns: `torch.nn.Module`: The AdaLora model. Example:: >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig >>> config = AdaLoraConfig( peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.01, ) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> model = AdaLoraModel(config, model) **Attributes**: - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted. - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model. """ def __init__(self, model, config, adapter_name): nn.Module.__init__(self) self.model = model self.peft_config = config self.add_adapter(adapter_name, self.peft_config[adapter_name]) def add_adapter(self, adapter_name, config=None): if config is not None: model_config = self.model.config.to_dict() if hasattr(self.model.config, "to_dict") else self.model.config config = self._prepare_adalora_config(config, model_config) self.peft_config[adapter_name] = config self._find_and_replace(adapter_name) if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != "none": raise ValueError( "AdaLoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters." 
) traininable_mode_counter = 0 for config in self.peft_config.values(): if not config.inference_mode: traininable_mode_counter += 1 if traininable_mode_counter > 1: raise ValueError( "AdaLoraModel supports only 1 trainable adapter. " "When using multiple adapters, set inference_mode to True for all adapters except the one you want to train." ) mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias) if self.peft_config[adapter_name].inference_mode:
_freeze_adapter(self.model, adapter_name)
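After `_find_and_replace`, the adapter code marks trainable parameters purely by name: `mark_only_lora_as_trainable` freezes everything without `"lora_"` in its parameter name, and `_freeze_adapter` later freezes any parameter whose name contains the adapter name when `inference_mode` is set. A minimal sketch of that name-based freezing follows; the model is a toy stand-in, and the flat `lora_A_default` naming is an illustrative simplification of the real nested module names.

```python
# Name-based parameter freezing, as used by mark_only_lora_as_trainable / _freeze_adapter.
import torch.nn as nn

model = nn.Module()
model.proj = nn.Linear(8, 8)                       # pretend base-model weight
model.lora_A_default = nn.Linear(8, 2, bias=False) # pretend adapter "default" factors
model.lora_B_default = nn.Linear(2, 8, bias=False)

def mark_only_lora_as_trainable(model: nn.Module) -> None:
    for name, param in model.named_parameters():
        param.requires_grad = "lora_" in name      # only adapter factors stay trainable

def freeze_adapter(model: nn.Module, adapter_name: str) -> None:
    for name, param in model.named_parameters():
        if adapter_name in name:                   # freeze everything belonging to this adapter
            param.requires_grad = False

mark_only_lora_as_trainable(model)
print([n for n, p in model.named_parameters() if p.requires_grad])
# ['lora_A_default.weight', 'lora_B_default.weight']
freeze_adapter(model, "default")                   # e.g. when inference_mode is set
print(any(p.requires_grad for p in model.parameters()))  # False
```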
4
2023-10-30 10:50:32+00:00
8k
imhotep/hass-unifi-access
custom_components/unifi_access/lock.py
[ { "identifier": "DOMAIN", "path": "custom_components/unifi_access/const.py", "snippet": "DOMAIN = \"unifi_access\"" }, { "identifier": "UnifiAccessDoor", "path": "custom_components/unifi_access/door.py", "snippet": "class UnifiAccessDoor:\n \"\"\"Unifi Access Door Class.\"\"\"\n\n def __init__(\n self,\n door_id: str,\n name: str,\n door_position_status: str,\n door_lock_relay_status: str,\n hub,\n ) -> None:\n \"\"\"Initialize door.\"\"\"\n self._callbacks: set[Callable] = set()\n self._is_locking = False\n self._is_unlocking = False\n self._hub = hub\n self._id = door_id\n self.name = name\n self.door_position_status = door_position_status\n self.door_lock_relay_status = door_lock_relay_status\n self.doorbell_request_id = None\n\n @property\n def doorbell_pressed(self) -> bool:\n \"\"\"Get doorbell pressed status.\"\"\"\n return self.doorbell_request_id is not None\n\n @property\n def id(self) -> str:\n \"\"\"Get door ID.\"\"\"\n return self._id\n\n @property\n def is_open(self):\n \"\"\"Get door status.\"\"\"\n return self.door_position_status == \"open\"\n\n @property\n def is_locked(self):\n \"\"\"Solely used for locked state when calling lock.\"\"\"\n return self.door_lock_relay_status == \"lock\"\n\n @property\n def is_locking(self):\n \"\"\"Solely used for locking state when calling lock.\"\"\"\n return False\n\n @property\n def is_unlocking(self):\n \"\"\"Solely used for unlocking state when calling unlock.\"\"\"\n return self._is_unlocking\n\n def unlock(self) -> None:\n \"\"\"Unlock door.\"\"\"\n if self.is_locked:\n self._is_unlocking = True\n self._hub.unlock_door(self._id)\n self._is_unlocking = False\n _LOGGER.info(\"Door with door ID %s is unlocked\", self.id)\n else:\n _LOGGER.error(\"Door with door ID %s is already unlocked\", self.id)\n\n def register_callback(self, callback: Callable[[], None]) -> None:\n \"\"\"Register callback, called when Roller changes state.\"\"\"\n self._callbacks.add(callback)\n\n def remove_callback(self, callback: Callable[[], None]) -> None:\n \"\"\"Remove previously registered callback.\"\"\"\n self._callbacks.discard(callback)\n\n def publish_updates(self) -> None:\n \"\"\"Schedule call all registered callbacks.\"\"\"\n for callback in self._callbacks:\n callback()" }, { "identifier": "UnifiAccessCoordinator", "path": "custom_components/unifi_access/hub.py", "snippet": "class UnifiAccessCoordinator(DataUpdateCoordinator):\n \"\"\"Unifi Access Coordinator. 
This is mostly used for local polling.\"\"\"\n\n def __init__(self, hass: HomeAssistant, hub) -> None:\n \"\"\"Initialize Unifi Access Coordinator.\"\"\"\n update_interval = timedelta(seconds=3) if hub.use_polling is True else None\n\n super().__init__(\n hass,\n _LOGGER,\n name=\"Unifi Access Coordinator\",\n update_interval=update_interval,\n )\n self.hub = hub\n\n async def _async_update_data(self):\n \"\"\"Handle Unifi Access Coordinator updates.\"\"\"\n try:\n async with asyncio.timeout(10):\n return await self.hass.async_add_executor_job(self.hub.update)\n except ApiAuthError as err:\n raise ConfigEntryAuthFailed from err\n except ApiError as err:\n raise UpdateFailed(\"Error communicating with API\") from err" }, { "identifier": "UnifiAccessHub", "path": "custom_components/unifi_access/hub.py", "snippet": "class UnifiAccessHub:\n \"\"\"UnifiAccessHub.\n\n This class takes care of interacting with the Unifi Access API.\n \"\"\"\n\n def __init__(\n self, host: str, verify_ssl: bool = False, use_polling: bool = False\n ) -> None:\n \"\"\"Initialize.\"\"\"\n self.use_polling = use_polling\n self.verify_ssl = verify_ssl\n if self.verify_ssl is False:\n _LOGGER.warning(\"SSL Verification disabled for %s\", host)\n urllib3.disable_warnings()\n\n host_parts = host.split(\":\")\n parsed_host = urlparse(host)\n\n hostname = parsed_host.hostname if parsed_host.hostname else host_parts[0]\n port = (\n parsed_host.port\n if parsed_host.port\n else (host_parts[1] if len(host_parts) > 1 else UNIFI_ACCESS_API_PORT)\n )\n self._api_token = None\n self.host = f\"https://{hostname}:{port}\"\n self._http_headers = {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n }\n self.websocket_host = f\"wss://{hostname}:{port}\"\n self._websocket_headers = {\n \"Upgrade\": \"websocket\",\n \"Connection\": \"Upgrade\",\n }\n self._doors: dict[str, UnifiAccessDoor] = {}\n self.update_t = None\n\n @property\n def doors(self):\n \"\"\"Get current doors.\"\"\"\n return self._doors\n\n def set_api_token(self, api_token):\n \"\"\"Set API Access Token.\"\"\"\n self._api_token = api_token\n self._http_headers[\"Authorization\"] = f\"Bearer {self._api_token}\"\n self._websocket_headers[\"Authorization\"] = f\"Bearer {self._api_token}\"\n\n def update(self):\n \"\"\"Get latest door data.\"\"\"\n _LOGGER.info(\n \"Getting door updates from Unifi Access %s Use Polling %s\",\n self.host,\n self.use_polling,\n )\n data = self._make_http_request(f\"{self.host}{DOORS_URL}\")\n\n for _i, door in enumerate(data):\n door_id = door[\"id\"]\n if door_id in self.doors:\n existing_door = self.doors[door_id]\n existing_door.name = door[\"name\"]\n existing_door.door_position_status = door[\"door_position_status\"]\n existing_door.door_lock_relay_status = door[\"door_lock_relay_status\"]\n elif door[\"is_bind_hub\"] is True:\n self._doors[door_id] = UnifiAccessDoor(\n door_id=door[\"id\"],\n name=door[\"name\"],\n door_position_status=door[\"door_position_status\"],\n door_lock_relay_status=door[\"door_lock_relay_status\"],\n hub=self,\n )\n if self.update_t is None and self.use_polling is False:\n self.start_continuous_updates()\n\n return self._doors\n\n def update_door(self, door_id: int) -> None:\n \"\"\"Get latest door data for a specific door.\"\"\"\n _LOGGER.info(\"Getting door update from Unifi Access with id %s\", door_id)\n updated_door = self._make_http_request(f\"{self.host}{DOORS_URL}/{door_id}\")\n door_id = updated_door[\"id\"]\n _LOGGER.info(\"Got door update %s\", updated_door)\n if door_id 
in self.doors:\n existing_door: UnifiAccessDoor = self.doors[door_id]\n existing_door.door_lock_relay_status = updated_door[\n \"door_lock_relay_status\"\n ]\n existing_door.door_position_status = updated_door[\"door_position_status\"]\n existing_door.name = updated_door[\"name\"]\n _LOGGER.info(\"Door %s updated\", door_id)\n\n def authenticate(self, api_token: str) -> str:\n \"\"\"Test if we can authenticate with the host.\"\"\"\n self.set_api_token(api_token)\n _LOGGER.info(\"Authenticating %s\", self.host)\n try:\n self.update()\n except ApiError:\n _LOGGER.error(\n \"Could perform action with %s. Check host and token\", self.host\n )\n return \"api_error\"\n except ApiAuthError:\n _LOGGER.error(\n \"Could not authenticate with %s. Check host and token\", self.host\n )\n return \"api_auth_error\"\n except SSLError:\n _LOGGER.error(\"Error validating SSL Certificate for %s\", self.host)\n return \"ssl_error\"\n except ConnError:\n _LOGGER.error(\"Cannot connect to %s\", self.host)\n return \"cannot_connect\"\n\n return \"ok\"\n\n def unlock_door(self, door_id: str) -> None:\n \"\"\"Test if we can authenticate with the host.\"\"\"\n _LOGGER.info(\"Unlocking door with id %s\", door_id)\n self._make_http_request(\n f\"{self.host}{DOOR_UNLOCK_URL}\".format(door_id=door_id), \"PUT\"\n )\n\n def _make_http_request(self, url, method=\"GET\") -> dict:\n \"\"\"Make HTTP request to Unifi Access API server.\"\"\"\n r = request(\n method,\n url,\n headers=self._http_headers,\n verify=self.verify_ssl,\n timeout=10,\n )\n\n if r.status_code == 401:\n raise ApiAuthError\n\n if r.status_code != 200:\n raise ApiError\n\n response = r.json()\n\n return response[\"data\"]\n\n def on_message(self, ws: websocket.WebSocketApp, message):\n \"\"\"Handle messages received on the websocket client.\n\n Doorbell presses are relying on door names so if those are not unique, it may cause some issues\n \"\"\"\n # _LOGGER.info(f\"Got update {message}\")\n if \"Hello\" not in message:\n update = json.loads(message)\n existing_door = None\n match update[\"event\"]:\n case \"access.dps_change\":\n door_id = update[\"data\"][\"door_id\"]\n _LOGGER.info(\"DPS Change %s\", door_id)\n if door_id in self.doors:\n existing_door = self.doors[door_id]\n existing_door.door_position_status = update[\"data\"][\"status\"]\n _LOGGER.info(\n \"DPS Change of door %s with ID %s Updated\",\n existing_door.name,\n door_id,\n )\n case \"access.data.device.remote_unlock\":\n door_id = update[\"data\"][\"unique_id\"]\n _LOGGER.info(\"Remote Unlock %s\", door_id)\n if door_id in self.doors:\n existing_door = self.doors[door_id]\n existing_door.door_lock_relay_status = \"unlock\"\n _LOGGER.info(\n \"Remote Unlock of door %s with ID %s updated\",\n existing_door.name,\n door_id,\n )\n case \"access.data.device.update\":\n door_id = update[\"data\"][\"door\"][\"unique_id\"]\n _LOGGER.info(\"Device Update via websocket %s\", door_id)\n if door_id in self.doors:\n existing_door = self.doors[door_id]\n self.update_door(door_id)\n _LOGGER.info(\n \"Door name %s with ID %s updated\",\n existing_door.name,\n door_id,\n )\n case \"access.remote_view\":\n door_name = update[\"data\"][\"door_name\"]\n _LOGGER.info(\"Doorbell Press %s\", door_name)\n existing_door = next(\n (\n door\n for door in self.doors.values()\n if door.name == door_name\n ),\n None,\n )\n if existing_door is not None:\n existing_door.doorbell_request_id = update[\"data\"][\"request_id\"]\n _LOGGER.info(\n \"Doorbell press on %s Request ID %s\",\n door_name,\n 
update[\"data\"][\"request_id\"],\n )\n case \"access.remote_view.change\":\n doorbell_request_id = update[\"data\"][\"remote_call_request_id\"]\n _LOGGER.info(\n \"Doorbell press stopped. Request ID %s\", doorbell_request_id\n )\n existing_door = next(\n (\n door\n for door in self.doors.values()\n if door.doorbell_request_id == doorbell_request_id\n ),\n None,\n )\n if existing_door is not None:\n existing_door.doorbell_request_id = None\n _LOGGER.info(\n \"Doorbell press stopped on %s Request ID %s\",\n existing_door.name,\n doorbell_request_id,\n )\n\n if existing_door is not None:\n existing_door.publish_updates()\n\n def on_error(self, ws: websocket.WebSocketApp, error):\n \"\"\"Handle errors in the websocket client.\"\"\"\n _LOGGER.error(\"Got error %s\", error)\n\n def on_open(self, ws: websocket.WebSocketApp):\n \"\"\"Show message on connection.\"\"\"\n _LOGGER.info(\"Websocket connection established\")\n\n def on_close(self, ws: websocket.WebSocketApp, close_status_code, close_msg):\n \"\"\"Handle websocket closures.\"\"\"\n _LOGGER.error(\n \"Websocket connection closed code: %s message: %s\",\n close_status_code,\n close_msg,\n )\n sslopt = None\n if self.verify_ssl is False:\n sslopt = {\"cert_reqs\": ssl.CERT_NONE}\n ws.run_forever(sslopt=sslopt, reconnect=5)\n\n def start_continuous_updates(self):\n \"\"\"Start listening for updates in a separate thread using websocket-client.\"\"\"\n self.update_t = Thread(target=self.listen_for_updates)\n self.update_t.daemon = True\n self.update_t.start()\n\n def listen_for_updates(self):\n \"\"\"Create a websocket client and start listening for updates.\"\"\"\n uri = f\"{self.websocket_host}{DEVICE_NOTIFICATIONS_URL}\"\n _LOGGER.info(\"Listening for updates on %s\", uri)\n ws = websocket.WebSocketApp(\n uri,\n header=self._websocket_headers,\n on_message=self.on_message,\n on_error=self.on_error,\n on_open=self.on_open,\n on_close=self.on_close,\n )\n sslopt = None\n if self.verify_ssl is False:\n sslopt = {\"cert_reqs\": ssl.CERT_NONE}\n ws.run_forever(sslopt=sslopt, reconnect=5)" } ]
import logging
from typing import Any

from homeassistant.components.lock import LockEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity

from .const import DOMAIN
from .door import UnifiAccessDoor
from .hub import UnifiAccessCoordinator, UnifiAccessHub
3,614
"""Platform for sensor integration.""" from __future__ import annotations _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Add Binary Sensor for passed config entry.""" hub: UnifiAccessHub = hass.data[DOMAIN][config_entry.entry_id] coordinator = UnifiAccessCoordinator(hass, hub) await coordinator.async_config_entry_first_refresh() async_add_entities( UnifiDoorLockEntity(coordinator, key) for key, value in coordinator.data.items() ) class UnifiDoorLockEntity(CoordinatorEntity, LockEntity): """Unifi Access Door Lock.""" should_poll = False def __init__(self, coordinator, door_id) -> None: """Initialize Unifi Access Door Lock.""" super().__init__(coordinator, context=id) self.id = door_id
"""Platform for sensor integration.""" from __future__ import annotations _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Add Binary Sensor for passed config entry.""" hub: UnifiAccessHub = hass.data[DOMAIN][config_entry.entry_id] coordinator = UnifiAccessCoordinator(hass, hub) await coordinator.async_config_entry_first_refresh() async_add_entities( UnifiDoorLockEntity(coordinator, key) for key, value in coordinator.data.items() ) class UnifiDoorLockEntity(CoordinatorEntity, LockEntity): """Unifi Access Door Lock.""" should_poll = False def __init__(self, coordinator, door_id) -> None: """Initialize Unifi Access Door Lock.""" super().__init__(coordinator, context=id) self.id = door_id
self.door: UnifiAccessDoor = self.coordinator.data[door_id]
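The lock entity gets push updates through the callback pattern on `UnifiAccessDoor` shown in the context: an entity registers a callable, the websocket handler mutates the door's state, and `publish_updates()` invokes every registered callback. A minimal sketch of that pattern follows, using a toy `Door` class rather than the Home Assistant entity machinery.

```python
# Sketch of the register_callback / publish_updates pattern used for push updates.
from typing import Callable

class Door:
    def __init__(self, name: str) -> None:
        self.name = name
        self.door_position_status = "close"
        self._callbacks: set[Callable[[], None]] = set()

    def register_callback(self, callback: Callable[[], None]) -> None:
        self._callbacks.add(callback)

    def remove_callback(self, callback: Callable[[], None]) -> None:
        self._callbacks.discard(callback)

    def publish_updates(self) -> None:
        for callback in self._callbacks:
            callback()                      # each registered listener re-reads door state

door = Door("Front Door")
door.register_callback(lambda: print(f"{door.name} is now {door.door_position_status}"))
door.door_position_status = "open"          # e.g. set by the access.dps_change websocket event
door.publish_updates()                      # Front Door is now open
```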
1
2023-10-27 20:34:27+00:00
8k
pengsongyou/lseg_feature_extraction
fusion_scannet.py
[ { "identifier": "LSeg_MultiEvalModule", "path": "additional_utils/models.py", "snippet": "class LSeg_MultiEvalModule(DataParallel):\n \"\"\"Multi-size Segmentation Eavluator\"\"\"\n def __init__(self, module, device_ids=None, flip=True,\n scales=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75]):\n super(LSeg_MultiEvalModule, self).__init__(module, device_ids)\n self.base_size = module.base_size\n self.crop_size = module.crop_size\n self.scales = scales\n self.flip = flip\n print('MultiEvalModule: base_size {}, crop_size {}'. \\\n format(self.base_size, self.crop_size))\n\n def parallel_forward(self, inputs, label_set='', **kwargs):\n \"\"\"Multi-GPU Mult-size Evaluation\n\n Args:\n inputs: list of Tensors\n \"\"\"\n # if len(label_set) < 10:\n # print('** MultiEvalModule parallel_forward phase: {} **'.format(label_set))\n self.nclass = len(label_set)\n inputs = [(input.unsqueeze(0).cuda(device),)\n for input, device in zip(inputs, self.device_ids)]\n replicas = self.replicate(self, self.device_ids[:len(inputs)])\n kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []\n if len(inputs) < len(kwargs):\n inputs.extend([() for _ in range(len(kwargs) - len(inputs))])\n elif len(kwargs) < len(inputs):\n kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])\n outputs = parallel_apply(replicas, inputs, label_set, kwargs)\n return outputs\n\n def forward(self, image, label_set=''):\n \"\"\"Mult-size Evaluation\"\"\"\n # only single image is supported for evaluation\n # if len(label_set) < 10:\n # print('** MultiEvalModule forward phase: {} **'.format(label_set))\n batch, _, h, w = image.size()\n # assert(batch == 1)\n self.nclass = len(label_set)\n stride_rate = 2.0/3.0\n crop_size = self.crop_size\n stride = int(crop_size * stride_rate)\n with torch.cuda.device_of(image):\n scores = image.new().resize_(batch,self.nclass,h,w).zero_().cuda()\n\n for scale in self.scales:\n long_size = int(math.ceil(self.base_size * scale))\n if h > w:\n height = long_size\n width = int(1.0 * w * long_size / h + 0.5)\n short_size = width\n else:\n width = long_size\n height = int(1.0 * h * long_size / w + 0.5)\n short_size = height\n \"\"\"\n short_size = int(math.ceil(self.base_size * scale))\n if h > w:\n width = short_size\n height = int(1.0 * h * short_size / w)\n long_size = height\n else:\n height = short_size\n width = int(1.0 * w * short_size / h)\n long_size = width\n \"\"\"\n # resize image to current size\n cur_img = resize_image(image, height, width, **self.module._up_kwargs)\n if long_size <= crop_size:\n\n \n pad_img = pad_image(cur_img, self.module.mean,\n self.module.std, crop_size)\n\n outputs = module_inference(self.module, pad_img, label_set, self.flip)\n outputs = crop_image(outputs, 0, image.shape[2], 0, image.shape[3]) #! songyou: minor modifications from height/width to image.shape[2]/imgae.shape[3], but I forgot why...\n\n\n if label_set == '': #! 
songyou: return only last layer feature\n return outputs\n else:\n if short_size < crop_size:\n # pad if needed\n pad_img = pad_image(cur_img, self.module.mean,\n self.module.std, crop_size)\n else:\n pad_img = cur_img\n _,_,ph,pw = pad_img.shape #.size()\n assert(ph >= height and pw >= width)\n # grid forward and normalize\n h_grids = int(math.ceil(1.0 * (ph-crop_size)/stride)) + 1\n w_grids = int(math.ceil(1.0 * (pw-crop_size)/stride)) + 1\n with torch.cuda.device_of(image):\n outputs = image.new().resize_(batch,self.nclass,ph,pw).zero_().cuda()\n count_norm = image.new().resize_(batch,1,ph,pw).zero_().cuda()\n # grid evaluation\n for idh in range(h_grids):\n for idw in range(w_grids):\n h0 = idh * stride\n w0 = idw * stride\n h1 = min(h0 + crop_size, ph)\n w1 = min(w0 + crop_size, pw)\n crop_img = crop_image(pad_img, h0, h1, w0, w1)\n # pad if needed\n pad_crop_img = pad_image(crop_img, self.module.mean,\n self.module.std, crop_size)\n output = module_inference(self.module, pad_crop_img, label_set, self.flip)\n outputs[:,:,h0:h1,w0:w1] += crop_image(output,\n 0, h1-h0, 0, w1-w0)\n count_norm[:,:,h0:h1,w0:w1] += 1\n assert((count_norm==0).sum()==0)\n outputs = outputs / count_norm\n outputs = outputs[:,:,:height,:width]\n score = resize_image(outputs, h, w, **self.module._up_kwargs)\n scores += score\n \n return scores" }, { "identifier": "LSegModule", "path": "modules/lseg_module.py", "snippet": "class LSegModule(LSegmentationModule):\n def __init__(self, data_path, dataset, batch_size, base_lr, max_epochs, **kwargs):\n super(LSegModule, self).__init__(\n data_path, dataset, batch_size, base_lr, max_epochs, **kwargs\n )\n\n if dataset == \"citys\":\n self.base_size = 2048\n self.crop_size = 768\n else:\n self.base_size = 520\n self.crop_size = 480\n\n use_pretrained = True\n norm_mean= [0.5, 0.5, 0.5]\n norm_std = [0.5, 0.5, 0.5]\n\n print('** Use norm {}, {} as the mean and std **'.format(norm_mean, norm_std))\n\n train_transform = [\n transforms.ToTensor(),\n transforms.Normalize(norm_mean, norm_std),\n ]\n\n val_transform = [\n transforms.ToTensor(),\n transforms.Normalize(norm_mean, norm_std),\n ]\n\n self.train_transform = transforms.Compose(train_transform)\n self.val_transform = transforms.Compose(val_transform)\n\n self.trainset = self.get_trainset(\n dataset,\n augment=kwargs[\"augment\"],\n base_size=self.base_size,\n crop_size=self.crop_size,\n )\n \n self.valset = self.get_valset(\n dataset,\n augment=kwargs[\"augment\"],\n base_size=self.base_size,\n crop_size=self.crop_size,\n )\n\n use_batchnorm = (\n (not kwargs[\"no_batchnorm\"]) if \"no_batchnorm\" in kwargs else True\n )\n # print(kwargs)\n\n labels = self.get_labels('ade20k')\n\n self.net = LSegNet(\n labels=labels,\n backbone=kwargs[\"backbone\"],\n features=kwargs[\"num_features\"],\n crop_size=self.crop_size,\n arch_option=kwargs[\"arch_option\"],\n block_depth=kwargs[\"block_depth\"],\n activation=kwargs[\"activation\"],\n )\n\n self.net.pretrained.model.patch_embed.img_size = (\n self.crop_size,\n self.crop_size,\n )\n\n self._up_kwargs = up_kwargs\n self.mean = norm_mean\n self.std = norm_std\n\n self.criterion = self.get_criterion(**kwargs)\n\n def get_labels(self, dataset):\n labels = []\n path = 'data/{}_objectInfo150.txt'.format(dataset)\n assert os.path.exists(path), '*** Error : {} not exist !!!'.format(path)\n f = open(path, 'r') \n lines = f.readlines() \n for line in lines: \n label = line.strip().split(',')[-1].split(';')[0]\n labels.append(label)\n f.close()\n if dataset in ['ade20k']:\n labels = 
labels[1:]\n return labels\n\n\n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = LSegmentationModule.add_model_specific_args(parent_parser)\n parser = ArgumentParser(parents=[parser])\n\n parser.add_argument(\n \"--backbone\",\n type=str,\n default=\"clip_vitl16_384\",\n help=\"backbone network\",\n )\n\n parser.add_argument(\n \"--num_features\",\n type=int,\n default=256,\n help=\"number of featurs that go from encoder to decoder\",\n )\n\n parser.add_argument(\"--dropout\", type=float, default=0.1, help=\"dropout rate\")\n\n parser.add_argument(\n \"--finetune_weights\", type=str, help=\"load weights to finetune from\"\n )\n\n parser.add_argument(\n \"--no-scaleinv\",\n default=True,\n action=\"store_false\",\n help=\"turn off scaleinv layers\",\n )\n\n parser.add_argument(\n \"--no-batchnorm\",\n default=False,\n action=\"store_true\",\n help=\"turn off batchnorm\",\n )\n\n parser.add_argument(\n \"--widehead\", default=False, action=\"store_true\", help=\"wider output head\"\n )\n\n parser.add_argument(\n \"--widehead_hr\",\n default=False,\n action=\"store_true\",\n help=\"wider output head\",\n )\n\n parser.add_argument(\n \"--arch_option\",\n type=int,\n default=0,\n help=\"which kind of architecture to be used\",\n )\n\n parser.add_argument(\n \"--block_depth\",\n type=int,\n default=0,\n help=\"how many blocks should be used\",\n )\n\n parser.add_argument(\n \"--activation\",\n choices=['lrelu', 'tanh'],\n default=\"lrelu\",\n help=\"use which activation to activate the block\",\n )\n\n return parser" }, { "identifier": "extract_lseg_img_feature", "path": "fusion_util.py", "snippet": "def extract_lseg_img_feature(img_dir, transform, evaluator, label=''):\n # load RGB image\n image = Image.open(img_dir)\n image = np.array(image)\n image = transform(image).unsqueeze(0)\n with torch.no_grad():\n outputs = evaluator.parallel_forward(image, label)\n feat_2d = outputs[0][0].half()\n\n return feat_2d" }, { "identifier": "PointCloudToImageMapper", "path": "fusion_util.py", "snippet": "class PointCloudToImageMapper(object):\n def __init__(self, image_dim,\n visibility_threshold=0.25, cut_bound=0, intrinsics=None):\n \n self.image_dim = image_dim\n self.vis_thres = visibility_threshold\n self.cut_bound = cut_bound\n self.intrinsics = intrinsics\n\n def compute_mapping(self, camera_to_world, coords, depth=None, intrinsic=None):\n \"\"\"\n :param camera_to_world: 4 x 4\n :param coords: N x 3 format\n :param depth: H x W format\n :param intrinsic: 3x3 format\n :return: mapping, N x 3 format, (H,W,mask)\n \"\"\"\n if self.intrinsics is not None: # global intrinsics\n intrinsic = self.intrinsics\n\n mapping = np.zeros((3, coords.shape[0]), dtype=int)\n coords_new = np.concatenate([coords, np.ones([coords.shape[0], 1])], axis=1).T\n assert coords_new.shape[0] == 4, \"[!] 
Shape error\"\n\n world_to_camera = np.linalg.inv(camera_to_world)\n p = np.matmul(world_to_camera, coords_new)\n p[0] = (p[0] * intrinsic[0][0]) / p[2] + intrinsic[0][2]\n p[1] = (p[1] * intrinsic[1][1]) / p[2] + intrinsic[1][2]\n pi = np.round(p).astype(int) # simply round the projected coordinates\n inside_mask = (pi[0] >= self.cut_bound) * (pi[1] >= self.cut_bound) \\\n * (pi[0] < self.image_dim[0]-self.cut_bound) \\\n * (pi[1] < self.image_dim[1]-self.cut_bound)\n if depth is not None:\n depth_cur = depth[pi[1][inside_mask], pi[0][inside_mask]]\n occlusion_mask = np.abs(depth[pi[1][inside_mask], pi[0][inside_mask]]\n - p[2][inside_mask]) <= \\\n self.vis_thres * depth_cur\n\n inside_mask[inside_mask == True] = occlusion_mask\n else:\n front_mask = p[2]>0 # make sure the depth is in front\n inside_mask = front_mask*inside_mask\n mapping[0][inside_mask] = pi[1][inside_mask]\n mapping[1][inside_mask] = pi[0][inside_mask]\n mapping[2][inside_mask] = 1\n\n return mapping.T" }, { "identifier": "save_fused_feature", "path": "fusion_util.py", "snippet": "def save_fused_feature(feat_bank, point_ids, n_points, out_dir, scene_id, args):\n '''Save features.'''\n\n for n in range(args.num_rand_file_per_scene):\n if n_points < args.n_split_points:\n n_points_cur = n_points # to handle point cloud numbers less than n_split_points\n else:\n n_points_cur = args.n_split_points\n\n rand_ind = np.random.choice(range(n_points), n_points_cur, replace=False)\n\n mask_entire = torch.zeros(n_points, dtype=torch.bool)\n mask_entire[rand_ind] = True\n mask = torch.zeros(n_points, dtype=torch.bool)\n mask[point_ids] = True\n mask_entire = mask_entire & mask\n\n torch.save({\"feat\": feat_bank[mask_entire].half().cpu(),\n \"mask_full\": mask_entire\n }, os.path.join(out_dir, scene_id +'_%d.pt'%(n)))\n print(os.path.join(out_dir, scene_id +'_%d.pt'%(n)) + ' is saved!')" }, { "identifier": "adjust_intrinsic", "path": "fusion_util.py", "snippet": "def adjust_intrinsic(intrinsic, intrinsic_image_dim, image_dim):\n '''Adjust camera intrinsics.'''\n\n if intrinsic_image_dim == image_dim:\n return intrinsic\n resize_width = int(math.floor(image_dim[1] * float(\n intrinsic_image_dim[0]) / float(intrinsic_image_dim[1])))\n intrinsic[0, 0] *= float(resize_width) / float(intrinsic_image_dim[0])\n intrinsic[1, 1] *= float(image_dim[1]) / float(intrinsic_image_dim[1])\n # account for cropping here\n intrinsic[0, 2] *= float(image_dim[0] - 1) / float(intrinsic_image_dim[0] - 1)\n intrinsic[1, 2] *= float(image_dim[1] - 1) / float(intrinsic_image_dim[1] - 1)\n return intrinsic" }, { "identifier": "make_intrinsic", "path": "fusion_util.py", "snippet": "def make_intrinsic(fx, fy, mx, my):\n '''Create camera intrinsics.'''\n\n intrinsic = np.eye(4)\n intrinsic[0][0] = fx\n intrinsic[1][1] = fy\n intrinsic[0][2] = mx\n intrinsic[1][2] = my\n return intrinsic" } ]
import os
import torch
import imageio
import argparse
import numpy as np
import torchvision.transforms as transforms
from os.path import join, exists
from glob import glob
from tqdm import tqdm, trange

from additional_utils.models import LSeg_MultiEvalModule
from modules.lseg_module import LSegModule
from encoding.models.sseg import BaseNet
from fusion_util import extract_lseg_img_feature, PointCloudToImageMapper, save_fused_feature, adjust_intrinsic, make_intrinsic
5,292
num_rand_file_per_scene = args.num_rand_file_per_scene feat_dim = args.feat_dim point2img_mapper = args.point2img_mapper depth_scale = args.depth_scale keep_features_in_memory = args.keep_features_in_memory evaluator = args.evaluator transform = args.transform # load 3D data (point cloud) locs_in = torch.load(data_path)[0] n_points = locs_in.shape[0] n_interval = num_rand_file_per_scene n_finished = 0 for n in range(n_interval): if exists(join(out_dir, scene_id +'_%d.pt'%(n))): n_finished += 1 print(scene_id +'_%d.pt'%(n) + ' already done!') continue if n_finished == n_interval: return 1 # short hand for processing 2D features scene = join(args.data_root_2d, scene_id) img_dirs = sorted(glob(join(scene, 'color/*')), key=lambda x: int(os.path.basename(x)[:-4])) num_img = len(img_dirs) device = torch.device('cpu') # extract image features and keep them in the memory # default: False (extract image on the fly) if keep_features_in_memory and evaluator is not None: img_features = [] for img_dir in tqdm(img_dirs): img_features.append(extract_lseg_img_feature(img_dir, transform, evaluator)) n_points_cur = n_points counter = torch.zeros((n_points_cur, 1), device=device) sum_features = torch.zeros((n_points_cur, feat_dim), device=device) ################ Feature Fusion ################### vis_id = torch.zeros((n_points_cur, num_img), dtype=int, device=device) for img_id, img_dir in enumerate(tqdm(img_dirs)): # load pose posepath = img_dir.replace('color', 'pose').replace('.jpg', '.txt') pose = np.loadtxt(posepath) # load depth and convert to meter depth = imageio.v2.imread(img_dir.replace('color', 'depth').replace('jpg', 'png')) / depth_scale # calculate the 3d-2d mapping based on the depth mapping = np.ones([n_points, 4], dtype=int) mapping[:, 1:4] = point2img_mapper.compute_mapping(pose, locs_in, depth) if mapping[:, 3].sum() == 0: # no points corresponds to this image, skip continue mapping = torch.from_numpy(mapping).to(device) mask = mapping[:, 3] vis_id[:, img_id] = mask if keep_features_in_memory: feat_2d = img_features[img_id].to(device) else: feat_2d = extract_lseg_img_feature(img_dir, transform, evaluator).to(device) feat_2d_3d = feat_2d[:, mapping[:, 1], mapping[:, 2]].permute(1, 0) counter[mask!=0]+= 1 sum_features[mask!=0] += feat_2d_3d[mask!=0] counter[counter==0] = 1e-5 feat_bank = sum_features/counter point_ids = torch.unique(vis_id.nonzero(as_tuple=False)[:, 0]) save_fused_feature(feat_bank, point_ids, n_points, out_dir, scene_id, args) def main(args): seed = 1457 torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) #!### Dataset specific parameters ##### img_dim = (320, 240) depth_scale = 1000.0 fx = 577.870605 fy = 577.870605 mx=319.5 my=239.5 ####################################### visibility_threshold = 0.25 # threshold for the visibility check args.depth_scale = depth_scale args.cut_num_pixel_boundary = 10 # do not use the features on the image boundary args.keep_features_in_memory = False # keep image features in the memory, very expensive split = args.split data_dir = args.data_dir data_root = join(data_dir, 'scannet_3d') data_root_2d = join(data_dir,'scannet_2d') args.data_root_2d = data_root_2d out_dir = args.output_dir args.feat_dim = 512 # CLIP feature dimension os.makedirs(out_dir, exist_ok=True) process_id_range = args.process_id_range if split== 'train': # for training set, export a chunk of point cloud args.n_split_points = 20000 args.num_rand_file_per_scene = 5 else: # for the validation set, export the entire point cloud instead of chunks 
args.n_split_points = 2000000 args.num_rand_file_per_scene = 1 ############################## ##### load the LSeg model ####
#!!!!!!! this file needs to be placed in the root directory of LSeg def get_args(): # command line args parser = argparse.ArgumentParser( description='Multi-view feature fusion of LSeg on ScanNet.') parser.add_argument('--data_dir', type=str, help='Where is the base logging directory') parser.add_argument('--output_dir', type=str, help='Where is the base logging directory') parser.add_argument('--split', type=str, default='val', help='split: "train"| "val"') parser.add_argument('--lseg_model', type=str, default='checkpoints/demo_e200.ckpt', help='Where is the LSeg checkpoint') parser.add_argument('--process_id_range', nargs='+', default=None, help='the id range to process') parser.add_argument('--img_feat_dir', type=str, default='', help='the id range to process') # Hyper parameters parser.add_argument('--hparams', default=[], nargs="+") args = parser.parse_args() return args def process_one_scene(data_path, out_dir, args): # short hand scene_id = data_path.split('/')[-1].split('_vh')[0] num_rand_file_per_scene = args.num_rand_file_per_scene feat_dim = args.feat_dim point2img_mapper = args.point2img_mapper depth_scale = args.depth_scale keep_features_in_memory = args.keep_features_in_memory evaluator = args.evaluator transform = args.transform # load 3D data (point cloud) locs_in = torch.load(data_path)[0] n_points = locs_in.shape[0] n_interval = num_rand_file_per_scene n_finished = 0 for n in range(n_interval): if exists(join(out_dir, scene_id +'_%d.pt'%(n))): n_finished += 1 print(scene_id +'_%d.pt'%(n) + ' already done!') continue if n_finished == n_interval: return 1 # short hand for processing 2D features scene = join(args.data_root_2d, scene_id) img_dirs = sorted(glob(join(scene, 'color/*')), key=lambda x: int(os.path.basename(x)[:-4])) num_img = len(img_dirs) device = torch.device('cpu') # extract image features and keep them in the memory # default: False (extract image on the fly) if keep_features_in_memory and evaluator is not None: img_features = [] for img_dir in tqdm(img_dirs): img_features.append(extract_lseg_img_feature(img_dir, transform, evaluator)) n_points_cur = n_points counter = torch.zeros((n_points_cur, 1), device=device) sum_features = torch.zeros((n_points_cur, feat_dim), device=device) ################ Feature Fusion ################### vis_id = torch.zeros((n_points_cur, num_img), dtype=int, device=device) for img_id, img_dir in enumerate(tqdm(img_dirs)): # load pose posepath = img_dir.replace('color', 'pose').replace('.jpg', '.txt') pose = np.loadtxt(posepath) # load depth and convert to meter depth = imageio.v2.imread(img_dir.replace('color', 'depth').replace('jpg', 'png')) / depth_scale # calculate the 3d-2d mapping based on the depth mapping = np.ones([n_points, 4], dtype=int) mapping[:, 1:4] = point2img_mapper.compute_mapping(pose, locs_in, depth) if mapping[:, 3].sum() == 0: # no points corresponds to this image, skip continue mapping = torch.from_numpy(mapping).to(device) mask = mapping[:, 3] vis_id[:, img_id] = mask if keep_features_in_memory: feat_2d = img_features[img_id].to(device) else: feat_2d = extract_lseg_img_feature(img_dir, transform, evaluator).to(device) feat_2d_3d = feat_2d[:, mapping[:, 1], mapping[:, 2]].permute(1, 0) counter[mask!=0]+= 1 sum_features[mask!=0] += feat_2d_3d[mask!=0] counter[counter==0] = 1e-5 feat_bank = sum_features/counter point_ids = torch.unique(vis_id.nonzero(as_tuple=False)[:, 0]) save_fused_feature(feat_bank, point_ids, n_points, out_dir, scene_id, args) def main(args): seed = 1457 torch.manual_seed(seed) 
torch.cuda.manual_seed_all(seed) np.random.seed(seed) #!### Dataset specific parameters ##### img_dim = (320, 240) depth_scale = 1000.0 fx = 577.870605 fy = 577.870605 mx=319.5 my=239.5 ####################################### visibility_threshold = 0.25 # threshold for the visibility check args.depth_scale = depth_scale args.cut_num_pixel_boundary = 10 # do not use the features on the image boundary args.keep_features_in_memory = False # keep image features in the memory, very expensive split = args.split data_dir = args.data_dir data_root = join(data_dir, 'scannet_3d') data_root_2d = join(data_dir,'scannet_2d') args.data_root_2d = data_root_2d out_dir = args.output_dir args.feat_dim = 512 # CLIP feature dimension os.makedirs(out_dir, exist_ok=True) process_id_range = args.process_id_range if split== 'train': # for training set, export a chunk of point cloud args.n_split_points = 20000 args.num_rand_file_per_scene = 5 else: # for the validation set, export the entire point cloud instead of chunks args.n_split_points = 2000000 args.num_rand_file_per_scene = 1 ############################## ##### load the LSeg model ####
module = LSegModule.load_from_checkpoint(
1
2023-10-27 15:40:36+00:00
8k
chenran-li/RQL-release
imitation/scripts/eval_policy.py
[ { "identifier": "VecEnvWrapper", "path": "stable_baselines3/common/vec_env/base_vec_env.py", "snippet": "class VecEnvWrapper(VecEnv):\n \"\"\"\n Vectorized environment base class\n\n :param venv: the vectorized environment to wrap\n :param observation_space: the observation space (can be None to load from venv)\n :param action_space: the action space (can be None to load from venv)\n \"\"\"\n\n def __init__(\n self,\n venv: VecEnv,\n observation_space: Optional[spaces.Space] = None,\n action_space: Optional[spaces.Space] = None,\n ):\n self.venv = venv\n VecEnv.__init__(\n self,\n num_envs=venv.num_envs,\n observation_space=observation_space or venv.observation_space,\n action_space=action_space or venv.action_space,\n )\n self.class_attributes = dict(inspect.getmembers(self.__class__))\n\n def step_async(self, actions: np.ndarray) -> None:\n self.venv.step_async(actions)\n\n @abstractmethod\n def reset(self) -> VecEnvObs:\n pass\n\n @abstractmethod\n def step_wait(self) -> VecEnvStepReturn:\n pass\n\n def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:\n return self.venv.seed(seed)\n\n def close(self) -> None:\n return self.venv.close()\n\n def render(self, mode: str = \"human\") -> Optional[np.ndarray]:\n return self.venv.render(mode=mode)\n\n def get_images(self) -> Sequence[np.ndarray]:\n return self.venv.get_images()\n\n def get_attr(self, attr_name: str, indices: VecEnvIndices = None) -> List[Any]:\n return self.venv.get_attr(attr_name, indices)\n\n def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None:\n return self.venv.set_attr(attr_name, value, indices)\n\n def env_method(self, method_name: str, *method_args, indices: VecEnvIndices = None, **method_kwargs) -> List[Any]:\n return self.venv.env_method(method_name, *method_args, indices=indices, **method_kwargs)\n\n def env_is_wrapped(self, wrapper_class: Type[gym.Wrapper], indices: VecEnvIndices = None) -> List[bool]:\n return self.venv.env_is_wrapped(wrapper_class, indices=indices)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"Find attribute from wrapped venv(s) if this wrapper does not have it.\n Useful for accessing attributes from venvs which are wrapped with multiple wrappers\n which have unique attributes of interest.\n \"\"\"\n blocked_class = self.getattr_depth_check(name, already_found=False)\n if blocked_class is not None:\n own_class = f\"{type(self).__module__}.{type(self).__name__}\"\n error_str = (\n f\"Error: Recursive attribute lookup for {name} from {own_class} is \"\n f\"ambiguous and hides attribute from {blocked_class}\"\n )\n raise AttributeError(error_str)\n\n return self.getattr_recursive(name)\n\n def _get_all_attributes(self) -> Dict[str, Any]:\n \"\"\"Get all (inherited) instance and class attributes\n\n :return: all_attributes\n \"\"\"\n all_attributes = self.__dict__.copy()\n all_attributes.update(self.class_attributes)\n return all_attributes\n\n def getattr_recursive(self, name: str) -> Any:\n \"\"\"Recursively check wrappers to find attribute.\n\n :param name: name of attribute to look for\n :return: attribute\n \"\"\"\n all_attributes = self._get_all_attributes()\n if name in all_attributes: # attribute is present in this wrapper\n attr = getattr(self, name)\n elif hasattr(self.venv, \"getattr_recursive\"):\n # Attribute not present, child is wrapper. 
Call getattr_recursive rather than getattr\n # to avoid a duplicate call to getattr_depth_check.\n attr = self.venv.getattr_recursive(name)\n else: # attribute not present, child is an unwrapped VecEnv\n attr = getattr(self.venv, name)\n\n return attr\n\n def getattr_depth_check(self, name: str, already_found: bool) -> str:\n \"\"\"See base class.\n\n :return: name of module whose attribute is being shadowed, if any.\n \"\"\"\n all_attributes = self._get_all_attributes()\n if name in all_attributes and already_found:\n # this venv's attribute is being hidden because of a higher venv.\n shadowed_wrapper_class = f\"{type(self).__module__}.{type(self).__name__}\"\n elif name in all_attributes and not already_found:\n # we have found the first reference to the attribute. Now check for duplicates.\n shadowed_wrapper_class = self.venv.getattr_depth_check(name, True)\n else:\n # this wrapper does not have the attribute. Keep searching.\n shadowed_wrapper_class = self.venv.getattr_depth_check(name, already_found)\n\n return shadowed_wrapper_class" }, { "identifier": "rollout", "path": "imitation/data/rollout.py", "snippet": "def rollout(\n policy: AnyPolicy,\n venv: VecEnv,\n sample_until: GenTrajTerminationFn,\n rng: np.random.Generator,\n *,\n unwrap: bool = True,\n exclude_infos: bool = True,\n verbose: bool = True,\n **kwargs: Any,\n) -> Sequence[types.TrajectoryWithRew]:\n \"\"\"Generate policy rollouts.\n\n This method is a wrapper of generate_trajectories that allows\n the user to additionally replace the rewards and observations with the original\n values if the environment is wrapped, to exclude the infos from the\n trajectories, and to print summary statistics of the rollout.\n\n The `.infos` field of each Trajectory is set to `None` to save space.\n\n Args:\n policy: Can be any of the following:\n 1) A stable_baselines3 policy or algorithm trained on the gym environment.\n 2) A Callable that takes an ndarray of observations and returns an ndarray\n of corresponding actions.\n 3) None, in which case actions will be sampled randomly.\n venv: The vectorized environments.\n sample_until: End condition for rollout sampling.\n rng: Random state to use for sampling.\n unwrap: If True, then save original observations and rewards (instead of\n potentially wrapped observations and rewards) by calling\n `unwrap_traj()`.\n exclude_infos: If True, then exclude `infos` from pickle by setting\n this field to None. Excluding `infos` can save a lot of space during\n pickles.\n verbose: If True, then print out rollout stats before saving.\n **kwargs: Passed through to `generate_trajectories`.\n\n Returns:\n Sequence of trajectories, satisfying `sample_until`. 
Additional trajectories\n may be collected to avoid biasing process towards short episodes; the user\n should truncate if required.\n \"\"\"\n trajs = generate_trajectories(\n policy,\n venv,\n sample_until,\n rng=rng,\n **kwargs,\n )\n if unwrap:\n trajs = [unwrap_traj(traj) for traj in trajs]\n if exclude_infos:\n trajs = [dataclasses.replace(traj, infos=None) for traj in trajs]\n if verbose:\n stats = rollout_stats(trajs)\n logging.info(f\"Rollout stats: {stats}\")\n return trajs" }, { "identifier": "types", "path": "imitation/data/types.py", "snippet": "T = TypeVar(\"T\")\ndef dataclass_quick_asdict(obj) -> Dict[str, Any]:\ndef parse_path(\n path: AnyPath,\n allow_relative: bool = True,\n base_directory: Optional[pathlib.Path] = None,\n) -> pathlib.Path:\ndef parse_optional_path(\n path: Optional[AnyPath],\n allow_relative: bool = True,\n base_directory: Optional[pathlib.Path] = None,\n) -> Optional[pathlib.Path]:\n def __len__(self) -> int:\n def __eq__(self, other) -> bool:\n def __post_init__(self):\n def __setstate__(self, state):\ndef _rews_validation(rews: np.ndarray, acts: np.ndarray):\n def __post_init__(self):\ndef transitions_collate_fn(\n batch: Sequence[Mapping[str, np.ndarray]],\n) -> Mapping[str, Union[np.ndarray, th.Tensor]]:\n def __len__(self) -> int:\n def __post_init__(self):\n def __getitem__(self, key: int) -> Mapping[str, np.ndarray]:\n def __getitem__(self: TransitionsMinimalSelf, key: slice) -> TransitionsMinimalSelf:\n def __getitem__(self, key):\n def __post_init__(self):\n def __post_init__(self):\ndef load_with_rewards(path: AnyPath) -> Sequence[TrajectoryWithRew]:\ndef load(path: AnyPath) -> Sequence[Trajectory]:\ndef save(path: AnyPath, trajectories: Sequence[Trajectory]):\nclass Trajectory:\nclass TrajectoryWithRew(Trajectory):\nclass TransitionsMinimal(th_data.Dataset, Sequence[Mapping[str, np.ndarray]]):\nclass Transitions(TransitionsMinimal):\nclass TransitionsWithRew(Transitions):" }, { "identifier": "ExplorationWrapper", "path": "imitation/policies/exploration_wrapper.py", "snippet": "class ExplorationWrapper:\n \"\"\"Wraps a PolicyCallable to create a partially randomized version.\n\n This wrapper randomly switches between two policies: the wrapped policy,\n and a random one. After each action, the current policy is kept\n with a certain probability. Otherwise, one of these two policies is chosen\n at random (without any dependence on what the current policy is).\n\n The random policy uses the `action_space.sample()` method.\n \"\"\"\n\n def __init__(\n self,\n policy: rollout.AnyPolicy,\n venv: vec_env.VecEnv,\n random_prob: float,\n switch_prob: float,\n rng: np.random.Generator,\n deterministic_policy: bool = False,\n ):\n \"\"\"Initializes the ExplorationWrapper.\n\n Args:\n policy: The policy to randomize.\n venv: The environment to use (needed for sampling random actions).\n random_prob: The probability of picking the random policy when switching.\n switch_prob: The probability of switching away from the current policy.\n rng: The random state to use for seeding the environment and for\n switching policies.\n deterministic_policy: Whether to make the policy deterministic when not\n exploring. 
This must be False when ``policy`` is a ``PolicyCallable``.\n \"\"\"\n policy_callable = rollout.policy_to_callable(policy, venv, deterministic_policy)\n self.wrapped_policy = policy_callable\n self.random_prob = random_prob\n self.switch_prob = switch_prob\n self.venv = venv\n\n self.rng = rng\n seed = util.make_seeds(self.rng)\n self.venv.action_space.seed(seed)\n\n self.current_policy = policy_callable\n # Choose the initial policy at random\n self._switch()\n\n def _random_policy(self, obs: np.ndarray) -> np.ndarray:\n acts = [self.venv.action_space.sample() for _ in range(len(obs))]\n return np.stack(acts, axis=0)\n\n def _switch(self) -> None:\n \"\"\"Pick a new policy at random.\"\"\"\n if self.rng.random() < self.random_prob:\n self.current_policy = self._random_policy\n else:\n self.current_policy = self.wrapped_policy\n\n def __call__(self, obs: np.ndarray) -> np.ndarray:\n acts = self.current_policy(obs)\n if self.rng.random() < self.switch_prob:\n self._switch()\n return acts" }, { "identifier": "reward_wrapper", "path": "imitation/rewards/reward_wrapper.py", "snippet": "class WrappedRewardCallback(callbacks.BaseCallback):\nclass RewardVecEnvWrapper(vec_env.VecEnvWrapper):\n def __init__(self, episode_rewards: Deque[float], *args, **kwargs):\n def _on_step(self) -> bool:\n def _on_rollout_start(self) -> None:\n def __init__(\n self,\n venv: vec_env.VecEnv,\n reward_fn: reward_function.RewardFn,\n ep_history: int = 100,\n ):\n def make_log_callback(self) -> WrappedRewardCallback:\n def envs(self):\n def reset(self):\n def step_async(self, actions):\n def step_wait(self):" }, { "identifier": "load_reward", "path": "imitation/rewards/serialize.py", "snippet": "@util.docstring_parameter(reward_types=\", \".join(reward_registry.keys()))\ndef load_reward(\n reward_type: str,\n reward_path: str,\n venv: VecEnv,\n **kwargs: Any,\n) -> reward_function.RewardFn:\n \"\"\"Load serialized reward.\n\n Args:\n reward_type: A key in `reward_registry`. 
Valid types\n include {reward_types}.\n reward_path: A path specifying the reward.\n venv: An environment that the policy is to be used with.\n **kwargs: kwargs to pass to reward fn\n\n Returns:\n The deserialized reward.\n \"\"\"\n reward_loader = reward_registry.get(reward_type)\n return reward_loader(reward_path, venv, **kwargs)" }, { "identifier": "eval_policy_ex", "path": "imitation/scripts/config/eval_policy.py", "snippet": "def replay_defaults():\ndef explore_eps_greedy():\ndef render():\ndef acrobot():\ndef ant():\ndef cartpole():\ndef seals_cartpole():\ndef half_cheetah():\ndef seals_half_cheetah():\ndef seals_hopper():\ndef seals_humanoid():\ndef mountain_car():\ndef seals_mountain_car():\ndef pendulum():\ndef reacher():\ndef seals_ant():\ndef seals_swimmer():\ndef seals_walker():\ndef fast():" }, { "identifier": "environment", "path": "imitation/scripts/ingredients/environment.py", "snippet": "def config():\ndef make_venv(\n gym_id: str,\n num_vec: int,\n parallel: bool,\n max_episode_steps: int,\n env_make_kwargs: Mapping[str, Any],\n _run: sacred.run.Run,\n _rnd: np.random.Generator,\n **kwargs,\n) -> Generator[vec_env.VecEnv, None, None]:\ndef make_rollout_venv(\n gym_id: str,\n num_vec: int,\n parallel: bool,\n max_episode_steps: int,\n env_make_kwargs: Mapping[str, Any],\n _rnd: np.random.Generator,\n) -> Generator[vec_env.VecEnv, None, None]:\ndef fast():" }, { "identifier": "expert", "path": "imitation/scripts/ingredients/expert.py", "snippet": "def config():\ndef config_hook(config, command_name, logger):\ndef get_expert_policy(venv, policy_type, loader_kwargs):" }, { "identifier": "logging", "path": "imitation/scripts/ingredients/logging.py", "snippet": "def config():\ndef update_log_format_strs(log_format_strs, log_format_strs_additional):\ndef hook(config, command_name: str, logger):\ndef wandb_logging():\ndef make_log_dir(\n _run,\n log_dir: str,\n log_level: Union[int, str],\n) -> pathlib.Path:\ndef setup_logging(\n _run,\n log_format_strs: Sequence[str],\n) -> Tuple[imit_logger.HierarchicalLogger, pathlib.Path]:" }, { "identifier": "video_wrapper", "path": "imitation/util/video_wrapper.py", "snippet": "class VideoWrapper(gym.Wrapper):\n def __init__(\n self,\n env: gym.Env,\n directory: pathlib.Path,\n single_video: bool = True,\n ):\n def _reset_video_recorder(self) -> None:\n def reset(self):\n def step(self, action):\n def close(self) -> None:" } ]
import logging
import pathlib
import time

import gym
import numpy as np
from typing import Any, Mapping, Optional
from sacred.observers import FileStorageObserver
from stable_baselines3.common.vec_env import VecEnvWrapper

from imitation.data import rollout, types
from imitation.policies.exploration_wrapper import ExplorationWrapper
from imitation.rewards import reward_wrapper
from imitation.rewards.serialize import load_reward
from imitation.scripts.config.eval_policy import eval_policy_ex
from imitation.scripts.ingredients import environment, expert
from imitation.scripts.ingredients import logging as logging_ingredient
from imitation.util import video_wrapper
4,009
"""Evaluate policies: render policy interactively, save videos, log episode return.""" class InteractiveRender(VecEnvWrapper): """Render the wrapped environment(s) on screen.""" def __init__(self, venv, fps): """Builds renderer for `venv` running at `fps` frames per second.""" super().__init__(venv) self.render_fps = fps def reset(self): ob = self.venv.reset() self.venv.render() return ob def step_wait(self): ob = self.venv.step_wait() if self.render_fps > 0: time.sleep(1 / self.render_fps) self.venv.render() return ob def video_wrapper_factory(log_dir: pathlib.Path, **kwargs): """Returns a function that wraps the environment in a video recorder."""
"""Evaluate policies: render policy interactively, save videos, log episode return.""" class InteractiveRender(VecEnvWrapper): """Render the wrapped environment(s) on screen.""" def __init__(self, venv, fps): """Builds renderer for `venv` running at `fps` frames per second.""" super().__init__(venv) self.render_fps = fps def reset(self): ob = self.venv.reset() self.venv.render() return ob def step_wait(self): ob = self.venv.step_wait() if self.render_fps > 0: time.sleep(1 / self.render_fps) self.venv.render() return ob def video_wrapper_factory(log_dir: pathlib.Path, **kwargs): """Returns a function that wraps the environment in a video recorder."""
def f(env: gym.Env, i: int) -> video_wrapper.VideoWrapper:
10
2023-10-28 01:09:21+00:00
8k
AmgdGocha/DriveFS-Sleuth
drivefs_sleuth/setup.py
[ { "identifier": "get_last_pid", "path": "drivefs_sleuth/utils.py", "snippet": "def get_last_pid(drivefs_path):\n try:\n with open(os.path.join(drivefs_path, 'pid.txt')) as pid_file:\n return pid_file.read()\n except OSError:\n return -1" }, { "identifier": "get_item_info", "path": "drivefs_sleuth/utils.py", "snippet": "def get_item_info(profile_path, stable_id):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(f\"SELECT is_folder, stable_id, id, local_title, mime_type, is_owner, file_size, \"\n f\"modified_date, viewed_by_me_date, trashed, proto FROM items WHERE stable_id={stable_id}\")\n return cursor.fetchone()\n except sqlite3.OperationalError:\n return ()" }, { "identifier": "get_last_sync", "path": "drivefs_sleuth/utils.py", "snippet": "def get_last_sync(drivefs_path):\n try:\n with sqlite3.connect(os.path.join(drivefs_path, \"experiments.db\")) as experiments_db:\n cursor = experiments_db.cursor()\n cursor.execute(\"SELECT value FROM PhenotypeValues WHERE key='last_sync'\")\n return int(cursor.fetchone()[0])\n except sqlite3.OperationalError:\n return -1" }, { "identifier": "parse_protobuf", "path": "drivefs_sleuth/utils.py", "snippet": "def parse_protobuf(protobuf):\n if not protobuf:\n return {}\n\n with contextlib.redirect_stdout(None):\n protodeep_schema = guess_schema(data=protobuf)\n return protodeep_schema.values" }, { "identifier": "get_max_root_ids", "path": "drivefs_sleuth/utils.py", "snippet": "def get_max_root_ids(drivefs_path):\n try:\n with sqlite3.connect(os.path.join(drivefs_path, \"root_preference_sqlite.db\")) as root_preference_db:\n cursor = root_preference_db.cursor()\n cursor.execute(\"SELECT value FROM max_ids WHERE id_type='max_root_id'\")\n max_root_ids = cursor.fetchone()\n if max_root_ids:\n return int(max_root_ids[0])\n return None\n except sqlite3.OperationalError:\n return None" }, { "identifier": "get_deleted_items", "path": "drivefs_sleuth/utils.py", "snippet": "def get_deleted_items(profile_path):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(\"SELECT stable_id, proto FROM deleted_items\")\n return cursor.fetchall()\n except sqlite3.OperationalError:\n return []" }, { "identifier": "get_mirrored_items", "path": "drivefs_sleuth/utils.py", "snippet": "def get_mirrored_items(profile_path):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"mirror_sqlite.db\")) as mirror_sqlite_db:\n cursor = mirror_sqlite_db.cursor()\n cursor.execute(\"SELECT local_stable_id, stable_id, volume, parent_local_stable_id, local_filename, \"\n \"cloud_filename, local_mtime_ms, cloud_mtime_ms, local_md5_checksum, cloud_md5_checksum,\"\n \"local_size, cloud_size, local_version, cloud_version, shared, read_only, is_root \"\n \"FROM mirror_item\")\n return cursor.fetchall()\n except sqlite3.OperationalError:\n return []" }, { "identifier": "get_item_properties", "path": "drivefs_sleuth/utils.py", "snippet": "def get_item_properties(profile_path, item_id):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(f\"SELECT key, value FROM item_properties WHERE item_stable_id={item_id}\")\n item_properties = {}\n for item_property in cursor.fetchall():\n item_properties[item_property[0]] = item_property[1]\n return item_properties\n except 
sqlite3.OperationalError:\n return {}" }, { "identifier": "get_target_stable_id", "path": "drivefs_sleuth/utils.py", "snippet": "def get_target_stable_id(profile_path, shortcut_stable_id):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(f\"SELECT target_stable_id FROM shortcut_details \"\n f\"WHERE shortcut_stable_id={shortcut_stable_id}\")\n shortcut_stable_id = cursor.fetchone()\n if shortcut_stable_id:\n return int(shortcut_stable_id[0])\n return 0\n except sqlite3.OperationalError:\n return 0" }, { "identifier": "get_connected_devices", "path": "drivefs_sleuth/utils.py", "snippet": "def get_connected_devices(drivefs_path):\n try:\n with sqlite3.connect(os.path.join(drivefs_path, \"root_preference_sqlite.db\")) as root_preference_db:\n cursor = root_preference_db.cursor()\n cursor.execute(\"SELECT media_id, name, last_mount_point, capacity, ignored FROM media\")\n return cursor.fetchall()\n except sqlite3.OperationalError:\n return []" }, { "identifier": "get_parent_relationships", "path": "drivefs_sleuth/utils.py", "snippet": "def get_parent_relationships(profile_path):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(\n \"SELECT parent_stable_id, item_stable_id FROM stable_parents ORDER BY parent_stable_id, item_stable_id\"\n )\n return cursor.fetchall()\n except sqlite3.OperationalError:\n return []" }, { "identifier": "get_content_caches_paths", "path": "drivefs_sleuth/utils.py", "snippet": "def get_content_caches_paths(content_cache_dir):\n content_caches_paths = {}\n\n for root, _, content_caches in os.walk(content_cache_dir):\n for content_cache in content_caches:\n content_caches_paths[content_cache] = os.path.abspath(os.path.join(root, content_cache))\n del(content_caches_paths['chunks.db'])\n\n return content_caches_paths" }, { "identifier": "get_file_content_cache_path", "path": "drivefs_sleuth/utils.py", "snippet": "def get_file_content_cache_path(content_entry, content_caches_paths):\n if content_entry:\n parsed_content_entry = parse_protobuf(content_entry)\n content_entry_filename = str(parsed_content_entry['1'])\n return content_caches_paths.get(content_entry_filename, '')\n return ''" }, { "identifier": "get_shared_with_me_without_link", "path": "drivefs_sleuth/utils.py", "snippet": "def get_shared_with_me_without_link(profile_path):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(\"SELECT is_folder, stable_id, id, local_title, mime_type, is_owner, file_size, modified_date\"\n \", viewed_by_me_date, trashed, proto FROM items \"\n \"LEFT JOIN stable_parents ON items.stable_id = stable_parents.item_stable_id \"\n \"LEFT JOIN shortcut_details ON items.stable_id = shortcut_details.target_stable_id \"\n \"WHERE items.is_owner=0 AND items.shared_with_me_date=1 \"\n \"AND stable_parents.item_stable_id IS NULL \"\n \"AND shortcut_details.target_stable_id IS NULL \"\n \"ORDER BY items.stable_id\")\n return cursor.fetchall()\n except sqlite3.OperationalError:\n return []" }, { "identifier": "get_mirroring_roots_for_account", "path": "drivefs_sleuth/utils.py", "snippet": "def get_mirroring_roots_for_account(drivefs_path, account_id):\n try:\n with sqlite3.connect(os.path.join(drivefs_path, \"root_preference_sqlite.db\")) as root_preference_db:\n 
cursor = root_preference_db.cursor()\n cursor.execute(\"SELECT account_token, root_id, media_id, title, root_path, sync_type, destination, \"\n f\"last_seen_absolute_path FROM roots WHERE account_token=\\\"{account_id}\\\"\")\n return cursor.fetchall()\n except sqlite3.OperationalError:\n return []" }, { "identifier": "File", "path": "drivefs_sleuth/synced_files_tree.py", "snippet": "class File(Item):\n def __init__(self, stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date, viewed_by_me_date,\n trashed, properties, tree_path, content_cache_path, proto):\n super().__init__(stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date,\n viewed_by_me_date, trashed, properties, tree_path, proto)\n\n self.__content_cache_path = content_cache_path\n self.__file_type = parse_protobuf(proto).get('45', '')\n\n def get_content_cache_path(self):\n return self.__content_cache_path\n\n def get_file_type(self):\n return self.__file_type" }, { "identifier": "Link", "path": "drivefs_sleuth/synced_files_tree.py", "snippet": "class Link(Item):\n def __init__(self, stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date, viewed_by_me_date,\n trashed, properties, tree_path, target_item, proto):\n super().__init__(stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date,\n viewed_by_me_date, trashed, properties, tree_path, proto)\n self.__target_item = target_item\n\n def get_target_item(self):\n return self.__target_item" }, { "identifier": "Directory", "path": "drivefs_sleuth/synced_files_tree.py", "snippet": "class Directory(Item):\n def __init__(self, stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date, viewed_by_me_date,\n trashed, properties, tree_path, proto):\n super().__init__(stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date,\n viewed_by_me_date, trashed, properties, tree_path, proto)\n self.__sub_items = []\n\n def add_item(self, item):\n self.__sub_items.append(item)\n\n def remove_item(self, stable_id):\n for item in self.__sub_items:\n if item.get_stable_id() == stable_id:\n self.__sub_items.remove(item)\n\n def get_sub_items(self):\n return self.__sub_items" }, { "identifier": "DummyItem", "path": "drivefs_sleuth/synced_files_tree.py", "snippet": "class DummyItem(Item):\n def __init__(self, stable_id):\n super().__init__(stable_id, '', 'DELETED_ITEM', '', '', '', '', '', '', '', '', '')\n\n def get_sub_items(self):\n return []" }, { "identifier": "MirrorItem", "path": "drivefs_sleuth/synced_files_tree.py", "snippet": "class MirrorItem:\n def __init__(self, local_stable_id, stable_id, volume, parent, local_filename, cloud_filename, local_mtime,\n cloud_mtime, local_md5, cloud_md5, local_size, cloud_size, local_version, cloud_version, shared,\n read_only, is_root):\n self.local_stable_id = local_stable_id\n self.stable_id = stable_id\n self.volume = volume\n self.parent = parent\n self.local_filename = local_filename\n self.cloud_filename = cloud_filename\n self.local_mtime = local_mtime\n self.cloud_mtime = cloud_mtime\n self.local_md5 = local_md5\n self.cloud_md5 = cloud_md5\n self.local_size = local_size\n self.cloud_size = cloud_size\n self.local_version = local_version\n self.cloud_version = cloud_version\n self.shared = shared\n self.read_only = read_only\n self.is_root = is_root\n\n def get_local_mtime_utc(self):\n return datetime.datetime.fromtimestamp(int(self.local_mtime)/1000.0, datetime.timezone.utc)\n\n def get_cloud_mtime_utc(self):\n return 
datetime.datetime.fromtimestamp(int(self.cloud_mtime)/1000.0, datetime.timezone.utc)" }, { "identifier": "SyncedFilesTree", "path": "drivefs_sleuth/synced_files_tree.py", "snippet": "class SyncedFilesTree:\n def __init__(self, root):\n self.__root = root\n self.__orphan_items = []\n self.__shared_with_me = []\n self.__recovered_deleted_items = []\n self.__deleted_items = []\n self.__mirror_items = []\n self.__recoverable_items_from_cache = []\n\n def get_root(self):\n return self.__root\n\n def get_orphan_items(self):\n return self.__orphan_items\n\n def add_orphan_item(self, item):\n self.__orphan_items.append(item)\n\n def add_deleted_item(self, stable_id):\n self.__deleted_items.append(stable_id)\n\n def add_recovered_deleted_item(self, item):\n self.__recovered_deleted_items.append(item)\n\n def add_shared_with_me_item(self, item):\n self.__shared_with_me.append(item)\n\n def get_shared_with_me_items(self):\n return self.__shared_with_me\n\n def get_deleted_items(self):\n return self.__deleted_items\n\n def get_recovered_deleted_items(self):\n return self.__recovered_deleted_items\n\n def get_item_by_id(self, target_id, is_owner=False):\n if not is_owner:\n queue = [self.get_root()] + self.get_orphan_items() + self.get_shared_with_me_items()\n else:\n queue = [self.get_root()]\n\n while queue:\n current_item = queue.pop(0)\n\n if current_item.get_stable_id() == target_id:\n return current_item\n\n if current_item.is_file():\n continue\n\n elif current_item.is_dir():\n queue += current_item.get_sub_items()\n\n elif current_item.is_link():\n queue += current_item.get_target_item()\n\n return None\n\n def search_item_by_name(self, filenames=None, regex=None, contains=True, list_sub_items=True):\n if filenames is None:\n filenames = []\n if regex is None:\n regex = []\n items = []\n\n def append_item_childs(item):\n items.append(item)\n if isinstance(item, File):\n return\n\n elif isinstance(item, Link):\n target = item.get_target_item()\n if isinstance(item, File):\n append_item_childs(target)\n else:\n for sub_item in target.get_sub_items():\n append_item_childs(sub_item)\n\n elif isinstance(item, Directory):\n for sub_item in item.get_sub_items():\n append_item_childs(sub_item)\n\n else:\n for sub_item in item:\n append_item_childs(sub_item)\n\n def search(current_item):\n hit = False\n if regex:\n for exp in regex:\n match = re.search(exp, current_item.local_title)\n if match:\n items.append(current_item)\n hit = True\n\n if contains:\n for filename in filenames:\n if filename.lower() in current_item.local_title.lower():\n items.append(current_item)\n hit = True\n else:\n for filename in filenames:\n if filename.lower() == current_item.local_title.lower():\n items.append(current_item)\n hit = True\n\n if isinstance(current_item, File):\n return\n\n elif isinstance(current_item, Link) and hit and list_sub_items:\n target = current_item.get_target_item()\n if isinstance(target, File):\n append_item_childs(target)\n else:\n for sub_item in target.get_sub_items():\n append_item_childs(sub_item)\n\n elif isinstance(current_item, Directory) and hit and list_sub_items:\n for sub_item in current_item.get_sub_items():\n append_item_childs(sub_item)\n\n else:\n if isinstance(current_item, Link):\n target = current_item.get_target_item()\n if isinstance(target, File):\n search(target)\n else:\n for sub_item in target.get_sub_items():\n search(sub_item)\n else:\n for sub_item in current_item.get_sub_items():\n search(sub_item)\n\n search(self.get_root())\n for orphan_item in 
self.get_orphan_items():\n search(orphan_item)\n\n for shared_item in self.get_shared_with_me_items():\n search(shared_item)\n\n for recovered_deleted_item in self.get_recovered_deleted_items():\n search(recovered_deleted_item)\n\n return items\n\n def add_mirrored_item(self, mirrored_item):\n self.__mirror_items.append(mirrored_item)\n\n def get_mirrored_items(self):\n return self.__mirror_items\n\n def add_recoverable_item_from_cache(self, recoverable_from_cache_item):\n self.__recoverable_items_from_cache.append(recoverable_from_cache_item)\n\n def get_recoverable_items_from_cache(self):\n return self.__recoverable_items_from_cache\n\n def print_synced_files_tree(self):\n print('\\n----------Synced Items----------\\n')\n\n _print_tree([self.get_root()] + self.get_orphan_items())\n\n print('\\n----------Deleted Items----------\\n')\n\n for recovered_deleted_items in self.__recovered_deleted_items:\n print(f'- ({recovered_deleted_items.get_stable_id()}) {recovered_deleted_items.local_title}')\n\n for deleted_item in self.__deleted_items:\n print(f'- {deleted_item}')\n\n print('\\n----------Orphan Items----------\\n')\n\n for orphan in self.get_orphan_items():\n print(f'- ({orphan.get_stable_id()}) {orphan.local_title}')\n\n print('\\n----------Shared With Me Items----------\\n')\n\n for shared_with_me_item in self.get_shared_with_me_items():\n print(f'- ({shared_with_me_item.get_stable_id()}) {shared_with_me_item.local_title}')" }, { "identifier": "get_accounts", "path": "drivefs_sleuth/tasks.py", "snippet": "def get_accounts(drivefs_path):\n accounts = {}\n experiments_ids = get_experiment_account_ids(drivefs_path)\n profiles = get_available_profiles(drivefs_path)\n available_accounts = set(experiments_ids + profiles)\n for account_id in available_accounts:\n accounts[account_id] = {\n 'email': lookup_account_id(drivefs_path, account_id)\n }\n logged_in = account_id in profiles\n accounts[account_id]['logged_in'] = logged_in\n accounts[account_id]['properties'] = get_account_properties(os.path.join(drivefs_path, account_id))\n return accounts" } ]
import os.path
import datetime
from enum import Enum
from collections import OrderedDict

from drivefs_sleuth.utils import get_last_pid
from drivefs_sleuth.utils import get_item_info
from drivefs_sleuth.utils import get_last_sync
from drivefs_sleuth.utils import parse_protobuf
from drivefs_sleuth.utils import get_max_root_ids
from drivefs_sleuth.utils import get_deleted_items
from drivefs_sleuth.utils import get_mirrored_items
from drivefs_sleuth.utils import get_item_properties
from drivefs_sleuth.utils import get_target_stable_id
from drivefs_sleuth.utils import get_connected_devices
from drivefs_sleuth.utils import get_parent_relationships
from drivefs_sleuth.utils import get_content_caches_paths
from drivefs_sleuth.utils import get_file_content_cache_path
from drivefs_sleuth.utils import get_shared_with_me_without_link
from drivefs_sleuth.utils import get_mirroring_roots_for_account

from drivefs_sleuth.synced_files_tree import File
from drivefs_sleuth.synced_files_tree import Link
from drivefs_sleuth.synced_files_tree import Directory
from drivefs_sleuth.synced_files_tree import DummyItem
from drivefs_sleuth.synced_files_tree import MirrorItem
from drivefs_sleuth.synced_files_tree import SyncedFilesTree

from drivefs_sleuth.tasks import get_accounts
6,840
parent_info[9], get_item_properties(self.__profile_path, parent_id), parent_info[3], parent_info[10]) orphan_dirs[parent_id] = current_parent_dir for child_id in childs_ids: child_info = get_item_info(self.__profile_path, child_id) child_properties = get_item_properties(self.__profile_path, child_id) if not child_info: self.__synced_files_tree.add_deleted_item(DummyItem(child_id)) continue if child_info[0] == 0: content_cache_path = get_file_content_cache_path( child_properties.get('content-entry', None), content_caches_paths) child_file = File(child_info[1], child_info[2], child_info[3], child_info[4], child_info[5], child_info[6], child_info[7], child_info[8], child_info[9], child_properties, f'{current_parent_dir.tree_path}\\{child_info[3]}', content_cache_path, child_info[10]) current_parent_dir.add_item(child_file) if content_cache_path: self.__synced_files_tree.add_recoverable_item_from_cache(child_file) else: if child_info[4] == 'application/vnd.google-apps.shortcut': target_stable_id = get_target_stable_id(self.__profile_path, child_info[1]) if target_stable_id: target = orphan_dirs.get(target_stable_id, None) if target: added_dirs[target_stable_id] = target del orphan_dirs[target_stable_id] else: target_info = get_item_info(self.__profile_path, target_stable_id) if target_info: if target_info[0] == 0: content_cache_path = get_file_content_cache_path( child_properties.get('content-entry', None), content_caches_paths) target = File(target_info[1], target_info[2], target_info[3], target_info[4], target_info[5], target_info[6], target_info[7], target_info[8], target_info[9], get_item_properties(self.__profile_path, target_info[1]), f'{current_parent_dir.tree_path}\\{target_info[3]}', content_cache_path, target_info[10]) else: target = Directory(target_info[1], target_info[2], target_info[3], target_info[4], target_info[5], target_info[6], target_info[7], target_info[8], target_info[9], get_item_properties(self.__profile_path, target_info[1]), f'{current_parent_dir.tree_path}\\{target_info[3]}', target_info[10]) added_dirs[target_stable_id] = target else: target = DummyItem(target_stable_id) self.__synced_files_tree.add_deleted_item(target) child = Link(child_info[1], child_info[2], child_info[3], child_info[4], child_info[5], child_info[6], child_info[7], child_info[8], child_info[9], child_properties, f'{current_parent_dir.tree_path}\\{child_info[3]}', target, child_info[10]) else: target = DummyItem('-1') child = Link(child_info[1], child_info[2], child_info[3], child_info[4], child_info[5], child_info[6], child_info[7], child_info[8], child_info[9], child_properties, f'{current_parent_dir.tree_path}\\{child_info[3]}', target, child_info[10]) else: child = orphan_dirs.get(child_id, None) if child: child.tree_path = f'{current_parent_dir.tree_path}\\{child.local_title}' del orphan_dirs[child_id] else: child = Directory(child_info[1], child_info[2], child_info[3], child_info[4], child_info[5], child_info[6], child_info[7], child_info[8], child_info[9], child_properties, f'{current_parent_dir.tree_path}\\{child_info[3]}', child_info[10]) added_dirs[child_id] = child current_parent_dir.add_item(child) # TODO: check if I can add a link in the shared with me for shared_with_me_item_info in get_shared_with_me_without_link(self.__profile_path): shared_with_me_item_properties = get_item_properties(self.__profile_path, shared_with_me_item_info[1]) if shared_with_me_item_info[0] == 0: content_cache_path = get_file_content_cache_path( shared_with_me_item_properties.get('content-entry', None), 
content_caches_paths) shared_with_me_file = File(shared_with_me_item_info[1], shared_with_me_item_info[2], shared_with_me_item_info[3], shared_with_me_item_info[4], shared_with_me_item_info[5], shared_with_me_item_info[6], shared_with_me_item_info[7], shared_with_me_item_info[8], shared_with_me_item_info[9], shared_with_me_item_properties, f'Shared with me\\{shared_with_me_item_info[3]}', content_cache_path, shared_with_me_item_info[10]) self.__synced_files_tree.add_shared_with_me_item(shared_with_me_file) if shared_with_me_file: self.__synced_files_tree.add_recoverable_item_from_cache(shared_with_me_file) else: shared_with_me_item = orphan_dirs.get(shared_with_me_item_info[1], None) if shared_with_me_item: del orphan_dirs[shared_with_me_item_info[1]] else: shared_with_me_item = Directory(shared_with_me_item_info[1], shared_with_me_item_info[2], shared_with_me_item_info[3], shared_with_me_item_info[4], shared_with_me_item_info[5], shared_with_me_item_info[6], shared_with_me_item_info[7], shared_with_me_item_info[8], shared_with_me_item_info[9], shared_with_me_item_properties, f'{current_parent_dir.tree_path}\\{shared_with_me_item_info[3]}', shared_with_me_item_info[10]) self.__synced_files_tree.add_shared_with_me_item(shared_with_me_item) for orphan_id, orphan_dir in orphan_dirs.items(): self.__synced_files_tree.add_orphan_item(orphan_dir) mirrored_items = get_mirrored_items(self.__profile_path) for item in mirrored_items: self.__synced_files_tree.add_mirrored_item( MirrorItem(item[0], item[1], item[2], item[3], item[4], item[5], item[6], item[7], item[8], item[9], item[10], item[11], item[12], item[13], item[14], item[15], item[16] ) )
class StorageDestinations(Enum): DRIVE = "DRIVE" PHOTOS = "PHOTOS" class Account: def __init__(self, drivefs_path, account_id, email, is_logged_in, mirroring_roots, properties): self.__profile_path = os.path.join(drivefs_path, account_id) self.__account_id = account_id self.__account_email = email self.__is_logged_in = is_logged_in self.__synced_files_tree = None if is_logged_in: self._construct_synced_files_trees() self.__mirroring_roots = [] for mirroring_root in mirroring_roots: mirroring_root_info = { 'root_id': mirroring_root[1], 'media_id': mirroring_root[2], 'title': mirroring_root[3], 'root_path': mirroring_root[4], 'sync_type': mirroring_root[5], 'last_seen_absolute_path': mirroring_root[7], } if mirroring_root[6] == 1: mirroring_root_info['destination'] = StorageDestinations.DRIVE.value else: mirroring_root_info['destination'] = StorageDestinations.PHOTOS.value self.__mirroring_roots.append(mirroring_root_info) self.__name = properties['name'] self.__photo_url = properties['photo_url'] def get_profile_path(self): return self.__profile_path def get_account_id(self): return self.__account_id def get_account_email(self): return self.__account_email def is_logged_in(self): return self.__is_logged_in def get_synced_files_tree(self): return self.__synced_files_tree def get_mirroring_roots(self): return self.__mirroring_roots def get_name(self): return self.__name def get_photo_url(self): return self.__photo_url def _construct_synced_files_trees(self): parent_relationships = get_parent_relationships(self.__profile_path) root_info = get_item_info(self.__profile_path, parent_relationships[0][0]) root = Directory(root_info[1], root_info[2], root_info[3], root_info[4], root_info[5], root_info[6], root_info[7], root_info[8], root_info[9], get_item_properties(self.__profile_path, root_info[1]), root_info[3], root_info[10]) self.__synced_files_tree = SyncedFilesTree(root) content_caches_paths = get_content_caches_paths(os.path.join(self.__profile_path, 'content_cache')) parent_relationships_dict = OrderedDict() for parent, child in parent_relationships: if parent not in parent_relationships_dict.keys(): parent_relationships_dict[parent] = [] parent_relationships_dict[parent].append(child) added_dirs = {self.__synced_files_tree.get_root().get_stable_id(): self.__synced_files_tree.get_root()} orphan_dirs = {} current_parent_dir = self.__synced_files_tree.get_root() for parent_id, childs_ids in parent_relationships_dict.items(): if parent_id != current_parent_dir.get_stable_id(): if parent_id in added_dirs: current_parent_dir = added_dirs[parent_id] elif parent_id in orphan_dirs: current_parent_dir = orphan_dirs[parent_id] else: parent_info = get_item_info(self.__profile_path, parent_id) if not parent_info: self.__synced_files_tree.add_deleted_item(DummyItem(parent_id)) else: current_parent_dir = Directory(parent_info[1], parent_info[2], parent_info[3], parent_info[4], parent_info[5], parent_info[6], parent_info[7], parent_info[8], parent_info[9], get_item_properties(self.__profile_path, parent_id), parent_info[3], parent_info[10]) orphan_dirs[parent_id] = current_parent_dir for child_id in childs_ids: child_info = get_item_info(self.__profile_path, child_id) child_properties = get_item_properties(self.__profile_path, child_id) if not child_info: self.__synced_files_tree.add_deleted_item(DummyItem(child_id)) continue if child_info[0] == 0: content_cache_path = get_file_content_cache_path( child_properties.get('content-entry', None), content_caches_paths) child_file = File(child_info[1], 
child_info[2], child_info[3], child_info[4], child_info[5], child_info[6], child_info[7], child_info[8], child_info[9], child_properties, f'{current_parent_dir.tree_path}\\{child_info[3]}', content_cache_path, child_info[10]) current_parent_dir.add_item(child_file) if content_cache_path: self.__synced_files_tree.add_recoverable_item_from_cache(child_file) else: if child_info[4] == 'application/vnd.google-apps.shortcut': target_stable_id = get_target_stable_id(self.__profile_path, child_info[1]) if target_stable_id: target = orphan_dirs.get(target_stable_id, None) if target: added_dirs[target_stable_id] = target del orphan_dirs[target_stable_id] else: target_info = get_item_info(self.__profile_path, target_stable_id) if target_info: if target_info[0] == 0: content_cache_path = get_file_content_cache_path( child_properties.get('content-entry', None), content_caches_paths) target = File(target_info[1], target_info[2], target_info[3], target_info[4], target_info[5], target_info[6], target_info[7], target_info[8], target_info[9], get_item_properties(self.__profile_path, target_info[1]), f'{current_parent_dir.tree_path}\\{target_info[3]}', content_cache_path, target_info[10]) else: target = Directory(target_info[1], target_info[2], target_info[3], target_info[4], target_info[5], target_info[6], target_info[7], target_info[8], target_info[9], get_item_properties(self.__profile_path, target_info[1]), f'{current_parent_dir.tree_path}\\{target_info[3]}', target_info[10]) added_dirs[target_stable_id] = target else: target = DummyItem(target_stable_id) self.__synced_files_tree.add_deleted_item(target) child = Link(child_info[1], child_info[2], child_info[3], child_info[4], child_info[5], child_info[6], child_info[7], child_info[8], child_info[9], child_properties, f'{current_parent_dir.tree_path}\\{child_info[3]}', target, child_info[10]) else: target = DummyItem('-1') child = Link(child_info[1], child_info[2], child_info[3], child_info[4], child_info[5], child_info[6], child_info[7], child_info[8], child_info[9], child_properties, f'{current_parent_dir.tree_path}\\{child_info[3]}', target, child_info[10]) else: child = orphan_dirs.get(child_id, None) if child: child.tree_path = f'{current_parent_dir.tree_path}\\{child.local_title}' del orphan_dirs[child_id] else: child = Directory(child_info[1], child_info[2], child_info[3], child_info[4], child_info[5], child_info[6], child_info[7], child_info[8], child_info[9], child_properties, f'{current_parent_dir.tree_path}\\{child_info[3]}', child_info[10]) added_dirs[child_id] = child current_parent_dir.add_item(child) # TODO: check if I can add a link in the shared with me for shared_with_me_item_info in get_shared_with_me_without_link(self.__profile_path): shared_with_me_item_properties = get_item_properties(self.__profile_path, shared_with_me_item_info[1]) if shared_with_me_item_info[0] == 0: content_cache_path = get_file_content_cache_path( shared_with_me_item_properties.get('content-entry', None), content_caches_paths) shared_with_me_file = File(shared_with_me_item_info[1], shared_with_me_item_info[2], shared_with_me_item_info[3], shared_with_me_item_info[4], shared_with_me_item_info[5], shared_with_me_item_info[6], shared_with_me_item_info[7], shared_with_me_item_info[8], shared_with_me_item_info[9], shared_with_me_item_properties, f'Shared with me\\{shared_with_me_item_info[3]}', content_cache_path, shared_with_me_item_info[10]) self.__synced_files_tree.add_shared_with_me_item(shared_with_me_file) if shared_with_me_file: 
self.__synced_files_tree.add_recoverable_item_from_cache(shared_with_me_file) else: shared_with_me_item = orphan_dirs.get(shared_with_me_item_info[1], None) if shared_with_me_item: del orphan_dirs[shared_with_me_item_info[1]] else: shared_with_me_item = Directory(shared_with_me_item_info[1], shared_with_me_item_info[2], shared_with_me_item_info[3], shared_with_me_item_info[4], shared_with_me_item_info[5], shared_with_me_item_info[6], shared_with_me_item_info[7], shared_with_me_item_info[8], shared_with_me_item_info[9], shared_with_me_item_properties, f'{current_parent_dir.tree_path}\\{shared_with_me_item_info[3]}', shared_with_me_item_info[10]) self.__synced_files_tree.add_shared_with_me_item(shared_with_me_item) for orphan_id, orphan_dir in orphan_dirs.items(): self.__synced_files_tree.add_orphan_item(orphan_dir) mirrored_items = get_mirrored_items(self.__profile_path) for item in mirrored_items: self.__synced_files_tree.add_mirrored_item( MirrorItem(item[0], item[1], item[2], item[3], item[4], item[5], item[6], item[7], item[8], item[9], item[10], item[11], item[12], item[13], item[14], item[15], item[16] ) )
for deleted_item in get_deleted_items(self.__profile_path):
5
2023-10-29 11:05:04+00:00
8k
zyang1580/CoLLM
train_collm_sasrec.py
[ { "identifier": "Config", "path": "minigpt4/common/config.py", "snippet": "class Config:\n def __init__(self, args):\n self.config = {}\n\n self.args = args\n\n # Register the config and configuration for setup\n registry.register(\"configuration\", self)\n\n user_config = self._build_opt_list(self.args.options)\n\n config = OmegaConf.load(self.args.cfg_path)\n\n runner_config = self.build_runner_config(config)\n model_config = self.build_model_config(config, **user_config)\n dataset_config = self.build_dataset_config(config)\n\n # Validate the user-provided runner configuration\n # model and dataset configuration are supposed to be validated by the respective classes\n # [TODO] validate the model/dataset configuration\n # self._validate_runner_config(runner_config)\n\n # Override the default configuration with user options.\n self.config = OmegaConf.merge(\n runner_config, model_config, dataset_config, user_config\n )\n\n def _validate_runner_config(self, runner_config):\n \"\"\"\n This method validates the configuration, such that\n 1) all the user specified options are valid;\n 2) no type mismatches between the user specified options and the config.\n \"\"\"\n runner_config_validator = create_runner_config_validator()\n runner_config_validator.validate(runner_config)\n\n def _build_opt_list(self, opts):\n opts_dot_list = self._convert_to_dot_list(opts)\n return OmegaConf.from_dotlist(opts_dot_list)\n\n @staticmethod\n def build_model_config(config, **kwargs):\n model = config.get(\"model\", None)\n assert model is not None, \"Missing model configuration file.\"\n\n model_cls = registry.get_model_class(model.arch)\n assert model_cls is not None, f\"Model '{model.arch}' has not been registered.\"\n\n model_type = kwargs.get(\"model.model_type\", None)\n if not model_type:\n model_type = model.get(\"model_type\", None)\n # else use the model type selected by user.\n\n assert model_type is not None, \"Missing model_type.\"\n\n model_config_path = model_cls.default_config_path(model_type=model_type)\n\n model_config = OmegaConf.create()\n # hierarchy override, customized config > default config\n model_config = OmegaConf.merge(\n model_config,\n OmegaConf.load(model_config_path),\n {\"model\": config[\"model\"]},\n )\n\n return model_config\n\n @staticmethod\n def build_runner_config(config):\n return {\"run\": config.run}\n\n @staticmethod\n def build_dataset_config(config):\n datasets = config.get(\"datasets\", None)\n if datasets is None:\n raise KeyError(\n \"Expecting 'datasets' as the root key for dataset configuration.\"\n )\n\n dataset_config = OmegaConf.create()\n\n for dataset_name in datasets:\n builder_cls = registry.get_builder_class(dataset_name)\n\n dataset_config_type = datasets[dataset_name].get(\"type\", \"default\")\n dataset_config_path = builder_cls.default_config_path(\n type=dataset_config_type\n )\n\n # hierarchy override, customized config > default config\n dataset_config = OmegaConf.merge(\n dataset_config,\n OmegaConf.load(dataset_config_path),\n {\"datasets\": {dataset_name: config[\"datasets\"][dataset_name]}},\n )\n\n return dataset_config\n\n def _convert_to_dot_list(self, opts):\n if opts is None:\n opts = []\n\n if len(opts) == 0:\n return opts\n\n has_equal = opts[0].find(\"=\") != -1\n\n if has_equal:\n return opts\n\n return [(opt + \"=\" + value) for opt, value in zip(opts[0::2], opts[1::2])]\n\n def get_config(self):\n return self.config\n\n @property\n def run_cfg(self):\n return self.config.run\n\n @property\n def datasets_cfg(self):\n return 
self.config.datasets\n\n @property\n def model_cfg(self):\n return self.config.model\n\n def pretty_print(self):\n logging.info(\"\\n===== Running Parameters =====\")\n logging.info(self._convert_node_to_json(self.config.run))\n\n logging.info(\"\\n====== Dataset Attributes ======\")\n datasets = self.config.datasets\n\n for dataset in datasets:\n if dataset in self.config.datasets:\n logging.info(f\"\\n======== {dataset} =======\")\n dataset_config = self.config.datasets[dataset]\n logging.info(self._convert_node_to_json(dataset_config))\n else:\n logging.warning(f\"No dataset named '{dataset}' in config. Skipping\")\n\n logging.info(f\"\\n====== Model Attributes ======\")\n logging.info(self._convert_node_to_json(self.config.model))\n\n def _convert_node_to_json(self, node):\n container = OmegaConf.to_container(node, resolve=True)\n return json.dumps(container, indent=4, sort_keys=True)\n\n def to_dict(self):\n return OmegaConf.to_container(self.config)" }, { "identifier": "get_rank", "path": "minigpt4/common/dist_utils.py", "snippet": "def get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()" }, { "identifier": "init_distributed_mode", "path": "minigpt4/common/dist_utils.py", "snippet": "def init_distributed_mode(args):\n if \"RANK\" in os.environ and \"WORLD_SIZE\" in os.environ:\n args.rank = int(os.environ[\"RANK\"])\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n args.gpu = int(os.environ[\"LOCAL_RANK\"])\n elif \"SLURM_PROCID\" in os.environ:\n args.rank = int(os.environ[\"SLURM_PROCID\"])\n args.gpu = args.rank % torch.cuda.device_count()\n else:\n print(\"Not using distributed mode\")\n args.distributed = False\n return\n\n args.distributed = True\n\n torch.cuda.set_device(args.gpu)\n args.dist_backend = \"nccl\"\n print(\n \"| distributed init (rank {}, world {}): {}\".format(\n args.rank, args.world_size, args.dist_url\n ),\n flush=True,\n )\n torch.distributed.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n timeout=datetime.timedelta(\n days=365\n ), # allow auto-downloading and de-compressing\n )\n torch.distributed.barrier()\n setup_for_distributed(args.rank == 0)" }, { "identifier": "GnnDataset", "path": "minigpt4/datasets/datasets/rec_gnndataset.py", "snippet": "class GnnDataset(BasicDataset):\n \"\"\"\n Dataset type for pytorch \\n\n Incldue graph information\n gowalla dataset\n \"\"\"\n def __init__(self,config, path=\"../data/gowalla\"):\n # train or test\n # cprint(f'loading [{path}]')\n print(\"loading: \", path)\n self.split = config.A_split\n self.folds = config.A_n_fold\n self.mode_dict = {'train': 0, \"test\": 1}\n self.mode = self.mode_dict['train']\n\n\n train_file = path+\"train_ood2.pkl\"\n\n valid_file = path+\"valid_ood2.pkl\"\n test_file = path + \"test_ood2.pkl\"\n self.path = path\n \n self.traindataSize = 0\n self.testDataSize = 0\n\n\n self.train = pd.read_pickle(train_file)[['uid','iid','label']]\n self.train.columns = ['user','item','label']\n self.valid = pd.read_pickle(valid_file)[['uid','iid','label']]\n self.valid.columns = ['user','item','label']\n self.test = pd.read_pickle(test_file)[['uid','iid','label']]\n self.test.columns = ['user','item','label']\n\n # self.train = pd.read_csv(train_file)[['user','item','lables']]\n # self.valid = pd.read_csv(valid_file)[['user','item','lables']]\n # self.test = pd.read_csv(test_file)[['user','item','lables']]\n\n self.m_users = 1 + 
max([self.train['user'].max(),self.valid['user'].max(),self.test['user'].max()])\n self.n_items = 1 + max([self.train['item'].max(),self.valid['item'].max(),self.test['item'].max()] )\n \n self.testDataSize = self.test.shape[0]\n self.validDataSize = self.valid.shape[0]\n self.train_size = self.train.shape[0]\n\n\n \n \n self.Graph = None\n print(f\"{self.train_size} interactions for normal training\")\n print(f\"{self.validDataSize} interactions for validation\")\n print(f\"{self.testDataSize} interactions for testing\")\n print(f\"{self.m_users} users, {self.n_items} items\")\n print(f\"{config.dataset} Sparsity : {(self.validDataSize + self.testDataSize+self.train_size) / self.m_users / self.n_items}\")\n\n # (users,items), bipartite graph\n # self.UserItemNet = csr_matrix((np.ones(len(self.trainUser)), (self.trainUser, self.trainItem)),\n # shape=(self.m_users, self.n_items))\n # self.users_D = np.array(self.UserItemNet.sum(axis=1)).squeeze()\n # self.users_D[self.users_D == 0.] = 1\n # self.items_D = np.array(self.UserItemNet.sum(axis=0)).squeeze()\n # self.items_D[self.items_D == 0.] = 1.\n # # pre-calculate\n # self._allPos = self.getUserPosItems(list(range(self.n_user)))\n # self.__testDict = self.__build_test()\n self._register_graph()\n \n print(\":%s is ready to go\"%(config.dataset))\n \n def _register_graph(self):\n self.getSparseGraph_mode_a2(\"graph\")\n \n\n \n @property\n def trainDataSize(self):\n return self.traindataSize\n \n @property\n def testDict(self):\n return self.__testDict\n\n @property\n def allPos(self):\n return self._allPos\n\n def _split_A_hat(self,A):\n A_fold = []\n fold_len = (self.m_users + self.n_items) // self.folds\n for i_fold in range(self.folds):\n start = i_fold*fold_len\n if i_fold == self.folds - 1:\n end = self.m_users + self.n_items\n else:\n end = (i_fold + 1) * fold_len\n A_fold.append(self._convert_sp_mat_to_sp_tensor(A[start:end]).coalesce().cuda())\n return A_fold\n\n def _convert_sp_mat_to_sp_tensor(self, X):\n coo = X.tocoo().astype(np.float32)\n row = torch.Tensor(coo.row).long()\n col = torch.Tensor(coo.col).long()\n index = torch.stack([row, col])\n data = torch.FloatTensor(coo.data)\n return torch.sparse_coo_tensor(index,data,torch.Size(coo.shape))\n \n\n \n def getSparseGraph_mode_a2(self,mode):\n pos_train = self.train[self.train['label']>0].values.copy()\n pos_train[:,1] += self.m_users\n self.trainUser = self.train['user'].values.squeeze()\n self.trainItem = self.train['item']\n print(\"loading adjacency matrix\")\n if self.Graph is None:\n try:\n pre_adj_mat = sp.load_npz(self.path + '/s_pre_adj_mat_'+mode+'.npz')\n print(\"successfully loaded...\")\n norm_adj = pre_adj_mat\n except :\n print(\"generating adjacency matrix\")\n s = time()\n pos_train_t = pos_train.copy()\n pos_train_t[:,0] = pos_train[:,1]\n pos_train_t[:,1] = pos_train[:,0]\n pos = np.concatenate([pos_train,pos_train_t],axis=0)\n\n adj_mat = sp.csr_matrix((pos[:,2], (pos[:,0],pos[:,1])), shape=(self.m_users+self.n_items, self.m_users+self.n_items))\n adj_mat = adj_mat.todok()\n rowsum = np.array(adj_mat.sum(axis=1))\n d_inv = np.power(rowsum, -0.5).flatten()\n d_inv[np.isinf(d_inv)] = 0.\n d_mat = sp.diags(d_inv)\n \n norm_adj = d_mat.dot(adj_mat)\n norm_adj = norm_adj.dot(d_mat)\n norm_adj = norm_adj.tocsr()\n end = time()\n print(f\"costing {end-s}s, saved norm_mat...\")\n sp.save_npz(self.path + '/s_pre_adj_mat_'+mode+'.npz', norm_adj)\n\n if self.split == True:\n self.Graph = self._split_A_hat(norm_adj)\n print(\"done split matrix\")\n else:\n self.Graph 
= self._convert_sp_mat_to_sp_tensor(norm_adj)\n self.Graph = self.Graph.coalesce().cuda()\n print(\"don't split the matrix\")\n return self.Graph\n\n\n\n\n def __build_test(self):\n \"\"\"\n return:\n dict: {user: [items]}\n \"\"\"\n test_data = {}\n for i, item in enumerate(self.testItem):\n user = self.testUser[i]\n if test_data.get(user):\n test_data[user].append(item)\n else:\n test_data[user] = [item]\n return test_data\n\n def getUserItemFeedback(self, users, items):\n \"\"\"\n users:\n shape [-1]\n items:\n shape [-1]\n return:\n feedback [-1]\n \"\"\"\n # print(self.UserItemNet[users, items])\n return np.array(self.UserItemNet[users, items]).astype('uint8').reshape((-1,))\n\n def getUserPosItems(self, users):\n posItems = []\n for user in users:\n posItems.append(self.UserItemNet[user].nonzero()[1])\n return posItems\n \n\n def generate_train_dataloader(self,batch_size=1024):\n '''\n generate minibatch data for full training and retrianing\n '''\n data = torch.from_numpy(self.train[['user','item','lables']].values)\n train_loader = torch.utils.data.DataLoader(data,shuffle=True,batch_size=batch_size,drop_last=False,num_workers=2)\n return train_loader" }, { "identifier": "setup_logger", "path": "minigpt4/common/logger.py", "snippet": "def setup_logger():\n logging.basicConfig(\n level=logging.INFO if dist_utils.is_main_process() else logging.WARN,\n format=\"%(asctime)s [%(levelname)s] %(message)s\",\n handlers=[logging.StreamHandler()],\n )" }, { "identifier": "LinearWarmupCosineLRScheduler", "path": "minigpt4/common/optims.py", "snippet": "class LinearWarmupCosineLRScheduler:\n def __init__(\n self,\n optimizer,\n max_epoch,\n iters_per_epoch,\n min_lr,\n init_lr,\n warmup_steps=0,\n warmup_start_lr=-1,\n **kwargs\n ):\n self.optimizer = optimizer\n\n self.max_epoch = max_epoch\n self.iters_per_epoch = iters_per_epoch\n self.min_lr = min_lr\n\n self.init_lr = init_lr\n self.warmup_steps = warmup_steps\n self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr\n\n def step(self, cur_epoch, cur_step):\n total_cur_step = cur_epoch * self.iters_per_epoch + cur_step\n if total_cur_step < self.warmup_steps:\n warmup_lr_schedule(\n step=cur_step,\n optimizer=self.optimizer,\n max_step=self.warmup_steps,\n init_lr=self.warmup_start_lr,\n max_lr=self.init_lr,\n )\n else:\n cosine_lr_schedule(\n epoch=total_cur_step,\n optimizer=self.optimizer,\n max_epoch=self.max_epoch * self.iters_per_epoch,\n init_lr=self.init_lr,\n min_lr=self.min_lr,\n )" }, { "identifier": "LinearWarmupStepLRScheduler", "path": "minigpt4/common/optims.py", "snippet": "class LinearWarmupStepLRScheduler:\n def __init__(\n self,\n optimizer,\n max_epoch,\n min_lr,\n init_lr,\n decay_rate=1,\n warmup_start_lr=-1,\n warmup_steps=0,\n iters_per_epoch=None,\n **kwargs\n ):\n self.optimizer = optimizer\n\n self.max_epoch = max_epoch\n self.min_lr = min_lr\n self.iters_per_epoch = iters_per_epoch\n\n self.decay_rate = decay_rate\n\n self.init_lr = init_lr\n self.warmup_steps = warmup_steps\n self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr\n\n def step(self, cur_epoch, cur_step):\n if cur_epoch == 0:\n warmup_lr_schedule(\n step=cur_step,\n optimizer=self.optimizer,\n max_step=self.warmup_steps,\n init_lr=self.warmup_start_lr,\n max_lr=self.init_lr,\n )\n else:\n step_lr_schedule(\n epoch=cur_epoch,\n optimizer=self.optimizer,\n init_lr=self.init_lr,\n min_lr=self.min_lr,\n decay_rate=self.decay_rate,\n )" }, { "identifier": "registry", "path": "minigpt4/common/registry.py", 
"snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):" }, { "identifier": "now", "path": "minigpt4/common/utils.py", "snippet": "def now():\n from datetime import datetime\n\n return datetime.now().strftime(\"%Y%m%d%H%M\")[:-1]" } ]
import argparse import os import random import numpy as np import torch import torch.backends.cudnn as cudnn import minigpt4.tasks as tasks import pandas as pd from minigpt4.common.config import Config from minigpt4.common.dist_utils import get_rank, init_distributed_mode from minigpt4.datasets.datasets.rec_gnndataset import GnnDataset from minigpt4.common.logger import setup_logger from minigpt4.common.optims import ( LinearWarmupCosineLRScheduler, LinearWarmupStepLRScheduler, ) from minigpt4.common.registry import registry from minigpt4.common.utils import now from minigpt4.datasets.builders import * from minigpt4.models import * from minigpt4.processors import * from minigpt4.runners import * from minigpt4.tasks import * from torch.distributed.elastic.multiprocessing.errors import *
5111
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ # import os # os.environ['CURL_CA_BUNDLE'] = '' # os.environ["CUDA_VISIBLE_DEVICES"]="4" # imports modules for registration def parse_args(): parser = argparse.ArgumentParser(description="Training") # parser.add_argument("--cfg-path", required=True, help="path to configuration file.") parser.add_argument("--cfg-path", default='train_configs/minigpt4rec_pretrain_sasrec_ood_cc.yaml', help="path to configuration file.") parser.add_argument( "--options", nargs="+", help="override some settings in the used config, the key-value pair " "in xxx=yyy format will be merged into config file (deprecate), " "change to --cfg-options instead.", ) args = parser.parse_args() # if 'LOCAL_RANK' not in os.environ: # os.environ['LOCAL_RANK'] = str(args.local_rank) return args def setup_seeds(config): seed = config.run_cfg.seed + get_rank() random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) cudnn.benchmark = False cudnn.deterministic = True def get_runner_class(cfg): """ Get runner class from config. Default to epoch-based runner. """ runner_cls = registry.get_runner_class(cfg.run_cfg.get("runner", "rec_runner_base")) return runner_cls @record def main(): # allow auto-dl completes on main process without timeout when using NCCL backend. # os.environ["NCCL_BLOCKING_WAIT"] = "1" # set before init_distributed_mode() to ensure the same job_id shared across all ranks. job_id = now()
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ # import os # os.environ['CURL_CA_BUNDLE'] = '' # os.environ["CUDA_VISIBLE_DEVICES"]="4" # imports modules for registration def parse_args(): parser = argparse.ArgumentParser(description="Training") # parser.add_argument("--cfg-path", required=True, help="path to configuration file.") parser.add_argument("--cfg-path", default='train_configs/minigpt4rec_pretrain_sasrec_ood_cc.yaml', help="path to configuration file.") parser.add_argument( "--options", nargs="+", help="override some settings in the used config, the key-value pair " "in xxx=yyy format will be merged into config file (deprecate), " "change to --cfg-options instead.", ) args = parser.parse_args() # if 'LOCAL_RANK' not in os.environ: # os.environ['LOCAL_RANK'] = str(args.local_rank) return args def setup_seeds(config): seed = config.run_cfg.seed + get_rank() random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) cudnn.benchmark = False cudnn.deterministic = True def get_runner_class(cfg): """ Get runner class from config. Default to epoch-based runner. """ runner_cls = registry.get_runner_class(cfg.run_cfg.get("runner", "rec_runner_base")) return runner_cls @record def main(): # allow auto-dl completes on main process without timeout when using NCCL backend. # os.environ["NCCL_BLOCKING_WAIT"] = "1" # set before init_distributed_mode() to ensure the same job_id shared across all ranks. job_id = now()
cfg = Config(parse_args())
0
2023-10-29 12:47:25+00:00
8k
tobagin/whakarere
whakarere/managers/whatsapp.py
[ { "identifier": "UnknownContact", "path": "whakarere/images/unknown_contact.py", "snippet": "class UnknownContact:\n base64image = \"PCFET0NUWVBFIHN2ZyBQVUJMSUMgIi0vL1czQy8vRFREIFNWRyAxLjEvL0VOIiAiaHR0cDovL3d3dy53My5vcmcvR3JhcGhpY3MvU1ZHLzEuMS9EVEQvc3ZnMTEuZHRkIj4KPCEtLSBVcGxvYWRlZCB0bzogU1ZHIFJlcG8sIHd3dy5zdmdyZXBvLmNvbSwgVHJhbnNmb3JtZWQgYnk6IFNWRyBSZXBvIE1peGVyIFRvb2xzIC0tPgo8c3ZnIHdpZHRoPSIyNTZweCIgaGVpZ2h0PSIyNTZweCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciPgoNPGcgaWQ9IlNWR1JlcG9fYmdDYXJyaWVyIiBzdHJva2Utd2lkdGg9IjAiLz4KDTxnIGlkPSJTVkdSZXBvX3RyYWNlckNhcnJpZXIiIHN0cm9rZS1saW5lY2FwPSJyb3VuZCIgc3Ryb2tlLWxpbmVqb2luPSJyb3VuZCIvPgoNPGcgaWQ9IlNWR1JlcG9faWNvbkNhcnJpZXIiPiA8cGF0aCBkPSJNMTIgMkM2LjQ3NzE1IDIgMiA2LjQ3NzE1IDIgMTJDMiAxNy41MjI4IDYuNDc3MTUgMjIgMTIgMjJDMTcuNTIyOCAyMiAyMiAxNy41MjI4IDIyIDEyQzIyIDYuNDc3MTUgMTcuNTIyOCAyIDEyIDJaIiBmaWxsPSIjNDI5NkZGIi8+IDxwYXRoIGQ9Ik0xMi4wMDAxIDZDMTAuMzQzMyA2IDkuMDAwMTIgNy4zNDMxNSA5LjAwMDEyIDlDOS4wMDAxMiAxMC42NTY5IDEwLjM0MzMgMTIgMTIuMDAwMSAxMkMxMy42NTcgMTIgMTUuMDAwMSAxMC42NTY5IDE1LjAwMDEgOUMxNS4wMDAxIDcuMzQzMTUgMTMuNjU3IDYgMTIuMDAwMSA2WiIgZmlsbD0iIzE1MkM3MCIvPiA8cGF0aCBkPSJNMTcuODk0OCAxNi41NTI4QzE4LjAzNTYgMTYuODM0MyAxOC4wMzU2IDE3LjE2NTcgMTcuODk0OCAxNy40NDczQzE3LjkwMzMgMTcuNDI5NyAxNy44OTQxIDE3LjQ0ODcgMTcuODk0MSAxNy40NDg3TDE3Ljg5MzMgMTcuNDUwMkwxNy44OTE4IDE3LjQ1MzJMMTcuODg4MyAxNy40NkwxNy44ODAxIDE3LjQ3NTZDMTcuODc0IDE3LjQ4NzEgMTcuODY2NyAxNy41MDA0IDE3Ljg1ODIgMTcuNTE1NUMxNy44NDEgMTcuNTQ1OCAxNy44MTg3IDE3LjU4MzIgMTcuNzkwNyAxNy42MjY3QzE3LjczNDggMTcuNzEzOCAxNy42NTU5IDE3LjgyNTQgMTcuNTQ5OCAxNy45NTI3QzE3LjMzNyAxOC4yMDggMTcuMDE2NCAxOC41MjQ1IDE2LjU1NSAxOC44MzIxQzE1LjYyMyAxOS40NTM0IDE0LjE3NTIgMjAgMTIuMDAwMiAyMEM4LjMxNTA3IDIwIDYuNzY1NjIgMTguNDMwNCA2LjI2NjY1IDE3LjcxMTVDNS45NjQ3NiAxNy4yNzY1IDUuOTk4MTkgMTYuNzY4MyA2LjE4MDc5IDE2LjQwMzFDNi45MTcxOCAxNC45MzAzIDguNDIyNDcgMTQgMTAuMDY5MSAxNEgxMy43NjQzQzE1LjUxMzUgMTQgMTcuMTEyNSAxNC45ODgzIDE3Ljg5NDggMTYuNTUyOFoiIGZpbGw9IiMxNTJDNzAiLz4gPC9nPgoNPC9zdmc+\"" }, { "identifier": "WhatsappMessengerPage", "path": "whakarere/pages/whatsapp.py", "snippet": "class WhatsappMessengerPage(Adw.NavigationPage):\n def __init__(self, app_manager, session_id):\n super().__init__()\n self.set_title(\"Whakarere\")\n self.app_manager = app_manager\n self.session_id = session_id\n\n # Create TitleBar Widget\n self.window_titlebar_widget = WindowTitlebarWidget()\n self.window_titlebar_widget.set_title(\"Whakarere\")\n self.window_titlebar_widget.set_subtitle(f\"Current Session: {self.app_manager.whatsapp_manager.get_user_name(self.session_id)}\")\n self.set_can_pop(True)\n\n # Create Main Menu Button Widget\n self.button_settings_menu = MainMenuButtonWidget()\n\n # Create HeaderBar\n self.page_headerbar = Adw.HeaderBar()\n self.page_headerbar.set_title_widget(self.window_titlebar_widget)\n self.page_headerbar.pack_end(self.button_settings_menu)\n\n # Create Chat List \n self.chat_list = Gio.ListStore(item_type=ChatItem)\n\n self.check_session_status_thread = threading.Thread(target=self.load_chats,)\n self.check_session_status_thread.start()\n\n # Factory function for creating list items\n factory = Gtk.SignalListItemFactory.new()\n factory.connect('bind', self.bind_function)\n\n # Create SingleSelection\n self.selected_item = None\n self.selected_item_position = None\n self.selection_model = Gtk.SingleSelection.new(self.chat_list)\n self.selection_model.connect(\"selection-changed\", self.on_selection_changed)\n\n self.chat_list.connect(\"items-changed\", self.on_items_changed)\n\n # Create ListView\n 
self.list_view = Gtk.ListView.new(self.selection_model, factory)\n\n # Create ScrolledWindow\n scrolled_window = Gtk.ScrolledWindow()\n scrolled_window.set_vexpand(True)\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)\n scrolled_window.set_child(self.list_view) # Set ListView as child of ScrolledWindow\n\n # Create Sidebar for SplitView\n self.sidebar = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self.sidebar.set_vexpand(True)\n self.sidebar.append(scrolled_window)\n\n # Create Main Content\n self.content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n\n # Create SplitView\n self.split_view = Adw.OverlaySplitView()\n self.split_view.set_hexpand(True)\n self.split_view.set_vexpand(True)\n self.split_view.set_valign(Gtk.Align.FILL)\n self.split_view.set_halign(Gtk.Align.FILL)\n self.split_view.set_sidebar(self.sidebar)\n self.split_view.set_content(self.content)\n self.split_view.set_min_sidebar_width(270)\n\n # Create page content\n self.page_content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n self.page_content.append(self.page_headerbar)\n self.page_content.append(self.split_view)\n\n # Set page content\n self.set_child(self.page_content)\n\n def load_chats(self):\n chats = self.app_manager.whatsapp_manager.get_chats_by_id(self.session_id)\n for chat in chats:\n if chat['id']['server'] == 'broadcast':\n continue\n chat_id = chat[\"id\"][\"_serialized\"]\n try:\n if(chat['name']):\n chat_name = chat[\"name\"]\n except:\n chat_name = \"Unknown\"\n chat_picture = self.app_manager.whatsapp_manager.get_chat_avatar(chat_id)\n\n if chat['lastMessage']['_data']['type'] == 'chat':\n last_message_body = chat['lastMessage']['_data']['body']\n elif chat['lastMessage']['_data']['type'] == 'call_log' and chat['lastMessage']['_data']['subtype'] == 'miss_video':\n last_message_body = '📵 Missed Video Call'\n elif chat['lastMessage']['_data']['type'] == 'call_log' and chat['lastMessage']['_data']['subtype'] == 'miss_audio':\n last_message_body = '📵 Missed Audio Call'\n elif chat['lastMessage']['_data']['type'] == 'call_log' and chat['lastMessage']['_data']['subtype'] == 'video':\n last_message_body = '📹 Video Call'\n elif chat['lastMessage']['_data']['type'] == 'call_log' and chat['lastMessage']['_data']['subtype'] == 'audio':\n last_message_body = '📞 Audio Call'\n elif chat['lastMessage']['_data']['type'] == 'image':\n last_message_body = '🖼️ Image'\n elif chat['lastMessage']['_data']['type'] == 'document':\n last_message_body = '📄 Document'\n elif chat['lastMessage']['_data']['type'] == 'sticker':\n last_message_body = '🤪 Sticker'\n elif chat['lastMessage']['_data']['type'] == 'ptt':\n last_message_body = '🎤 Voice Message'\n elif chat['lastMessage']['_data']['type'] == 'location':\n last_message_body = '📍 Location'\n elif chat['lastMessage']['_data']['type'] == 'vcard':\n last_message_body = '👤 Contact'\n else:\n last_message_body = '🤔 Unknown Message'\n\n is_group = chat[\"isGroup\"]\n\n chat_timestamp = chat[\"timestamp\"]\n if chat['lastMessage']['_data']['hasReaction']:\n if chat['lastMessage']['_data']['id']['fromMe']:\n last_messager_user = self.app_manager.whatsapp_manager.get_user_name(self.session_id)\n else:\n last_messager_user = self.app_manager.whatsapp_manager.get_contact_info(chat['lastMessage']['_data']['id']['participant']['_serialized'], self.session_id)\n else:\n if is_group:\n last_messager_user = self.app_manager.whatsapp_manager.get_contact_info(chat['lastMessage']['_data']['id']['participant']['_serialized'], self.session_id)\n else:\n if 
chat['lastMessage']['_data']['id']['fromMe']:\n last_messager_user = self.app_manager.whatsapp_manager.get_user_name(self.session_id)\n else:\n last_messager_user = self.app_manager.whatsapp_manager.get_contact_info(chat['lastMessage']['_data']['id']['_serialized'], self.session_id)\n unread_messages = chat[\"unreadCount\"]\n chat_item = ChatItem(chat_id, chat_name, chat_picture, last_message_body, chat_timestamp, last_messager_user, unread_messages, is_group)\n self.chat_list.append(chat_item)\n\n\n def on_items_changed(self, list_store, position, removed, added):\n # Update Chat window\n # print(\"items changed redo list_view\")\n pass\n\n def on_selection_changed(self, selection_model, positon, n_items):\n # Updating selection\n self.selected_item_position = selection_model.get_selected()\n self.selected_item = selection_model.get_selected_item()\n \n # Update Chat Window\n # print(\"new item selected, update chat window\")\n pass\n\n def bind_function(self, factory, list_item):\n model = list_item.get_item()\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)\n avatar = Adw.Avatar()\n avatar.set_size(50)\n avatar.set_margin_start(5)\n avatar.set_halign(Gtk.Align.START)\n avatar.set_custom_image(model.chat_picture)\n hbox.append(avatar)\n vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)\n vbox.set_halign(Gtk.Align.START)\n vbox.set_valign(Gtk.Align.CENTER)\n vbox.set_hexpand(True)\n label_name = Gtk.Label()\n label_name.set_use_markup(True)\n\n # Escape any markup-sensitive characters\n escaped_name = GLib.markup_escape_text(model.chat_name[:20])\n\n # Set label text with markup for font size\n label_name.set_markup(\"<span font='9'><b>\" + escaped_name + \"</b></span>\")\n \n label_name.set_halign(Gtk.Align.START)\n label_name.set_valign(Gtk.Align.CENTER)\n label_name.set_hexpand(True)\n label_name.set_vexpand(True)\n label_last_message = Gtk.Label()\n\n # Escape any markup-sensitive characters\n escaped_last = GLib.markup_escape_text(model.last_message_body[:50])\n\n # Set label text with markup for font size\n label_last_message.set_markup(\"<span font='8'>\" + escaped_last + \"</span>\")\n\n label_last_message.set_justify(True)\n label_last_message.set_use_markup(True)\n label_last_message.set_halign(Gtk.Align.START)\n label_last_message.set_valign(Gtk.Align.CENTER)\n label_last_message.set_hexpand(True)\n label_last_message.set_vexpand(True)\n\n # Set label properties for wrapping and font size\n label_last_message.set_wrap(True)\n label_last_message.set_wrap_mode(Pango.WrapMode.WORD)\n label_last_message.set_lines(2)\n label_last_message.set_max_width_chars(50) # Adjust the value as needed\n\n vbox.append(label_name)\n vbox.append(label_last_message)\n hbox.append(vbox)\n vbox_end = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)\n vbox_end.set_halign(Gtk.Align.END)\n vbox_end.set_valign(Gtk.Align.CENTER)\n\n now = datetime.now()\n timestamp = datetime.fromtimestamp(int(model.chat_timestamp))\n time_difference = now - timestamp\n days = time_difference.days\n hours = time_difference.seconds // 3600\n minutes = (time_difference.seconds // 60) % 60\n seconds = time_difference.seconds % 60\n label_timestamp = Gtk.Label()\n label_timestamp.set_use_markup(True)\n\n if days > 0:\n escaped_timestamp = timestamp.strftime('%d')\n else:\n escaped_timestamp = timestamp.strftime('%H:%M')\n\n # Set label text with markup for font size\n label_timestamp.set_markup(\"<span font='6'>\" + escaped_timestamp + \"</span>\")\n\n 
label_timestamp.set_halign(Gtk.Align.END) \n label_timestamp.set_valign(Gtk.Align.CENTER)\n label_timestamp.set_margin_top(5)\n label_timestamp.set_margin_end(10)\n label_timestamp.set_hexpand(True)\n label_timestamp.set_vexpand(True)\n chat_menu = Gtk.MenuButton()\n chat_menu.set_icon_name(\"go-down-symbolic\")\n chat_menu.set_halign(Gtk.Align.START)\n chat_menu.set_valign(Gtk.Align.END)\n chat_menu.set_has_frame(False)\n chat_menu.set_direction(Gtk.ArrowType.DOWN)\n chat_menu.set_popover(Gtk.Popover())\n chat_menu.get_popover().set_position(Gtk.PositionType.BOTTOM)\n chat_menu.get_popover().set_has_arrow(True)\n chat_menu.get_popover().set_halign(Gtk.Align.START)\n chat_menu.get_popover().set_valign(Gtk.Align.END)\n chat_menu.get_popover().set_size_request(200, 200)\n chat_menu_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n chat_menu_box.vexpand = True\n label_archive_chat = Gtk.Label(label=\"Archive Chat\")\n label_archive_chat.set_vexpand(True)\n label_archive_chat.set_hexpand(True)\n chat_menu_box.append(label_archive_chat)\n label_mute_notifications = Gtk.Label(label=\"Mute Notifications\")\n label_mute_notifications.set_vexpand(True)\n label_mute_notifications.set_hexpand(True)\n chat_menu_box.append(label_mute_notifications)\n label_exit_group = Gtk.Label(label=\"Exit Group\")\n label_exit_group.set_vexpand(True)\n label_exit_group.set_hexpand(True)\n chat_menu_box.append(label_exit_group)\n label_pin_chat = Gtk.Label(label=\"Pin Chat\")\n label_pin_chat.set_vexpand(True)\n label_pin_chat.set_hexpand(True)\n chat_menu_box.append(label_pin_chat)\n label_mark_as_read = Gtk.Label(label=\"Mark as read\")\n label_mark_as_read.set_vexpand(True)\n label_mark_as_read.set_hexpand(True)\n chat_menu_box.append(label_mark_as_read)\n chat_menu.get_popover().set_child(chat_menu_box)\n vbox_end.append(label_timestamp)\n vbox_end.append(chat_menu)\n hbox.append(vbox_end)\n list_item.set_child(hbox)" } ]
import gi, sqlite3, os, threading, requests, base64 from gi.repository import Gdk, GdkPixbuf, Gio, GLib from whakarere.images.unknown_contact import UnknownContact from whakarere.pages.whatsapp import WhatsappMessengerPage
5003
gi.require_version("Gtk", "4.0") gi.require_version("Adw", "1") gi.require_version("GdkPixbuf", "2.0") gi.require_version("Gdk", "4.0") class WhatsAppSessionManager: def __init__(self, window): self.window = window api_key = "your_global_api_key_here" self.api_url = "http://localhost:3000" self.headers = { 'x-api-key': api_key } self.whatsapp_messenger_pages = {} self.chats = {} # Changed to a dictionary to map session IDs to chats self.chats_avatar = {} # Presumably for future functionality self.databases = {} # Changed to a dictionary to map session IDs to databases self.chat_messages = {} # Presumably for future functionality self.number = 0 def load_or_create_databases(self): db_directory = os.path.expanduser("~/.config/whakarere/dbs") # Ensure the database directory exists if not os.path.exists(db_directory): os.makedirs(db_directory) for session_id in self.window.session_manager.session_ids: db_file = f"{session_id}.db" db_path = os.path.join(db_directory, db_file) # Connect to the SQLite database conn = sqlite3.connect(db_path) cursor = conn.cursor() # Store the connection in the dictionary self.databases[session_id] = conn # Close the cursor cursor.close() def initialize(self): sessions_thread = threading.Thread(target=self.initialize_sessions) sessions_thread.start() def initialize_sessions(self): for session in self.window.session_manager.session_ids: if self.window.session_manager.check_session_status(session): result = self.get_chats(session) # Fixed assignment self.chats[session] = result # Store chats indexed by session ID for chat in result: chat_id = chat["id"]["_serialized"] if chat["isGroup"]: print(chat_id) try: self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session) except: trimmed_chat_id = chat_id[-15:] print(trimmed_chat_id) self.chats[trimmed_chat_id] = self.chat_fetch_messages(trimmed_chat_id, session) else: self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session) self.chats_avatar[chat_id] = self.get_user_profile_picture(chat_id, session) self.window.whatsapp_manager.add_whatsapp_messenger_page(session) def initialize_session_by_id(self, session_id): if self.window.session_manager.check_session_status(session_id): result = self.get_chats(session_id) # Fixed assignment self.chats[session_id] = result # Store chats indexed by session ID for chat in result: chat_id = chat["id"]["_serialized"] if chat["isGroup"]: print(chat_id) try: self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session_id) except: trimmed_chat_id = chat_id[-15:] print(trimmed_chat_id) self.chats[trimmed_chat_id] = self.chat_fetch_messages(trimmed_chat_id, session_id) else: self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session_id) self.chats_avatar[chat_id] = self.get_user_profile_picture(chat_id, session_id) if session_id not in self.whatsapp_sessions_pages:
gi.require_version("Gtk", "4.0") gi.require_version("Adw", "1") gi.require_version("GdkPixbuf", "2.0") gi.require_version("Gdk", "4.0") class WhatsAppSessionManager: def __init__(self, window): self.window = window api_key = "your_global_api_key_here" self.api_url = "http://localhost:3000" self.headers = { 'x-api-key': api_key } self.whatsapp_messenger_pages = {} self.chats = {} # Changed to a dictionary to map session IDs to chats self.chats_avatar = {} # Presumably for future functionality self.databases = {} # Changed to a dictionary to map session IDs to databases self.chat_messages = {} # Presumably for future functionality self.number = 0 def load_or_create_databases(self): db_directory = os.path.expanduser("~/.config/whakarere/dbs") # Ensure the database directory exists if not os.path.exists(db_directory): os.makedirs(db_directory) for session_id in self.window.session_manager.session_ids: db_file = f"{session_id}.db" db_path = os.path.join(db_directory, db_file) # Connect to the SQLite database conn = sqlite3.connect(db_path) cursor = conn.cursor() # Store the connection in the dictionary self.databases[session_id] = conn # Close the cursor cursor.close() def initialize(self): sessions_thread = threading.Thread(target=self.initialize_sessions) sessions_thread.start() def initialize_sessions(self): for session in self.window.session_manager.session_ids: if self.window.session_manager.check_session_status(session): result = self.get_chats(session) # Fixed assignment self.chats[session] = result # Store chats indexed by session ID for chat in result: chat_id = chat["id"]["_serialized"] if chat["isGroup"]: print(chat_id) try: self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session) except: trimmed_chat_id = chat_id[-15:] print(trimmed_chat_id) self.chats[trimmed_chat_id] = self.chat_fetch_messages(trimmed_chat_id, session) else: self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session) self.chats_avatar[chat_id] = self.get_user_profile_picture(chat_id, session) self.window.whatsapp_manager.add_whatsapp_messenger_page(session) def initialize_session_by_id(self, session_id): if self.window.session_manager.check_session_status(session_id): result = self.get_chats(session_id) # Fixed assignment self.chats[session_id] = result # Store chats indexed by session ID for chat in result: chat_id = chat["id"]["_serialized"] if chat["isGroup"]: print(chat_id) try: self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session_id) except: trimmed_chat_id = chat_id[-15:] print(trimmed_chat_id) self.chats[trimmed_chat_id] = self.chat_fetch_messages(trimmed_chat_id, session_id) else: self.chat_messages[chat_id] = self.chat_fetch_messages(chat_id, session_id) self.chats_avatar[chat_id] = self.get_user_profile_picture(chat_id, session_id) if session_id not in self.whatsapp_sessions_pages:
self.whatsapp_sessions_pages[session_id] = WhatsappMessengerPage(self, session_id)
1
2023-10-29 15:46:50+00:00
8k
KHU-VLL/CAST
util_tools/video_transforms.py
[ { "identifier": "rand_augment_transform", "path": "util_tools/rand_augment.py", "snippet": "def rand_augment_transform(config_str, hparams):\n \"\"\"\n RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719\n\n Create a RandAugment transform\n :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by\n dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining\n sections, not order sepecific determine\n 'm' - integer magnitude of rand augment\n 'n' - integer num layers (number of transform ops selected per image)\n 'w' - integer probabiliy weight index (index of a set of weights to influence choice of op)\n 'mstd' - float std deviation of magnitude noise applied\n 'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)\n Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5\n 'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2\n :param hparams: Other hparams (kwargs) for the RandAugmentation scheme\n :return: A PyTorch compatible Transform\n \"\"\"\n magnitude = _MAX_LEVEL # default to _MAX_LEVEL for magnitude (currently 10)\n num_layers = 2 # default to 2 ops per image\n weight_idx = None # default to no probability weights for op choice\n transforms = _RAND_TRANSFORMS\n config = config_str.split(\"-\")\n assert config[0] == \"rand\"\n config = config[1:]\n for c in config:\n cs = re.split(r\"(\\d.*)\", c)\n if len(cs) < 2:\n continue\n key, val = cs[:2]\n if key == \"mstd\":\n # noise param injected via hparams for now\n hparams.setdefault(\"magnitude_std\", float(val))\n elif key == \"inc\":\n if bool(val):\n transforms = _RAND_INCREASING_TRANSFORMS\n elif key == \"m\":\n magnitude = int(val)\n elif key == \"n\":\n num_layers = int(val)\n elif key == \"w\":\n weight_idx = int(val)\n else:\n assert NotImplementedError\n ra_ops = rand_augment_ops(\n magnitude=magnitude, hparams=hparams, transforms=transforms\n )\n choice_weights = (\n None if weight_idx is None else _select_rand_weights(weight_idx)\n )\n return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)" }, { "identifier": "RandomErasing", "path": "util_tools/random_erasing.py", "snippet": "class RandomErasing:\n \"\"\"Randomly selects a rectangle region in an image and erases its pixels.\n 'Random Erasing Data Augmentation' by Zhong et al.\n See https://arxiv.org/pdf/1708.04896.pdf\n This variant of RandomErasing is intended to be applied to either a batch\n or single image tensor after it has been normalized by dataset mean and std.\n Args:\n probability: Probability that the Random Erasing operation will be performed.\n min_area: Minimum percentage of erased area wrt input image area.\n max_area: Maximum percentage of erased area wrt input image area.\n min_aspect: Minimum aspect ratio of erased area.\n mode: pixel color mode, one of 'const', 'rand', or 'pixel'\n 'const' - erase block is constant color of 0 for all channels\n 'rand' - erase block is same per-channel random (normal) color\n 'pixel' - erase block is per-pixel random (normal) color\n max_count: maximum number of erasing blocks per image, area per box is scaled by count.\n per-image count is randomly chosen between 1 and this value.\n \"\"\"\n\n def __init__(\n self,\n probability=0.5,\n min_area=0.02,\n max_area=1 / 3,\n min_aspect=0.3,\n max_aspect=None,\n mode=\"const\",\n 
min_count=1,\n max_count=None,\n num_splits=0,\n device=\"cuda\",\n cube=True,\n ):\n self.probability = probability\n self.min_area = min_area\n self.max_area = max_area\n max_aspect = max_aspect or 1 / min_aspect\n self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))\n self.min_count = min_count\n self.max_count = max_count or min_count\n self.num_splits = num_splits\n mode = mode.lower()\n self.rand_color = False\n self.per_pixel = False\n self.cube = cube\n if mode == \"rand\":\n self.rand_color = True # per block random normal\n elif mode == \"pixel\":\n self.per_pixel = True # per pixel random normal\n else:\n assert not mode or mode == \"const\"\n self.device = device\n\n def _erase(self, img, chan, img_h, img_w, dtype):\n if random.random() > self.probability:\n return\n area = img_h * img_w\n count = (\n self.min_count\n if self.min_count == self.max_count\n else random.randint(self.min_count, self.max_count)\n )\n for _ in range(count):\n for _ in range(10):\n target_area = (\n random.uniform(self.min_area, self.max_area) * area / count\n )\n aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n if w < img_w and h < img_h:\n top = random.randint(0, img_h - h)\n left = random.randint(0, img_w - w)\n img[:, top : top + h, left : left + w] = _get_pixels(\n self.per_pixel,\n self.rand_color,\n (chan, h, w),\n dtype=dtype,\n device=self.device,\n )\n break\n\n def _erase_cube(\n self,\n img,\n batch_start,\n batch_size,\n chan,\n img_h,\n img_w,\n dtype,\n ):\n if random.random() > self.probability:\n return\n area = img_h * img_w\n count = (\n self.min_count\n if self.min_count == self.max_count\n else random.randint(self.min_count, self.max_count)\n )\n for _ in range(count):\n for _ in range(100):\n target_area = (\n random.uniform(self.min_area, self.max_area) * area / count\n )\n aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n if w < img_w and h < img_h:\n top = random.randint(0, img_h - h)\n left = random.randint(0, img_w - w)\n for i in range(batch_start, batch_size):\n img_instance = img[i]\n img_instance[\n :, top : top + h, left : left + w\n ] = _get_pixels(\n self.per_pixel,\n self.rand_color,\n (chan, h, w),\n dtype=dtype,\n device=self.device,\n )\n break\n\n def __call__(self, input):\n if len(input.size()) == 3:\n self._erase(input, *input.size(), input.dtype)\n else:\n batch_size, chan, img_h, img_w = input.size()\n # skip first slice of batch if num_splits is set (for clean portion of samples)\n batch_start = (\n batch_size // self.num_splits if self.num_splits > 1 else 0\n )\n if self.cube:\n self._erase_cube(\n input,\n batch_start,\n batch_size,\n chan,\n img_h,\n img_w,\n input.dtype,\n )\n else:\n for i in range(batch_start, batch_size):\n self._erase(input[i], chan, img_h, img_w, input.dtype)\n return input" } ]
import math import numpy as np import random import torch import torchvision.transforms.functional as F import numbers import PIL import torchvision import skimage from PIL import Image from torchvision import transforms from .rand_augment import rand_augment_transform from .random_erasing import RandomErasing from . import functional as FF
4727
else: # whole image w = img.size[0] h = img.size[1] i = (img.size[1] - h) // 2 j = (img.size[0] - w) // 2 return i, j, h, w def __call__(self, img): """ Args: img (PIL Image): Image to be cropped and resized. Returns: PIL Image: Randomly cropped and resized image. """ i, j, h, w = self.get_params(img, self.scale, self.ratio) if isinstance(self.interpolation, (tuple, list)): interpolation = random.choice(self.interpolation) else: interpolation = self.interpolation return F.resized_crop(img, i, j, h, w, self.size, interpolation) def __repr__(self): if isinstance(self.interpolation, (tuple, list)): interpolate_str = " ".join( [_pil_interpolation_to_str[x] for x in self.interpolation] ) else: interpolate_str = _pil_interpolation_to_str[self.interpolation] format_string = self.__class__.__name__ + "(size={0}".format(self.size) format_string += ", scale={0}".format( tuple(round(s, 4) for s in self.scale) ) format_string += ", ratio={0}".format( tuple(round(r, 4) for r in self.ratio) ) format_string += ", interpolation={0})".format(interpolate_str) return format_string def transforms_imagenet_train( img_size=224, scale=None, ratio=None, hflip=0.5, vflip=0.0, color_jitter=0.4, auto_augment=None, interpolation="random", use_prefetcher=False, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), re_prob=0.0, re_mode="const", re_count=1, re_num_splits=0, separate=False, ): """ If separate==True, the transforms are returned as a tuple of 3 separate transforms for use in a mixing dataset that passes * all data through the first (primary) transform, called the 'clean' data * a portion of the data through the secondary transform * normalizes and converts the branches above with the third, final transform """ if isinstance(img_size, tuple): img_size = img_size[-2:] else: img_size = img_size scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range ratio = tuple( ratio or (3.0 / 4.0, 4.0 / 3.0) ) # default imagenet ratio range primary_tfl = [ RandomResizedCropAndInterpolation( img_size, scale=scale, ratio=ratio, interpolation=interpolation ) ] if hflip > 0.0: primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)] if vflip > 0.0: primary_tfl += [transforms.RandomVerticalFlip(p=vflip)] secondary_tfl = [] if auto_augment: assert isinstance(auto_augment, str) if isinstance(img_size, tuple): img_size_min = min(img_size) else: img_size_min = img_size aa_params = dict( translate_const=int(img_size_min * 0.45), img_mean=tuple([min(255, round(255 * x)) for x in mean]), ) if interpolation and interpolation != "random": aa_params["interpolation"] = _pil_interp(interpolation) if auto_augment.startswith("rand"): secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] elif auto_augment.startswith("augmix"): raise NotImplementedError("Augmix not implemented") else: raise NotImplementedError("Auto aug not implemented") elif color_jitter is not None: # color jitter is enabled when not using AA if isinstance(color_jitter, (list, tuple)): # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation # or 4 if also augmenting hue assert len(color_jitter) in (3, 4) else: # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue color_jitter = (float(color_jitter),) * 3 secondary_tfl += [transforms.ColorJitter(*color_jitter)] final_tfl = [] final_tfl += [ transforms.ToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)), ] if re_prob > 0.0: final_tfl.append(
#!/usr/bin/env python3 _pil_interpolation_to_str = { Image.NEAREST: "PIL.Image.NEAREST", Image.BILINEAR: "PIL.Image.BILINEAR", Image.BICUBIC: "PIL.Image.BICUBIC", Image.LANCZOS: "PIL.Image.LANCZOS", Image.HAMMING: "PIL.Image.HAMMING", Image.BOX: "PIL.Image.BOX", } _RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) def _pil_interp(method): if method == "bicubic": return Image.BICUBIC elif method == "lanczos": return Image.LANCZOS elif method == "hamming": return Image.HAMMING else: return Image.BILINEAR def random_short_side_scale_jitter( images, min_size, max_size, boxes=None, inverse_uniform_sampling=False ): """ Perform a spatial short scale jittering on the given images and corresponding boxes. Args: images (tensor): images to perform scale jitter. Dimension is `num frames` x `channel` x `height` x `width`. min_size (int): the minimal size to scale the frames. max_size (int): the maximal size to scale the frames. boxes (ndarray): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. inverse_uniform_sampling (bool): if True, sample uniformly in [1 / max_scale, 1 / min_scale] and take a reciprocal to get the scale. If False, take a uniform sample from [min_scale, max_scale]. Returns: (tensor): the scaled images with dimension of `num frames` x `channel` x `new height` x `new width`. (ndarray or None): the scaled boxes with dimension of `num boxes` x 4. """ if inverse_uniform_sampling: size = int( round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size)) ) else: size = int(round(np.random.uniform(min_size, max_size))) height = images.shape[2] width = images.shape[3] if (width <= height and width == size) or ( height <= width and height == size ): return images, boxes new_width = size new_height = size if width < height: new_height = int(math.floor((float(height) / width) * size)) if boxes is not None: boxes = boxes * float(new_height) / height else: new_width = int(math.floor((float(width) / height) * size)) if boxes is not None: boxes = boxes * float(new_width) / width return ( torch.nn.functional.interpolate( images, size=(new_height, new_width), mode="bilinear", align_corners=False, ), boxes, ) def crop_boxes(boxes, x_offset, y_offset): """ Peform crop on the bounding boxes given the offsets. Args: boxes (ndarray or None): bounding boxes to peform crop. The dimension is `num boxes` x 4. x_offset (int): cropping offset in the x axis. y_offset (int): cropping offset in the y axis. Returns: cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. """ cropped_boxes = boxes.copy() cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset return cropped_boxes def random_crop(images, size, boxes=None): """ Perform random spatial crop on the given images and corresponding boxes. Args: images (tensor): images to perform random crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): the size of height and width to crop on the image. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: cropped (tensor): cropped images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. 
""" if images.shape[2] == size and images.shape[3] == size: return images height = images.shape[2] width = images.shape[3] y_offset = 0 if height > size: y_offset = int(np.random.randint(0, height - size)) x_offset = 0 if width > size: x_offset = int(np.random.randint(0, width - size)) cropped = images[ :, :, y_offset : y_offset + size, x_offset : x_offset + size ] cropped_boxes = ( crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None ) return cropped, cropped_boxes def horizontal_flip(prob, images, boxes=None): """ Perform horizontal flip on the given images and corresponding boxes. Args: prob (float): probility to flip the images. images (tensor): images to perform horizontal flip, the dimension is `num frames` x `channel` x `height` x `width`. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. Returns: images (tensor): images with dimension of `num frames` x `channel` x `height` x `width`. flipped_boxes (ndarray or None): the flipped boxes with dimension of `num boxes` x 4. """ if boxes is None: flipped_boxes = None else: flipped_boxes = boxes.copy() if np.random.uniform() < prob: images = images.flip((-1)) if len(images.shape) == 3: width = images.shape[2] elif len(images.shape) == 4: width = images.shape[3] else: raise NotImplementedError("Dimension does not supported") if boxes is not None: flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1 return images, flipped_boxes def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None): """ Perform uniform spatial sampling on the images and corresponding boxes. Args: images (tensor): images to perform uniform crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): size of height and weight to crop the images. spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width is larger than height. Or 0, 1, or 2 for top, center, and bottom crop if height is larger than width. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. scale_size (int): optinal. If not None, resize the images to scale_size before performing any crop. Returns: cropped (tensor): images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. """ assert spatial_idx in [0, 1, 2] ndim = len(images.shape) if ndim == 3: images = images.unsqueeze(0) height = images.shape[2] width = images.shape[3] if scale_size is not None: if width <= height: width, height = scale_size, int(height / width * scale_size) else: width, height = int(width / height * scale_size), scale_size images = torch.nn.functional.interpolate( images, size=(height, width), mode="bilinear", align_corners=False, ) y_offset = int(math.ceil((height - size) / 2)) x_offset = int(math.ceil((width - size) / 2)) if height > width: if spatial_idx == 0: y_offset = 0 elif spatial_idx == 2: y_offset = height - size else: if spatial_idx == 0: x_offset = 0 elif spatial_idx == 2: x_offset = width - size cropped = images[ :, :, y_offset : y_offset + size, x_offset : x_offset + size ] cropped_boxes = ( crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None ) if ndim == 3: cropped = cropped.squeeze(0) return cropped, cropped_boxes def clip_boxes_to_image(boxes, height, width): """ Clip an array of boxes to an image with the given height and width. Args: boxes (ndarray): bounding boxes to perform clipping. Dimension is `num boxes` x 4. height (int): given image height. 
width (int): given image width. Returns: clipped_boxes (ndarray): the clipped boxes with dimension of `num boxes` x 4. """ clipped_boxes = boxes.copy() clipped_boxes[:, [0, 2]] = np.minimum( width - 1.0, np.maximum(0.0, boxes[:, [0, 2]]) ) clipped_boxes[:, [1, 3]] = np.minimum( height - 1.0, np.maximum(0.0, boxes[:, [1, 3]]) ) return clipped_boxes def blend(images1, images2, alpha): """ Blend two images with a given weight alpha. Args: images1 (tensor): the first images to be blended, the dimension is `num frames` x `channel` x `height` x `width`. images2 (tensor): the second images to be blended, the dimension is `num frames` x `channel` x `height` x `width`. alpha (float): the blending weight. Returns: (tensor): blended images, the dimension is `num frames` x `channel` x `height` x `width`. """ return images1 * alpha + images2 * (1 - alpha) def grayscale(images): """ Get the grayscale for the input images. The channels of images should be in order BGR. Args: images (tensor): the input images for getting grayscale. Dimension is `num frames` x `channel` x `height` x `width`. Returns: img_gray (tensor): blended images, the dimension is `num frames` x `channel` x `height` x `width`. """ # R -> 0.299, G -> 0.587, B -> 0.114. img_gray = torch.tensor(images) gray_channel = ( 0.299 * images[:, 2] + 0.587 * images[:, 1] + 0.114 * images[:, 0] ) img_gray[:, 0] = gray_channel img_gray[:, 1] = gray_channel img_gray[:, 2] = gray_channel return img_gray def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0): """ Perfrom a color jittering on the input images. The channels of images should be in order BGR. Args: images (tensor): images to perform color jitter. Dimension is `num frames` x `channel` x `height` x `width`. img_brightness (float): jitter ratio for brightness. img_contrast (float): jitter ratio for contrast. img_saturation (float): jitter ratio for saturation. Returns: images (tensor): the jittered images, the dimension is `num frames` x `channel` x `height` x `width`. """ jitter = [] if img_brightness != 0: jitter.append("brightness") if img_contrast != 0: jitter.append("contrast") if img_saturation != 0: jitter.append("saturation") if len(jitter) > 0: order = np.random.permutation(np.arange(len(jitter))) for idx in range(0, len(jitter)): if jitter[order[idx]] == "brightness": images = brightness_jitter(img_brightness, images) elif jitter[order[idx]] == "contrast": images = contrast_jitter(img_contrast, images) elif jitter[order[idx]] == "saturation": images = saturation_jitter(img_saturation, images) return images def brightness_jitter(var, images): """ Perfrom brightness jittering on the input images. The channels of images should be in order BGR. Args: var (float): jitter ratio for brightness. images (tensor): images to perform color jitter. Dimension is `num frames` x `channel` x `height` x `width`. Returns: images (tensor): the jittered images, the dimension is `num frames` x `channel` x `height` x `width`. """ alpha = 1.0 + np.random.uniform(-var, var) img_bright = torch.zeros(images.shape) images = blend(images, img_bright, alpha) return images def contrast_jitter(var, images): """ Perfrom contrast jittering on the input images. The channels of images should be in order BGR. Args: var (float): jitter ratio for contrast. images (tensor): images to perform color jitter. Dimension is `num frames` x `channel` x `height` x `width`. Returns: images (tensor): the jittered images, the dimension is `num frames` x `channel` x `height` x `width`. 
""" alpha = 1.0 + np.random.uniform(-var, var) img_gray = grayscale(images) img_gray[:] = torch.mean(img_gray, dim=(1, 2, 3), keepdim=True) images = blend(images, img_gray, alpha) return images def saturation_jitter(var, images): """ Perfrom saturation jittering on the input images. The channels of images should be in order BGR. Args: var (float): jitter ratio for saturation. images (tensor): images to perform color jitter. Dimension is `num frames` x `channel` x `height` x `width`. Returns: images (tensor): the jittered images, the dimension is `num frames` x `channel` x `height` x `width`. """ alpha = 1.0 + np.random.uniform(-var, var) img_gray = grayscale(images) images = blend(images, img_gray, alpha) return images def lighting_jitter(images, alphastd, eigval, eigvec): """ Perform AlexNet-style PCA jitter on the given images. Args: images (tensor): images to perform lighting jitter. Dimension is `num frames` x `channel` x `height` x `width`. alphastd (float): jitter ratio for PCA jitter. eigval (list): eigenvalues for PCA jitter. eigvec (list[list]): eigenvectors for PCA jitter. Returns: out_images (tensor): the jittered images, the dimension is `num frames` x `channel` x `height` x `width`. """ if alphastd == 0: return images # generate alpha1, alpha2, alpha3. alpha = np.random.normal(0, alphastd, size=(1, 3)) eig_vec = np.array(eigvec) eig_val = np.reshape(eigval, (1, 3)) rgb = np.sum( eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0), axis=1, ) out_images = torch.zeros_like(images) if len(images.shape) == 3: # C H W channel_dim = 0 elif len(images.shape) == 4: # T C H W channel_dim = 1 else: raise NotImplementedError(f"Unsupported dimension {len(images.shape)}") for idx in range(images.shape[channel_dim]): # C H W if len(images.shape) == 3: out_images[idx] = images[idx] + rgb[2 - idx] # T C H W elif len(images.shape) == 4: out_images[:, idx] = images[:, idx] + rgb[2 - idx] else: raise NotImplementedError( f"Unsupported dimension {len(images.shape)}" ) return out_images def color_normalization(images, mean, stddev): """ Perform color nomration on the given images. Args: images (tensor): images to perform color normalization. Dimension is `num frames` x `channel` x `height` x `width`. mean (list): mean values for normalization. stddev (list): standard deviations for normalization. Returns: out_images (tensor): the noramlized images, the dimension is `num frames` x `channel` x `height` x `width`. """ if len(images.shape) == 3: assert ( len(mean) == images.shape[0] ), "channel mean not computed properly" assert ( len(stddev) == images.shape[0] ), "channel stddev not computed properly" elif len(images.shape) == 4: assert ( len(mean) == images.shape[1] ), "channel mean not computed properly" assert ( len(stddev) == images.shape[1] ), "channel stddev not computed properly" else: raise NotImplementedError(f"Unsupported dimension {len(images.shape)}") out_images = torch.zeros_like(images) for idx in range(len(mean)): # C H W if len(images.shape) == 3: out_images[idx] = (images[idx] - mean[idx]) / stddev[idx] elif len(images.shape) == 4: out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx] else: raise NotImplementedError( f"Unsupported dimension {len(images.shape)}" ) return out_images def _get_param_spatial_crop( scale, ratio, height, width, num_repeat=10, log_scale=True, switch_hw=False ): """ Given scale, ratio, height and width, return sampled coordinates of the videos. 
""" for _ in range(num_repeat): area = height * width target_area = random.uniform(*scale) * area if log_scale: log_ratio = (math.log(ratio[0]), math.log(ratio[1])) aspect_ratio = math.exp(random.uniform(*log_ratio)) else: aspect_ratio = random.uniform(*ratio) w = int(round(math.sqrt(target_area * aspect_ratio))) h = int(round(math.sqrt(target_area / aspect_ratio))) if np.random.uniform() < 0.5 and switch_hw: w, h = h, w if 0 < w <= width and 0 < h <= height: i = random.randint(0, height - h) j = random.randint(0, width - w) return i, j, h, w # Fallback to central crop in_ratio = float(width) / float(height) if in_ratio < min(ratio): w = width h = int(round(w / min(ratio))) elif in_ratio > max(ratio): h = height w = int(round(h * max(ratio))) else: # whole image w = width h = height i = (height - h) // 2 j = (width - w) // 2 return i, j, h, w def random_resized_crop( images, target_height, target_width, scale=(0.8, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), ): """ Crop the given images to random size and aspect ratio. A crop of random size (default: of 0.08 to 1.0) of the original size and a random aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop is finally resized to given size. This is popularly used to train the Inception networks. Args: images: Images to perform resizing and cropping. target_height: Desired height after cropping. target_width: Desired width after cropping. scale: Scale range of Inception-style area based random resizing. ratio: Aspect ratio range of Inception-style area based random resizing. """ height = images.shape[2] width = images.shape[3] i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width) cropped = images[:, :, i : i + h, j : j + w] return torch.nn.functional.interpolate( cropped, size=(target_height, target_width), mode="bilinear", align_corners=False, ) def random_resized_crop_with_shift( images, target_height, target_width, scale=(0.8, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), ): """ This is similar to random_resized_crop. However, it samples two different boxes (for cropping) for the first and last frame. It then linearly interpolates the two boxes for other frames. Args: images: Images to perform resizing and cropping. target_height: Desired height after cropping. target_width: Desired width after cropping. scale: Scale range of Inception-style area based random resizing. ratio: Aspect ratio range of Inception-style area based random resizing. """ t = images.shape[1] height = images.shape[2] width = images.shape[3] i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width) i_, j_, h_, w_ = _get_param_spatial_crop(scale, ratio, height, width) i_s = [int(i) for i in torch.linspace(i, i_, steps=t).tolist()] j_s = [int(i) for i in torch.linspace(j, j_, steps=t).tolist()] h_s = [int(i) for i in torch.linspace(h, h_, steps=t).tolist()] w_s = [int(i) for i in torch.linspace(w, w_, steps=t).tolist()] out = torch.zeros((3, t, target_height, target_width)) for ind in range(t): out[:, ind : ind + 1, :, :] = torch.nn.functional.interpolate( images[ :, ind : ind + 1, i_s[ind] : i_s[ind] + h_s[ind], j_s[ind] : j_s[ind] + w_s[ind], ], size=(target_height, target_width), mode="bilinear", align_corners=False, ) return out def create_random_augment( input_size, auto_augment=None, interpolation="bilinear", ): """ Get video randaug transform. Args: input_size: The size of the input video in tuple. auto_augment: Parameters for randaug. 
An example: "rand-m7-n4-mstd0.5-inc1" (m is the magnitude and n is the number of operations to apply). interpolation: Interpolation method. """ if isinstance(input_size, tuple): img_size = input_size[-2:] else: img_size = input_size if auto_augment: assert isinstance(auto_augment, str) if isinstance(img_size, tuple): img_size_min = min(img_size) else: img_size_min = img_size aa_params = {"translate_const": int(img_size_min * 0.45)} if interpolation and interpolation != "random": aa_params["interpolation"] = _pil_interp(interpolation) if auto_augment.startswith("rand"): return transforms.Compose( [rand_augment_transform(auto_augment, aa_params)] ) raise NotImplementedError def random_sized_crop_img( im, size, jitter_scale=(0.08, 1.0), jitter_aspect=(3.0 / 4.0, 4.0 / 3.0), max_iter=10, ): """ Performs Inception-style cropping (used for training). """ assert ( len(im.shape) == 3 ), "Currently only support image for random_sized_crop" h, w = im.shape[1:3] i, j, h, w = _get_param_spatial_crop( scale=jitter_scale, ratio=jitter_aspect, height=h, width=w, num_repeat=max_iter, log_scale=False, switch_hw=True, ) cropped = im[:, i : i + h, j : j + w] return torch.nn.functional.interpolate( cropped.unsqueeze(0), size=(size, size), mode="bilinear", align_corners=False, ).squeeze(0) # The following code are modified based on timm lib, we will replace the following # contents with dependency from PyTorchVideo. # https://github.com/facebookresearch/pytorchvideo class RandomResizedCropAndInterpolation: """Crop the given PIL Image to random size and aspect ratio with random interpolation. A crop of random size (default: of 0.08 to 1.0) of the original size and a random aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop is finally resized to given size. This is popularly used to train the Inception networks. Args: size: expected output size of each edge scale: range of size of the origin size cropped ratio: range of aspect ratio of the origin aspect ratio cropped interpolation: Default: PIL.Image.BILINEAR """ def __init__( self, size, scale=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), interpolation="bilinear", ): if isinstance(size, tuple): self.size = size else: self.size = (size, size) if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): print("range should be of kind (min, max)") if interpolation == "random": self.interpolation = _RANDOM_INTERPOLATION else: self.interpolation = _pil_interp(interpolation) self.scale = scale self.ratio = ratio @staticmethod def get_params(img, scale, ratio): """Get parameters for ``crop`` for a random sized crop. Args: img (PIL Image): Image to be cropped. scale (tuple): range of size of the origin size cropped ratio (tuple): range of aspect ratio of the origin aspect ratio cropped Returns: tuple: params (i, j, h, w) to be passed to ``crop`` for a random sized crop. 
""" area = img.size[0] * img.size[1] for _ in range(10): target_area = random.uniform(*scale) * area log_ratio = (math.log(ratio[0]), math.log(ratio[1])) aspect_ratio = math.exp(random.uniform(*log_ratio)) w = int(round(math.sqrt(target_area * aspect_ratio))) h = int(round(math.sqrt(target_area / aspect_ratio))) if w <= img.size[0] and h <= img.size[1]: i = random.randint(0, img.size[1] - h) j = random.randint(0, img.size[0] - w) return i, j, h, w # Fallback to central crop in_ratio = img.size[0] / img.size[1] if in_ratio < min(ratio): w = img.size[0] h = int(round(w / min(ratio))) elif in_ratio > max(ratio): h = img.size[1] w = int(round(h * max(ratio))) else: # whole image w = img.size[0] h = img.size[1] i = (img.size[1] - h) // 2 j = (img.size[0] - w) // 2 return i, j, h, w def __call__(self, img): """ Args: img (PIL Image): Image to be cropped and resized. Returns: PIL Image: Randomly cropped and resized image. """ i, j, h, w = self.get_params(img, self.scale, self.ratio) if isinstance(self.interpolation, (tuple, list)): interpolation = random.choice(self.interpolation) else: interpolation = self.interpolation return F.resized_crop(img, i, j, h, w, self.size, interpolation) def __repr__(self): if isinstance(self.interpolation, (tuple, list)): interpolate_str = " ".join( [_pil_interpolation_to_str[x] for x in self.interpolation] ) else: interpolate_str = _pil_interpolation_to_str[self.interpolation] format_string = self.__class__.__name__ + "(size={0}".format(self.size) format_string += ", scale={0}".format( tuple(round(s, 4) for s in self.scale) ) format_string += ", ratio={0}".format( tuple(round(r, 4) for r in self.ratio) ) format_string += ", interpolation={0})".format(interpolate_str) return format_string def transforms_imagenet_train( img_size=224, scale=None, ratio=None, hflip=0.5, vflip=0.0, color_jitter=0.4, auto_augment=None, interpolation="random", use_prefetcher=False, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), re_prob=0.0, re_mode="const", re_count=1, re_num_splits=0, separate=False, ): """ If separate==True, the transforms are returned as a tuple of 3 separate transforms for use in a mixing dataset that passes * all data through the first (primary) transform, called the 'clean' data * a portion of the data through the secondary transform * normalizes and converts the branches above with the third, final transform """ if isinstance(img_size, tuple): img_size = img_size[-2:] else: img_size = img_size scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range ratio = tuple( ratio or (3.0 / 4.0, 4.0 / 3.0) ) # default imagenet ratio range primary_tfl = [ RandomResizedCropAndInterpolation( img_size, scale=scale, ratio=ratio, interpolation=interpolation ) ] if hflip > 0.0: primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)] if vflip > 0.0: primary_tfl += [transforms.RandomVerticalFlip(p=vflip)] secondary_tfl = [] if auto_augment: assert isinstance(auto_augment, str) if isinstance(img_size, tuple): img_size_min = min(img_size) else: img_size_min = img_size aa_params = dict( translate_const=int(img_size_min * 0.45), img_mean=tuple([min(255, round(255 * x)) for x in mean]), ) if interpolation and interpolation != "random": aa_params["interpolation"] = _pil_interp(interpolation) if auto_augment.startswith("rand"): secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] elif auto_augment.startswith("augmix"): raise NotImplementedError("Augmix not implemented") else: raise NotImplementedError("Auto aug not implemented") elif color_jitter is not 
None: # color jitter is enabled when not using AA if isinstance(color_jitter, (list, tuple)): # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation # or 4 if also augmenting hue assert len(color_jitter) in (3, 4) else: # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue color_jitter = (float(color_jitter),) * 3 secondary_tfl += [transforms.ColorJitter(*color_jitter)] final_tfl = [] final_tfl += [ transforms.ToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)), ] if re_prob > 0.0: final_tfl.append(
RandomErasing(
1
2023-10-25 07:07:05+00:00
8k
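The crop routines in the row above all rely on the same Inception-style parameter sampling: draw a target area from the scale range, draw an aspect ratio log-uniformly from the ratio range, retry a handful of times, and otherwise fall back to a (near-)whole-image crop before resizing. Below is a minimal sketch of that idea under assumed names (sample_crop_params and the (batch, channels, height, width) layout are illustrative, not taken verbatim from the row's code), with the fallback simplified to a centered whole-image crop.

import math
import random

import torch


def sample_crop_params(height, width, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3), attempts=10):
    """Return (top, left, crop_h, crop_w) for an Inception-style random crop (illustrative helper)."""
    area = height * width
    for _ in range(attempts):
        # Sample a target area fraction and a log-uniform aspect ratio.
        target_area = random.uniform(*scale) * area
        log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
        aspect_ratio = math.exp(random.uniform(*log_ratio))
        w = int(round(math.sqrt(target_area * aspect_ratio)))
        h = int(round(math.sqrt(target_area / aspect_ratio)))
        if 0 < w <= width and 0 < h <= height:
            # Valid box: pick a random top-left corner inside the image.
            return random.randint(0, height - h), random.randint(0, width - w), h, w
    # Simplified fallback: keep the whole image (the row's code clamps to the ratio range instead).
    return 0, 0, height, width


# Usage: crop a small batch of frames and resize back to a fixed target resolution.
frames = torch.rand(2, 3, 240, 320)  # (batch, channels, height, width), assumed layout
top, left, crop_h, crop_w = sample_crop_params(240, 320)
cropped = frames[:, :, top:top + crop_h, left:left + crop_w]
resized = torch.nn.functional.interpolate(
    cropped, size=(224, 224), mode="bilinear", align_corners=False
)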
Agricultural-Robotics-Bonn/pagnerf
config_parser.py
[ { "identifier": "MultiviewDataset", "path": "datasets/multiview_dataset.py", "snippet": "class MultiviewDataset(Dataset):\n \"\"\"This is a static multiview image dataset class.\n\n This class should be used for training tasks where the task is to fit a static 3D volume from\n multiview images.\n\n TODO(ttakikawa): Support single-camera dynamic temporal scenes, and multi-camera dynamic temporal scenes.\n TODO(ttakikawa): Currently this class only supports sampling per image, not sampling across the entire\n dataset. This is due to practical reasons. Not sure if it matters...\n \"\"\"\n\n def __init__(self, \n dataset_path : str,\n multiview_dataset_format : str = 'standard',\n mip : int = None,\n bg_color : str = None,\n dataset_num_workers : int = -1,\n load_modes : list = [],\n class_labels : list = [],\n transform : Callable = None,\n scale : float = None,\n offset : list = None,\n model_rescaling : str = 'snap_to_bottom',\n add_noise_to_train_poses : bool = False,\n pose_noise_strength : float = 0.01,\n dataset_center_idx : int = 0,\n split : str = 'train',\n pose_src : str = 'odom',\n max_depth : float = -1,\n dataset_mode : str = 'label_window',\n **kwargs\n ):\n \"\"\"Initializes the dataset class.\n\n Note that the `init` function to actually load images is separate right now, because we don't want \n to load the images unless we have to. This might change later.\n\n Args: \n dataset_path (str): Path to the dataset.\n multiview_dataset_format (str): The dataset format. Currently supports standard (the same format\n used for instant-ngp) and the RTMV dataset.\n mip (int): The factor at which the images will be downsampled by to save memory and such.\n Will downscale by 2**mip.\n bg_color (str): The background color to use for images with 0 alpha.\n dataset_num_workers (int): The number of workers to use if the dataset format uses multiprocessing.\n \"\"\"\n self.root = dataset_path\n self.mip = mip\n self.bg_color = bg_color\n self.dataset_num_workers = dataset_num_workers\n self.transform = transform\n self.multiview_dataset_format = multiview_dataset_format\n self.load_modes = load_modes\n self.class_labels = class_labels\n self.scale = scale\n self.offset = offset\n self.model_rescaling = model_rescaling\n \n self.add_noise_to_train_poses = add_noise_to_train_poses\n self.pose_noise_strength = pose_noise_strength\n\n self.mesh_path = None \n self.semantic_info = None\n\n self.dataset_center_idx = dataset_center_idx\n self.dataset_mode = dataset_mode\n self.pose_src = pose_src\n\n self.max_depth = max_depth\n\n self.split = split\n \n # load the requested dataset format parser\n if self.multiview_dataset_format in ['standard', 'NeRFStandard','nerf_standard']:\n from .formats.nerf_standard import NeRFStandard\n self.dataset_format = NeRFStandard\n elif self.multiview_dataset_format in ['replica', 'ReplicaInstance', 'replica_instance']:\n from .formats.replica_instance import ReplicaInstance\n self.dataset_format = ReplicaInstance\n elif self.multiview_dataset_format in ['bup20', 'bup_20', 'BUP20', 'BUP_20']:\n from .formats.bup20 import BUP20\n self.dataset_format = BUP20\n elif self.multiview_dataset_format in ['sb20', 'sb_20', 'SB20', 'SB_20']:\n from .formats.sb20 import SB20\n self.dataset_format = SB20\n else:\n raise ImportError(f'\"{multiview_dataset_format}\" multiview dataset format not supported...')\n\n def init(self):\n \"\"\"Initializes the dataset.\n \"\"\"\n\n # Get image tensors \n \n self.coords_data = None\n self.coords = None\n\n if 'load_scale_and_offset' in 
vars(self.dataset_format):\n loaded_scale, loaded_offset = self.dataset_format.load_scale_and_offset(self.root, model_rescaling=self.model_rescaling)\n if self.scale is None:\n self.scale = loaded_scale\n if self.offset is None:\n self.offset = loaded_offset\n\n self.data = self.get_images(split=self.split)\n\n self.img_shape = self.data[\"imgs\"].shape[1:3]\n self.num_imgs = self.data[\"imgs\"].shape[0]\n\n for mode in [k for k in self.data if k not in ['cameras', 'cameras_ts', 'filenames']]:\n if mode == 'base_rays':\n self.data[mode] = self.data[mode].reshape(-1, 3)\n elif mode == 'rays':\n self.data[mode] = self.data[mode].reshape(self.num_imgs, -1, 3)\n else:\n num_channels = self.data[mode].shape[-1]\n self.data[mode] = self.data[mode].reshape(self.num_imgs, -1, num_channels)\n \n if 'get_semantic_info' in vars(self.dataset_format):\n self.semantic_info = self.dataset_format.get_semantic_info(self.root, self.class_labels)\n\n if 'get_semantic_info' in vars(self.dataset_format):\n self.semantic_info = self.dataset_format.get_semantic_info(self.root, self.class_labels)\n \n\n\n def get_images(self, split='train', mip=None):\n \"\"\"Will return the dictionary of image tensors.\n\n Args:\n split (str): The split to use from train, val, test\n mip (int): If specified, will rescale the image by 2**mip.\n\n Returns:\n (dict of torch.FloatTensor): Dictionary of tensors that come with the dataset.\n \"\"\"\n if mip is None:\n mip = self.mip\n \n data = self.dataset_format.load_data(self.root, split,\n bg_color=self.bg_color, num_workers=self.dataset_num_workers, mip=mip,\n coords=self.coords_data, load_modes=self.load_modes, scale=self.scale, offset=self.offset,\n add_noise_to_train_poses=self.add_noise_to_train_poses,\n pose_noise_strength=self.pose_noise_strength,\n dataset_center_idx=self.dataset_center_idx,\n pose_src=self.pose_src,\n max_depth=self.max_depth,\n mode=self.dataset_mode,\n class_labels=self.class_labels)\n \n if 'coords' in data:\n self.coords_data = data['coords']\n self.coords = data['coords']['values']\n\n return data\n\n def __len__(self):\n \"\"\"Length of the dataset in number of rays.\n \"\"\"\n return self.data[\"imgs\"].shape[0]\n\n def __getitem__(self, idx : int):\n \"\"\"Returns a ray.\n \"\"\"\n out = {}\n for mode in [k for k in self.data if k not in ['cameras', 'base_rays']]:\n out[mode] = self.data[mode][idx]\n \n if 'base_rays' in self.data:\n out['base_rays'] = self.data['base_rays']\n\n if self.transform is not None:\n out = self.transform(out)\n \n out['cam_id'] = self.data['cameras_ts'][idx] if 'cameras_ts' in self.data else idx\n out['filename'] = self.data['filenames'][idx] if 'filenames' in self.data else ''\n return out" }, { "identifier": "PermutoGrid", "path": "grids/permuto_grid.py", "snippet": "class PermutoGrid(HashGrid):\n \"\"\"This is a feature grid where the features are defined in a codebook that is hashed.\n \"\"\"\n def __init__(self,*args,\n coarsest_scale = 1.0,\n finest_scale = 0.001,\n capacity_log_2 = 18,\n num_lods = 24, \n **kwargs):\n super().__init__(*args, **kwargs)\n \n self._register_blas_buffers()\n \n self.coarsest_scale = coarsest_scale\n self.finest_scale = finest_scale\n self.capacity = pow(2,capacity_log_2)\n self.num_lods = num_lods\n\n self.multiscale_type = 'cat'\n \n def _register_blas_buffers(self):\n # register grid accelerator for save/load operations\n self.register_buffer('blas_octree', self.blas.octree)\n self.register_buffer('blas_points', self.blas.points)\n self.register_buffer('blas_prefix', 
self.blas.prefix)\n self.register_buffer('blas_pyramid', self.blas.pyramid)\n\n def blas_init(self, octree):\n self.blas.init(octree)\n self._register_blas_buffers()\n\n def set_capacity(self, capacity_log_2):\n self.capacity = pow(2,capacity_log_2)\n\n def init_from_scales(self):\n \"\"\"Build a multiscale hash grid from a list of resolutions.\n \"\"\"\n self.active_lods = [x for x in range(self.num_lods)]\n self.max_lod = self.num_lods - 1\n\n self.resolutions=np.geomspace(self.coarsest_scale, self.finest_scale, num=self.num_lods)\n log.info(f\"Active Resolutions: {self.resolutions}\")\n \n\n self.embedder = PermutoEncoding(\n 3, # In pos dimension\n self.capacity,\n self.num_lods,\n self.feature_dim,\n self.resolutions)\n\n\n @torch.cuda.amp.custom_fwd(cast_inputs=torch.half)\n def interpolate(self, coords, lod_idx=None, pidx=None):\n \n if coords.numel() == 0:\n return torch.empty([0,1,self.num_lods * self.feature_dim], device=coords.device)\n \n return self.embedder(coords.reshape(-1,3).type(torch.float))" }, { "identifier": "BAPipeline", "path": "pc_nerf/ba_pipeline.py", "snippet": "class BAPipeline(Pipeline):\n \"\"\"Bundle adjustment pipeline class\n\n Pipelines adds a pose database nn.Module in addition to a NeF and tracer\n\n \"\"\"\n \n def __init__(self, nef: BaseNeuralField, cameras: 'Camera | list[Camera] | dict[str,Camera]',\n tracer: BaseTracer = None,\n anchor_frame_idxs: 'list[int]' = [],\n pose_opt_only_frame_idxs: 'list[int]' = []):\n \"\"\"Initialize the Pipeline.\n\n Args:\n nef (nn.Module): Neural fields module.\n cam_db (CamDatabase): Cam database for implicit bundle adjustment optimization.\n tracer (nn.Module or None): Forward map module.\n \"\"\"\n super().__init__(nef, tracer)\n\n if isinstance(cameras, dict):\n self.cam_id_to_idx = {cam_id:idx for idx,cam_id in enumerate(cameras.keys())}\n self.cameras = Camera.cat(cameras.values())\n elif isinstance(cameras, (tuple,list)):\n self.cameras = Camera.cat(cameras)\n elif isinstance(cameras, Camera):\n assert cameras.extrinsics.parameters().shape[0] > 1, (\n 'Tried to create a camera database module with a Camera obejet with a signle camera extrinsincs',\n 'but needs to have more than one')\n self.cameras = cameras\n else:\n raise NotImplementedError('cameras constructor argument must be one of the folowing: Camera | list[Camera] | dict[str,Camera]',\n f'but{type(cameras)} was given.')\n \n self.cameras.extrinsics.switch_backend('matrix_6dof_rotation')\n \n self.anchor_frame_idxs = anchor_frame_idxs\n self.pose_opt_only_frame_idxs = pose_opt_only_frame_idxs\n\n self.cameras.extrinsics._backend.params = torch.nn.Parameter(self.cameras.extrinsics.parameters())\n\n self.register_parameter(name='camera_extrinsics', param=self.cameras.extrinsics.parameters())\n \n def to(self, *args, **kwargs):\n self = super().to(*args, **kwargs)\n self.cameras.intrinsics = self.cameras.intrinsics.to(*args, **kwargs)\n if isinstance(self.anchor_frame_idxs, (tuple,list)) and len(self.anchor_frame_idxs) > 0:\n params = self.cameras.extrinsics.parameters()\n grad_mask = torch.ones_like(params)\n grad_mask[self.anchor_frame_idxs] = 0.0\n params.register_hook(lambda grad: grad * grad_mask)\n\n return self\n\n def forward(self, *args, cam_ids=None, **kwargs):\n \"\"\" Transform base rays if cam_ids are passed requested camera poses\n and trace if a tracer is available\n \"\"\"\n # transform base rays acocording to frames extrinsics\n if isinstance(cam_ids, (tuple,list,torch.Tensor)):\n kwargs['rays'] = self.transform_rays(kwargs['rays'], 
cam_ids)\n\n \n if self.tracer is not None:\n return self.tracer(self.nef, *args, **kwargs)\n else:\n return self.nef(*args, **kwargs)\n \n def get_cameras_from_ids(self, cam_ids):\n assert isinstance(cam_ids, (tuple,list,torch.Tensor))\n if isinstance(cam_ids, (tuple,list)):\n cam_ids = torch.tensor([self.cam_id_to_idx[id] for id in cam_ids], dtype=torch.long)\n assert cam_ids.nelement() > 0\n return self.cameras[cam_ids]\n\n def transform_rays(self, base_rays, cam_ids):\n cameras = self.get_cameras_from_ids(cam_ids)\n \n batch_rays = base_rays.reshape(len(cameras), -1, 3)\n rays_orig, rays_dir = cameras.extrinsics.inv_transform_rays(batch_rays.origins, batch_rays.dirs)\n rays_dir = rays_dir / torch.linalg.norm(rays_dir, dim=-1, keepdim=True)\n return Rays(origins=rays_orig.type(torch.float32), dirs=rays_dir.type(torch.float32),\n dist_min=cameras.near, dist_max=cameras.far).reshape(-1,3)" } ]
import os import sys import argparse import pprint import yaml import torch import logging as log from datasets import MultiviewDataset from datasets.transforms import * from wisp.datasets import SDFDataset from wisp.models import Pipeline from wisp.models.nefs import * from wisp.models.grids import * from wisp.tracers import * from grids.permuto_grid import PermutoGrid from pc_nerf.ba_pipeline import BAPipeline
6631
parser.set_defaults(**defaults_dict) def parse_config_dict(config_dict, parser): """Parses and sets the parser defaults with a yaml config file. Args: config_path : path to the yaml config file. parser : The parser for which the defaults will be set. parent : True if parsing the parent yaml. Should never be set to True by the user. """ list_of_valid_fields = [] for group in parser._action_groups: group_dict = {list_of_valid_fields.append(a.dest) for a in group._group_actions} list_of_valid_fields = set(list_of_valid_fields) defaults_dict = {} # Loads child parent and overwrite the parent configs # The yaml files assumes the argument groups, which aren't actually nested. for key in config_dict: for field in config_dict[key]: if field not in list_of_valid_fields: raise ValueError( f"ERROR: {field} is not a valid option. Check for typos in the config." ) defaults_dict[field] = config_dict[key][field] parser.set_defaults(**defaults_dict) def argparse_to_str(parser, args=None, config_dict=None): """Convert parser to string representation for Tensorboard logging. Args: parser (argparse.parser): Parser object. Needed for the argument groups. args : The parsed arguments. Will compute from the parser if None. Returns: args : The parsed arguments. arg_str : The string to be printed. """ if args is None: args = parser.parse_args() if config_dict is not None: parse_config_dict(config_dict, parser) elif args.config is not None: parse_yaml_config(args.config, parser) args = parser.parse_args() args_dict = {} for group in parser._action_groups: group_dict = {a.dest: getattr(args, a.dest, None) for a in group._group_actions} args_dict[group.title] = vars(argparse.Namespace(**group_dict)) pp = pprint.PrettyPrinter(indent=2) args_str = pp.pformat(args_dict) args_str = f'```{args_str}```' return args, args_str def get_trainer(args): return globals()[args.trainer_type] def get_optimizer_from_config(args): """Utility function to get the optimizer from the parsed config. """ optim_cls = str2optim[args.optimizer_type] if args.optimizer_type == 'adam': optim_params = {'eps': 1e-15} elif args.optimizer_type == 'sgd': optim_params = {'momentum': 0.8} else: optim_params = {} return optim_cls, optim_params def get_modules_from_config(args): """Utility function to get the modules for training from the parsed config. 
""" val_dataset = None if args.dataset_type == "multiview": log.info('Loading training dataset...') transform = SampleRays(args.num_rays_sampled_per_img) train_dataset = MultiviewDataset(**vars(args), transform=transform) train_dataset.init() args.ray_max_travel = args.ray_max_travel * train_dataset.scale if args.optimize_val_extrinsics: log.info('Loading validation split for pose optimization only...') val_dataset = MultiviewDataset(**vars(args), split='val', transform=transform) val_dataset.init() if 'semantic_info' in vars(train_dataset) and train_dataset.semantic_info is not None: args.num_classes = train_dataset.semantic_info['num_classes'] args.num_instances = train_dataset.semantic_info['num_instances'] device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') nef = globals()[args.nef_type](**vars(args)) tracer = globals()[args.tracer_type](**vars(args)) # Use a Bundle Adjustment pipeline if extriniscs need to be optimized if args.optimize_extrinsics: cameras = train_dataset.data['cameras'] # add validation cameras if val extrinsics need to be optimized if args.optimize_val_extrinsics: cameras.update(val_dataset.data['cameras']) pipeline = BAPipeline(nef, cameras, tracer) else: pipeline = Pipeline(nef, tracer) if args.dataset_type == "multiview": if pipeline.nef.grid is not None: if isinstance(pipeline.nef.grid, OctreeGrid): if not args.valid_only and not pipeline.nef.grid.blas_initialized(): if args.multiview_dataset_format in ['rtmv']: pipeline.nef.grid.init_from_pointcloud(train_dataset.coords) else: pipeline.nef.grid.init_dense()
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited. str2optim = {m.lower(): getattr(torch.optim, m) for m in dir(torch.optim) if m[0].isupper()} def register_class(cls, name): globals()[name] = cls def parse_options(return_parser=False): """Function used to parse options. Apps should use these CLI options, and then extend using parser.add_argument_group('app') Args: return_parser : If true, will return the parser object instead of the parsed arguments. This is useful if you want to keep the parser around to add special argument groups through app. """ # New CLI parser parser = argparse.ArgumentParser(description='ArgumentParser for kaolin-wisp.') ################### # Global arguments ################### global_group = parser.add_argument_group('global') global_group.add_argument('--trainer-type', type=str, help='Trainer class to use') global_group.add_argument('--exp-name', type=str, help='Experiment name.') global_group.add_argument('--perf', action='store', const=True, default=False, nargs='?', help='Use high-level profiling for the trainer.') global_group.add_argument('--detect-anomaly', action='store', const=True, default=False, nargs='?', help='Turn on anomaly detection.') global_group.add_argument('--config', type=str, help='Path to config file to replace defaults.') global_group.add_argument('--default-channel', type=str, help='Default channel to show in the viewer') global_group.add_argument('--save-map-only', action='store', const=True, default=False, nargs='?', help='Load model and save the 3D map only') ################### # Grid arguments ################### grid_group = parser.add_argument_group('grid') grid_group.add_argument('--grid-type', type=str, default='OctreeGrid', choices=['None', 'OctreeGrid', 'CodebookOctreeGrid', 'TriplanarGrid', 'HashGrid'], help='Type of grid to use.') grid_group.add_argument('--interpolation-type', type=str, default='linear', choices=['linear', 'closest'], help='SPC interpolation mode.') grid_group.add_argument('--as-type', type=str, default='none', choices=['none', 'octree'], help='Type of accelstruct to use.') grid_group.add_argument('--raymarch-type', type=str, default='voxel', choices=['voxel', 'ray'], help='Method of raymarching. `voxel` samples within each primitive, \ `ray` samples within rays and then filters them with the primitives. 
\ See the accelstruct for details.') grid_group.add_argument('--multiscale-type', type=str, default='sum', choices=['cat', 'sum'], help='Type of multiscale aggregation function to use.') grid_group.add_argument('--feature-dim', type=int, default=32, help='Feature map dimension') grid_group.add_argument('--feature-std', type=float, default=0.0, help='Feature map std') grid_group.add_argument('--feature-bias', type=float, default=0.0, help='Feature map bias') grid_group.add_argument('--noise-std', type=float, default=0.0, help='Added noise to features in training.') grid_group.add_argument('--num-lods', type=int, default=1, help='Number of LODs') grid_group.add_argument('--base-lod', type=int, default=2, help='Base level LOD') grid_group.add_argument('--max-grid-res', type=int, default=2048, help='The maximum grid resolution. Used only in geometric initialization.') grid_group.add_argument('--tree-type', type=str, default='quad', choices=['quad', 'geometric'], help='What type of tree to use. `quad` is a quadtree or octree-like growing \ scheme, whereas geometric is the Instant-NGP growing scheme.') grid_group.add_argument('--codebook-bitwidth', type=int, default=8, help='Bitwidth to use for the codebook. The number of vectors will be 2^bitwidth.') # for Permutohedral grids grid_group.add_argument('--coarsest-scale', type=float, default=1.0, help='Coarsest grid scale') grid_group.add_argument('--finest-scale', type=float, default=0.0001, help='Finest grid scale') grid_group.add_argument('--capacity-log-2', type=int, default=18, help='Log2 capacity of grid') grid_group.add_argument('--delta-capacity-log-2', type=int, default=18, help='Log2 capacity of delta grid') ################### # Embedder arguments ################### embedder_group = parser.add_argument_group('embedder') embedder_group.add_argument('--embedder-type', type=str, default='none', choices=['none', 'positional', 'fourier']) embedder_group.add_argument('--pos-multires', type=int, default=10, help='log2 of max freq') embedder_group.add_argument('--view-multires', type=int, default=4, help='log2 of max freq') ################### # Decoder arguments (and general global network things) ################### net_group = parser.add_argument_group('net') net_group.add_argument('--nef-type', type=str, help='The neural field class to be used.') net_group.add_argument('--layer-type', type=str, default='none', choices=['none', 'spectral_norm', 'frobenius_norm', 'l_1_norm', 'l_inf_norm']) net_group.add_argument('--activation-type', type=str, default='relu', choices=['relu', 'sin']) net_group.add_argument('--decoder-type', type=str, default='basic', choices=['none', 'basic']) net_group.add_argument('--num-layers', type=int, default=1, help='Number of layers for the decoder') net_group.add_argument('--hidden-dim', type=int, default=128, help='Network width') net_group.add_argument('--out-dim', type=int, default=1, help='output dimension') net_group.add_argument('--skip', type=int, default=None, help='Layer to have skip connection.') net_group.add_argument('--pretrained', type=str, help='Path to pretrained model weights.') net_group.add_argument('--position-input', action='store', const=True, default=False, nargs='?', help='Use position as input.') # Semantic NeRF parameters net_group.add_argument('--num-classes', type=int, default=-1, help='num of semantic classes') net_group.add_argument('--num-instances', type=int, default=-1, help='num of object instnces') # if not specifiend, the following copy base net parameters 
net_group.add_argument('--sem-activation-type', type=str, default=None, choices=['relu', 'sin']) net_group.add_argument('--sem-num-layers', type=int, default=None, help='num of semantic layers') net_group.add_argument('--sem-hidden-dim', type=int, default=None, help='semantic hidden layer dimension') net_group.add_argument('--sem-detach', action='store', const=True, default=True, nargs='?', help='Detach encoder features before the semantic decoder.') net_group.add_argument('--sem-sigmoid', action='store', const=True, default=False, nargs='?', help='apply sigomid activation to semantic head.') net_group.add_argument('--sem-softmax', action='store', const=True, default=False, nargs='?', help='apply softmax to activation to semantic head.') net_group.add_argument('--sem-normalize', action='store', const=True, default=False, nargs='?', help='normalize output of semantic head.') net_group.add_argument('--contrast-sem-weight', type=float, default=0., help='semanticn semi sup loss weight.') net_group.add_argument('--sem-conf-enable', action='store', const=True, default=False, nargs='?', help='Reweight semantic predictions with confidence.') net_group.add_argument('--sem-temperature', type=float, default=1., help='semantic softmax temperature. 1 by default and has no effect.') net_group.add_argument('--sem-epoch-start', type=int, default=0, help='Epoch to start training semantic head') net_group.add_argument('--sem-cascade', action='store', const=True, default=False, nargs='?', help='Cascade panoptic decoders, first density then semantics') net_group.add_argument('--panoptic-features-type', type=str, default=None, choices=['position', 'pos_encoding', 'appearance', 'delta', 'separate']) # Semi supervised parameters net_group.add_argument('--inst-num-layers', type=int, default=None, help='num of instance layers') net_group.add_argument('--inst-hidden-dim', type=int, default=None, help='instance hidden layer dimension') net_group.add_argument('--inst-detach', action='store', const=True, default=True, nargs='?', help='Detach encoder features before the instance decoder.') net_group.add_argument('--inst-sigmoid', action='store', const=True, default=False, nargs='?', help='apply sigomid activation to instance head.') net_group.add_argument('--inst-softmax', action='store', const=True, default=False, nargs='?', help='apply softmax activation to instance head.') net_group.add_argument('--inst-direct-pos', action='store', const=True, default=False, nargs='?', help='use coordinates directly as instance decoder input') # for delta grid only net_group.add_argument('--separate-sem-grid', action='store', const=True, default=False, nargs='?', help='Do not fuse apperence and semantic grids in delta models') net_group.add_argument('--no-delta-grid', action='store', const=True, default=False, nargs='?', help='use only a color grid') net_group.add_argument('--inst-conf-bootstrap-epoch-start', type=int, default=-1, help='Epoch to start using the instance ID confidence to bootstrap training') ################### # Arguments for dataset ################### data_group = parser.add_argument_group('dataset') data_group.add_argument('--dataset-type', type=str, default=None, choices=['sdf', 'multiview'], help='Dataset class to use') data_group.add_argument('--dataset-path', type=str, help='Path to the dataset') data_group.add_argument('--dataset-num-workers', type=int, default=-1, help='Number of workers for dataset preprocessing, if it supports multiprocessing. 
\ -1 indicates no multiprocessing.') data_group.add_argument('--load-modes', nargs='+', default=[], help='modes to be loaded from the dataset.[] or None implies load all modes.') data_group.add_argument('--scale', type=list, default=None, help='scale factor to fit the data to the unit cube') data_group.add_argument('--offset', type=list, default=None, help='Position offset in in the unit cube') data_group.add_argument('--pose-src', type=str, default='odom', choices=['odom', 'metashape'], help='Dataset poses source') data_group.add_argument('--dataset-mode', type=str, default='label_window', choices=['label_window', 'all_frames_window'], help='Dataset mode configuration. Load sequences around each labeled frame or create sequences to cover the whole dataset') net_group.add_argument('--max-depth', type=float, default=-1., help='max depth for labels.') data_group.add_argument('--class-labels', nargs='+', default=[], help='classes to be loaded from the dataset. The order is used to enumerate the class IDs in the output predictions') # SDF Dataset data_group.add_argument('--sample-mode', type=str, nargs='*', default=['rand', 'near', 'near', 'trace', 'trace'], help='The sampling scheme to be used.') data_group.add_argument('--get-normals', action='store', const=True, default=False, nargs='?', help='Sample the normals.') data_group.add_argument('--num-samples', type=int, default=100000, help='Number of samples per mode (or per epoch for SPC)') data_group.add_argument('--num-samples-on-mesh', type=int, default=100000000, help='Number of samples generated on mesh surface to initialize occupancy structures') data_group.add_argument('--sample-tex', action='store', const=True, default=False, nargs='?', help='Sample textures') data_group.add_argument('--mode-mesh-norm', type=str, default='sphere', choices=['sphere', 'aabb', 'planar', 'none'], help='Normalize the mesh') data_group.add_argument('--samples-per-voxel', type=int, default=256, help='Number of samples per voxel (for SDF initialization from grid)') data_group.add_argument('--voxel-raymarch-epoch-start', type=int, default=-1, help='change raymarching to voxel tracing after this epoch') # Multiview Dataset data_group.add_argument('--multiview-dataset-format', default='standard', choices=['standard', 'rtmv'], help='Data format for the transforms') data_group.add_argument('--num-rays-sampled-per-img', type=int, default='4096', help='Number of rays to sample per image') data_group.add_argument('--bg-color', default='white', choices=['white', 'black'], help='Background color') data_group.add_argument('--mip', type=int, default=None, help='MIP level of ground truth image') data_group.add_argument('--val-mip', type=int, default=None, help='MIP level of ground truth image for validation') data_group.add_argument('--model-rescaling', default='snap_to_bottom', choices=['snap_to_bottom', 'scale_to_fit'], help='Rescaling of model options to fit in the unit cube') data_group.add_argument('--add-noise-to-train-poses', action='store', const=True, default=False, nargs='?', help='add noise to train poses to test pose optimization') data_group.add_argument('--pose-noise-strength', type=float, default=0.01, help='spose noise multipier.') # For sequence of real images with semantic labels on a specific frame # This index corresponds to the labeled frame to run NeRF around data_group.add_argument('--dataset-center-idx', type=int, default=0, help='Semantinc labeled center image') ################### # Arguments for optimizer ################### optim_group = 
parser.add_argument_group('optimizer') optim_group.add_argument('--optimizer-type', type=str, default='adam', choices=list(str2optim.keys()), help='Optimizer to be used.') optim_group.add_argument('--lr', type=float, default=0.001, help='Learning rate.') optim_group.add_argument('--extrinsics-lr', type=float, default=-1, help='extrinsics Learning rate.') optim_group.add_argument('--use-lr-scheduler', action='store', const=True, default=False, nargs='?', help='Flag to enable lr scheduler.') optim_group.add_argument('--lr-scheduler-type', type=str, default='step', choices=['panoptic_step', 'step', 'one_cycle'], help='Type of lr scheduler to use.') optim_group.add_argument('--lr-step-size', type=int, default=0, help='Step size for lr scheduler.') optim_group.add_argument('--lr-step-gamma', type=float, default=0.1, help='Gamma for lr scheduler.') optim_group.add_argument('--weight-decay', type=float, default=0, help='Weight decay.') optim_group.add_argument('--grid-lr-weight', type=float, default=100.0, help='Relative LR weighting for the grid') optim_group.add_argument('--delta-grid-lr-weight', type=float, default=100.0, help='Relative LR weighting for the delta grid') optim_group.add_argument('--rgb-weight', type=float, default=1.0, help='Weight of rgb loss') optim_group.add_argument('--lr-warmup-epochs', type=int, default=1, help='Number of learning rate warm up epochs.') optim_group.add_argument('--lr-div-factor', type=float, default=1.0, help='Learning rate final division factor') optim_group.add_argument('--sem-weight', type=float, default=1.0, help='Weight of semantic loss') optim_group.add_argument('--inst-weight', type=float, default=0.01, help='Semi-supervised loss weight.') optim_group.add_argument('--inst-outlier-rejection', action='store', const=True, default=False, nargs='?', help='Reject repeated ID outliers in instance segmentation.') optim_group.add_argument('--grid-tvl1-reg', type=float, default=0.0, help='Grid total vatiation L1 regulatization weight.') optim_group.add_argument('--grid-tvl2-reg', type=float, default=0.0, help='Grid total vatiation L2 regulatization weight.') optim_group.add_argument('--delta-grid-tvl1-reg', type=float, default=0.0, help='Delta grid total vatiation L1 regulatization weight.') optim_group.add_argument('--delta-grid-tvl2-reg', type=float, default=0.0, help='Delta grid total vatiation L2 regulatization weight.') optim_group.add_argument('--tv-window-size', type=float, default=0.0, help='Persentage of the hypervolume to aplly total vatiation to.') optim_group.add_argument('--tv-edge-num-samples', type=float, default=0.0, help='Number edge samples for total vatiation.') optim_group.add_argument('--ray-sparcity-reg', type=float, default=0.0, help='Ray density sparcity regularizarion weight.') ################### # Arguments for training ################### train_group = parser.add_argument_group('trainer') train_group.add_argument('--epochs', type=int, default=250, help='Number of epochs to run the training.') train_group.add_argument('--batch-size', type=int, default=512, help='Batch size for the training.') train_group.add_argument('--resample', action='store', const=True, default=False, nargs='?', help='Resample the dataset after every epoch.') train_group.add_argument('--only-last', action='store', const=True, default=False, nargs='?', help='Train only last LOD.') train_group.add_argument('--resample-every', type=int, default=1, help='Resample every N epochs') train_group.add_argument('--model-format', type=str, default='full', choices=['full', 
'params_only', 'state_dict', 'params_only_ignore_missmatch'], help='Format in which to save models.') train_group.add_argument('--save-as-new', action='store', const=True, default=False, nargs='?', help='Save the model at every epoch (no overwrite).') train_group.add_argument('--save-every', type=int, default=5, help='Save the model at every N epoch.') train_group.add_argument('--render-every', type=int, default=5, help='Render every N epochs') train_group.add_argument('--render-val-labels', action='store', const=True, default=False, nargs='?', help='Render semantic labels in validations stage') train_group.add_argument('--save-grid', action='store', const=True, default=False, nargs='?', help='Save 3D grids to visualize with kaolin dash3d or omniverse') train_group.add_argument('--save-preds', action='store', const=True, default=False, nargs='?', help='save all preds and confidence') # TODO (ttakikawa): Only used for SDFs, but also should support RGB etc train_group.add_argument('--log-2d', action='store', const=True, default=False, nargs='?', help='Log cutting plane renders to TensorBoard.') train_group.add_argument('--log-dir', type=str, default='_results/logs/runs/', help='Log file directory for checkpoints.') # TODO (ttakikawa): This is only really used in the SDF training but it should be useful for multiview too train_group.add_argument('--grow-every', type=int, default=-1, help='Grow network every X epochs') train_group.add_argument('--prune-every', type=int, default=-1, help='Prune every N epochs') train_group.add_argument('--prune-at-epoch', type=int, default=-1, help='Prune one time at a the specified epoch') train_group.add_argument('--prune-at-start', action='store', const=True, default=False, nargs='?', help='Prune once at the begining of training, useful for pretrained models.') train_group.add_argument('--inst-num-dilations', type=int, default=-1, help='num of post-processing erosion/dilation steps for instance segmentation') train_group.add_argument('--low-res-val', action='store', const=True, default=False, nargs='?', help='use val-mip even at the last validation stage') # TODO (ttakikawa): Only used in multiview training, combine with the SDF growing schemes. train_group.add_argument('--random-lod', action='store', const=True, default=False, nargs='?', help='Use random lods to train.') # One by one trains one level at a time. 
# Increase starts from [0] and ends up at [0,...,N] # Shrink strats from [0,...,N] and ends up at [N] # Fine to coarse starts from [N] and ends up at [0,...,N] # Only last starts and ends at [N] train_group.add_argument('--growth-strategy', type=str, default='increase', choices=['onebyone','increase','shrink', 'finetocoarse', 'onlylast'], help='Strategy for coarse-to-fine training') train_group.add_argument('--log-sub-losses', action='store', const=True, default=False, nargs='?', help='If loss is composed, log all sub-losses as well.') # Camera params train_group.add_argument('--optimize-extrinsics', action='store', const=True, default=False, nargs='?', help='Weather to optimize camera extrinsics from the dataset.') train_group.add_argument('--extrinsics-epoch-start', type=int, default=0, help='Epoch to start training clustering post-processing') train_group.add_argument('--extrinsics-epoch-end', type=int, default=-1, help='Epoch to end training clustering post-processing') # Semi-Supervised params train_group.add_argument('--clustering-epoch-start', type=int, default=0, help='Epoch to start training clustering post-processing') train_group.add_argument('--num-clustering-samples', type=int, default=0, help='Number of render samples to use for clustering') train_group.add_argument('--num-clustering-workers', type=int, default=1, help='Number of jobs to run clustering') train_group.add_argument('--lod-anneling', action='store', const=True, default=False, nargs='?', help='Enable lod grid feature anneling.') train_group.add_argument('--lod-annel-epochs', type=int, default=0, help='Epoch to run anneling on lod grid features') train_group.add_argument('--lod-annel-epoch-start', type=int, default=0, help='Epoch to start anneling lod grid features') train_group.add_argument('--inst-epoch-start', type=int, default=0, help='Epoch to start training instance head') train_group.add_argument('--inst-loss', type=str, default='sup_contrastive', choices=['sup_contrastive'], help='Semi-supervised loss type. this loss is disabled if not specified') train_group.add_argument('--inst-dist-func', type=str, default='cos', choices=['l1', 'l2', 'cos'], help='Semi-supervised distnace function') train_group.add_argument('--inst-conf-enable', action='store', const=True, default=False, nargs='?', help='reweight inst loss with prediction confidence') train_group.add_argument('--inst-normalize', action='store', const=True, default=False, nargs='?', help='Semi-supervised feature pre-normalization.') train_group.add_argument('--weight-class-inbalance', action='store', const=True, default=False, nargs='?', help='Weather to compute a class-wise weight based on apearance in the data.') # Sup-Contrastive loss train_group.add_argument('--inst-temperature', type=float, default=0.07, help='inst softmax temperature.') train_group.add_argument('--inst-soft-temperature', type=float, default=0.0, help='inst softmax temperature before integration.') train_group.add_argument('--base-temperature', type=float, default=0.07, help='softmax base temperature. 
final is computed: l = -(T/base_T) * rms_loss') train_group.add_argument('--inst-pn-ratio', type=float, default=0.5, help='Ratio between positive and negative examples for supervised contrastive learning') train_group.add_argument('--sem-segment-reg-weight', type=float, default=0.0, help='Weight of semantic segment consistency regularization') train_group.add_argument('--inst-segment-reg-weight', type=float, default=0.0, help='Weight of instance segment consistency regularization') train_group.add_argument('--inst-segment-reg-epoch-start', type=float, default=-1, help='Weight of instance segment consistency regularization') train_group.add_argument('--optimize-val-extrinsics', action='store', const=True, default=False, nargs='?', help='Optimize val extrinsics flag.') train_group.add_argument('--val-extrinsics-start', type=int, default=0, help='Val extrinsics start epoch') train_group.add_argument('--val-extrinsics-every', type=int, default=0, help='Optimize validation extrinsics every n epochs') train_group.add_argument('--val-extrinsics-end', type=int, default=-1, help='Val extrinsics end epoch') ################### # Arguments for training ################### valid_group = parser.add_argument_group('validation') valid_group.add_argument('--valid-only', action='store', const=True, default=False, nargs='?', help='Run validation only (and do not run training).') valid_group.add_argument('--valid-every', type=int, default=-1, help='Frequency of running validation.') valid_group.add_argument('--valid-split', type=str, default='val', help='Split to use for validation.') ################### # Arguments for renderer ################### renderer_group = parser.add_argument_group('renderer') renderer_group.add_argument('--render-res', type=int, nargs=2, default=[512, 512], help='Width/height to render at.') renderer_group.add_argument('--render-batch', type=int, default=0, help='Batch size (in number of rays) for batched rendering.') renderer_group.add_argument('--camera-origin', type=float, nargs=3, default=[-2.8, 2.8, -2.8], help='Camera origin.') renderer_group.add_argument('--camera-lookat', type=float, nargs=3, default=[0, 0, 0], help='Camera look-at/target point.') renderer_group.add_argument('--camera-fov', type=float, default=30, help='Camera field of view (FOV).') renderer_group.add_argument('--camera-proj', type=str, choices=['ortho', 'persp'], default='persp', help='Camera projection.') renderer_group.add_argument('--camera-clamp', nargs=2, type=float, default=[0, 10], help='Camera clipping bounds.') renderer_group.add_argument('--tracer-type', type=str, default='PackedRFTracer', help='The tracer to be used.') renderer_group.add_argument('--num-val-frames-to-save', type=int, default=0, help='number of validation frames to save') # TODO(ttakikawa): In the future the interface will be such that you either select an absolute step size or # you select the number of steps to take. Sphere tracing will take step-scales. 
renderer_group.add_argument('--num-steps', type=int, default=128, help='Number of steps for raymarching / spheretracing / etc') renderer_group.add_argument('--step-size', type=float, default=1.0, help='Scale of step size') renderer_group.add_argument('--ray-max-travel', type=float, default=6.0, help='ray travel distance in meters after hitting the grid') # used only in voxel raymarching to increase resolution at the # surface of the model # Sphere tracing stuff renderer_group.add_argument('--min-dis', type=float, default=0.0003, help='Minimum distance away from surface for spheretracing') # TODO(ttakikawa): Shader stuff... will be more modular in future renderer_group.add_argument('--matcap-path', type=str, default='data/matcaps/matcap_plastic_yellow.jpg', help='Path to the matcap texture to render with.') renderer_group.add_argument('--ao', action='store', const=True, default=False, nargs='?', help='Use ambient occlusion.') renderer_group.add_argument('--shadow', action='store', const=True, default=False, nargs='?', help='Use shadowing.') renderer_group.add_argument('--shading-mode', type=str, default='normal', choices=['matcap', 'rb', 'normal'], help='Shading mode.') # Parse and run if return_parser: return parser else: return argparse_to_str(parser) def parse_yaml_config(config_path, parser): """Parses and sets the parser defaults with a yaml config file. Args: config_path : path to the yaml config file. parser : The parser for which the defaults will be set. parent : True if parsing the parent yaml. Should never be set to True by the user. """ with open(config_path) as f: config_dict = yaml.safe_load(f) list_of_valid_fields = [] for group in parser._action_groups: group_dict = {list_of_valid_fields.append(a.dest) for a in group._group_actions} list_of_valid_fields = set(list_of_valid_fields) defaults_dict = {} # Load the parent config if it exists parent_config_path = config_dict.pop("parent", None) if parent_config_path is not None: if not os.path.isabs(parent_config_path): parent_config_path = os.path.join(os.path.split(config_path)[0], parent_config_path) with open(parent_config_path) as f: parent_config_dict = yaml.safe_load(f) if "parent" in parent_config_dict.keys(): raise Exception("Hierarchical configs of more than 1 level deep are not allowed.") for key in parent_config_dict: for field in parent_config_dict[key]: if field not in list_of_valid_fields: raise ValueError( f"ERROR: {field} is not a valid option. Check for typos in the config." ) defaults_dict[field] = parent_config_dict[key][field] # Loads child parent and overwrite the parent configs # The yaml files assumes the argument groups, which aren't actually nested. for key in config_dict: for field in config_dict[key]: if field not in list_of_valid_fields: raise ValueError( f"ERROR: {field} is not a valid option. Check for typos in the config." ) defaults_dict[field] = config_dict[key][field] parser.set_defaults(**defaults_dict) def parse_config_dict(config_dict, parser): """Parses and sets the parser defaults with a yaml config file. Args: config_path : path to the yaml config file. parser : The parser for which the defaults will be set. parent : True if parsing the parent yaml. Should never be set to True by the user. 
""" list_of_valid_fields = [] for group in parser._action_groups: group_dict = {list_of_valid_fields.append(a.dest) for a in group._group_actions} list_of_valid_fields = set(list_of_valid_fields) defaults_dict = {} # Loads child parent and overwrite the parent configs # The yaml files assumes the argument groups, which aren't actually nested. for key in config_dict: for field in config_dict[key]: if field not in list_of_valid_fields: raise ValueError( f"ERROR: {field} is not a valid option. Check for typos in the config." ) defaults_dict[field] = config_dict[key][field] parser.set_defaults(**defaults_dict) def argparse_to_str(parser, args=None, config_dict=None): """Convert parser to string representation for Tensorboard logging. Args: parser (argparse.parser): Parser object. Needed for the argument groups. args : The parsed arguments. Will compute from the parser if None. Returns: args : The parsed arguments. arg_str : The string to be printed. """ if args is None: args = parser.parse_args() if config_dict is not None: parse_config_dict(config_dict, parser) elif args.config is not None: parse_yaml_config(args.config, parser) args = parser.parse_args() args_dict = {} for group in parser._action_groups: group_dict = {a.dest: getattr(args, a.dest, None) for a in group._group_actions} args_dict[group.title] = vars(argparse.Namespace(**group_dict)) pp = pprint.PrettyPrinter(indent=2) args_str = pp.pformat(args_dict) args_str = f'```{args_str}```' return args, args_str def get_trainer(args): return globals()[args.trainer_type] def get_optimizer_from_config(args): """Utility function to get the optimizer from the parsed config. """ optim_cls = str2optim[args.optimizer_type] if args.optimizer_type == 'adam': optim_params = {'eps': 1e-15} elif args.optimizer_type == 'sgd': optim_params = {'momentum': 0.8} else: optim_params = {} return optim_cls, optim_params def get_modules_from_config(args): """Utility function to get the modules for training from the parsed config. 
""" val_dataset = None if args.dataset_type == "multiview": log.info('Loading training dataset...') transform = SampleRays(args.num_rays_sampled_per_img) train_dataset = MultiviewDataset(**vars(args), transform=transform) train_dataset.init() args.ray_max_travel = args.ray_max_travel * train_dataset.scale if args.optimize_val_extrinsics: log.info('Loading validation split for pose optimization only...') val_dataset = MultiviewDataset(**vars(args), split='val', transform=transform) val_dataset.init() if 'semantic_info' in vars(train_dataset) and train_dataset.semantic_info is not None: args.num_classes = train_dataset.semantic_info['num_classes'] args.num_instances = train_dataset.semantic_info['num_instances'] device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') nef = globals()[args.nef_type](**vars(args)) tracer = globals()[args.tracer_type](**vars(args)) # Use a Bundle Adjustment pipeline if extriniscs need to be optimized if args.optimize_extrinsics: cameras = train_dataset.data['cameras'] # add validation cameras if val extrinsics need to be optimized if args.optimize_val_extrinsics: cameras.update(val_dataset.data['cameras']) pipeline = BAPipeline(nef, cameras, tracer) else: pipeline = Pipeline(nef, tracer) if args.dataset_type == "multiview": if pipeline.nef.grid is not None: if isinstance(pipeline.nef.grid, OctreeGrid): if not args.valid_only and not pipeline.nef.grid.blas_initialized(): if args.multiview_dataset_format in ['rtmv']: pipeline.nef.grid.init_from_pointcloud(train_dataset.coords) else: pipeline.nef.grid.init_dense()
elif isinstance(pipeline.nef.grid, PermutoGrid):
1
2023-10-30 16:14:39+00:00
8k
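The config parser in the row above supports one level of config inheritance: a child YAML may name a parent file, the parent's grouped fields are validated and applied first, and the child's fields overwrite them before being pushed into the argparse defaults. The sketch below shows just that merge step under the same grouped-YAML assumption; load_merged_config, its flat return dict, and the error message are illustrative, not part of the row's code.

import os

import yaml


def load_merged_config(config_path):
    """Return a flat {field: value} dict, child values overriding the parent's (illustrative helper)."""
    with open(config_path) as f:
        config = yaml.safe_load(f)

    merged = {}
    parent_path = config.pop("parent", None)
    if parent_path is not None:
        # Parent paths may be given relative to the child config file.
        if not os.path.isabs(parent_path):
            parent_path = os.path.join(os.path.dirname(config_path), parent_path)
        with open(parent_path) as f:
            parent = yaml.safe_load(f)
        if "parent" in parent:
            raise ValueError("only one level of config inheritance is supported")
        # Configs are grouped by argparse group name; flatten the parent first.
        for group_fields in parent.values():
            merged.update(group_fields)

    # Child entries overwrite whatever the parent provided.
    for group_fields in config.values():
        merged.update(group_fields)
    return merged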
BIT-DA/Annotator
annotator/data/dataset/semantickitti/semantickitti.py
[ { "identifier": "LEARNING_MAP_19", "path": "annotator/data/dataset/semantickitti/semantickitti_utils.py", "snippet": "LEARNING_MAP_19 = {\n 0: 0, # \"unlabeled\"\n 1: 0, # \"outlier\" mapped to \"unlabeled\" --------------------------mapped\n 10: 1, # \"car\"\n 11: 2, # \"bicycle\"\n 13: 5, # \"bus\" mapped to \"other-vehicle\" --------------------------mapped\n 15: 3, # \"motorcycle\"\n 16: 5, # \"on-rails\" mapped to \"other-vehicle\" ---------------------mapped\n 18: 4, # \"truck\"\n 20: 5, # \"other-vehicle\"\n 30: 6, # \"person\"\n 31: 7, # \"bicyclist\"\n 32: 8, # \"motorcyclist\"\n 40: 9, # \"road\"\n 44: 10, # \"parking\"\n 48: 11, # \"sidewalk\"\n 49: 12, # \"other-ground\"\n 50: 13, # \"building\"\n 51: 14, # \"fence\"\n 52: 0, # \"other-structure\" mapped to \"unlabeled\" ------------------mapped\n 60: 9, # \"lane-marking\" to \"road\" ---------------------------------mapped\n 70: 15, # \"vegetation\"\n 71: 16, # \"trunk\"\n 72: 17, # \"terrain\"\n 80: 18, # \"pole\"\n 81: 19, # \"traffic-sign\"\n 99: 0, # \"other-object\" to \"unlabeled\" ----------------------------mapped\n 252: 1, # \"moving-car\" to \"car\" ------------------------------------mapped\n 253: 7, # \"moving-bicyclist\" to \"bicyclist\" ------------------------mapped\n 254: 6, # \"moving-person\" to \"person\" ------------------------------mapped\n 255: 8, # \"moving-motorcyclist\" to \"motorcyclist\" ------------------mapped\n 256: 5, # \"moving-on-rails\" mapped to \"other-vehicle\" --------------mapped\n 257: 5, # \"moving-bus\" mapped to \"other-vehicle\" -------------------mapped\n 258: 4, # \"moving-truck\" to \"truck\" --------------------------------mapped\n 259: 5, # \"moving-other\"-vehicle to \"other-vehicle\" ----------------mapped\n}" }, { "identifier": "LEARNING_MAP_12", "path": "annotator/data/dataset/semantickitti/semantickitti_utils.py", "snippet": "LEARNING_MAP_12 = {\n 0: 0, # \"unlabeled\"\n 1: 0, # \"outlier\" mapped to \"unlabeled\" --------------------------mapped\n 10: 1, # \"car\"\n 11: 2, # \"bicycle\"\n 13: 5, # \"bus\" mapped to \"other-vehicle\" --------------------------mapped\n 15: 3, # \"motorcycle\"\n 16: 0, # \"on-rails\" mapped to \"other-vehicle\" ---------------------mapped\n 18: 4, # \"truck\"\n 20: 0, # \"other-vehicle\"\n 30: 6, # \"person\"\n 31: 0, # \"bicyclist\"\n 32: 0, # \"motorcyclist\"\n 40: 7, # \"road\"\n 44: 0, # \"parking\"\n 48: 8, # \"sidewalk\"\n 49: 9, # \"other-ground\"\n 50: 12, # \"building\"\n 51: 12, # \"fence\"\n 52: 0, # \"other-structure\" mapped to \"unlabeled\" ------------------mapped\n 60: 7, # \"lane-marking\" to \"road\" ---------------------------------mapped\n 70: 10, # \"vegetation\"\n 71: 0, # \"trunk\"\n 72: 11, # \"terrain\"\n 80: 12, # \"pole\"\n 81: 12, # \"traffic-sign\"\n 99: 0, # \"other-object\" to \"unlabeled\" ----------------------------mapped\n 252: 1, # \"moving-car\" to \"car\" ------------------------------------mapped\n 253: 0, # \"moving-bicyclist\" to \"bicyclist\" ------------------------mapped\n 254: 6, # \"moving-person\" to \"person\" ------------------------------mapped\n 255: 0, # \"moving-motorcyclist\" to \"motorcyclist\" ------------------mapped\n 256: 0, # \"moving-on-rails\" mapped to \"other-vehicle\" --------------mapped\n 257: 5, # \"moving-bus\" mapped to \"other-vehicle\" -------------------mapped\n 258: 4, # \"moving-truck\" to \"truck\" --------------------------------mapped\n 259: 0, # \"moving-other\"-vehicle to \"other-vehicle\" ----------------mapped\n}" }, { "identifier": "LEARNING_MAP_7", "path": 
"annotator/data/dataset/semantickitti/semantickitti_utils.py", "snippet": "LEARNING_MAP_7 = {\n 0: 0, # \"unlabeled\"\n 1: 0, # \"outlier\" mapped to \"unlabeled\" --------------------------mapped\n 10: 1, # \"car\"\n 11: 0, # \"bicycle\"\n 13: 0, # \"bus\" mapped to \"other-vehicle\" --------------------------mapped\n 15: 0, # \"motorcycle\"\n 16: 0, # \"on-rails\" mapped to \"other-vehicle\" ---------------------mapped\n 18: 0, # \"truck\"\n 20: 0, # \"other-vehicle\"\n 30: 2, # \"person\"\n 31: 0, # \"bicyclist\"\n 32: 0, # \"motorcyclist\"\n 40: 3, # \"road\"\n 44: 3, # \"parking\"\n 48: 4, # \"sidewalk\"\n 49: 0, # \"other-ground\"\n 50: 6, # \"building\"\n 51: 6, # \"fence\"\n 52: 0, # \"other-structure\" mapped to \"unlabeled\" ------------------mapped\n 60: 3, # \"lane-marking\" to \"road\" ---------------------------------mapped\n 70: 7, # \"vegetation\"\n 71: 7, # \"trunk\"\n 72: 5, # \"terrain\"\n 80: 6, # \"pole\"\n 81: 6, # \"traffic-sign\"\n 99: 0, # \"other-object\" to \"unlabeled\" ----------------------------mapped\n 252: 1, # \"moving-car\" to \"car\" ------------------------------------mapped\n 253: 0, # \"moving-bicyclist\" to \"bicyclist\" ------------------------mapped\n 254: 2, # \"moving-person\" to \"person\" ------------------------------mapped\n 255: 0, # \"moving-motorcyclist\" to \"motorcyclist\" ------------------mapped\n 256: 0, # \"moving-on-rails\" mapped to \"other-vehicle\" --------------mapped\n 257: 0, # \"moving-bus\" mapped to \"other-vehicle\" -------------------mapped\n 258: 0, # \"moving-truck\" to \"truck\" --------------------------------mapped\n 259: 0, # \"moving-other\"-vehicle to \"other-vehicle\" ----------------mapped\n}" }, { "identifier": "LEARNING_MAP_11", "path": "annotator/data/dataset/semantickitti/semantickitti_utils.py", "snippet": "LEARNING_MAP_11 = {\n 0: 0, # \"unlabeled\"\n 1: 0, # \"outlier\" mapped to \"unlabeled\" --------------------------mapped\n 10: 1, # \"car\"\n 11: 2, # \"bicycle\"\n 13: 0, # \"bus\" mapped to \"other-vehicle\" --------------------------mapped\n 15: 0, # \"motorcycle\"\n 16: 0, # \"on-rails\" mapped to \"other-vehicle\" ---------------------mapped\n 18: 0, # \"truck\"\n 20: 0, # \"other-vehicle\"\n 30: 3, # \"person\"\n 31: 4, # \"bicyclist\"\n 32: 4, # \"motorcyclist\"\n 40: 5, # \"road\"\n 44: 0, # \"parking\"\n 48: 0, # \"sidewalk\"\n 49: 0, # \"other-ground\"\n 50: 6, # \"building\"\n 51: 7, # \"fence\"\n 52: 0, # \"other-structure\" mapped to \"unlabeled\" ------------------mapped\n 60: 5, # \"lane-marking\" to \"road\" ---------------------------------mapped\n 70: 8, # \"vegetation\"\n 71: 9, # \"trunk\"\n 72: 0, # \"terrain\"\n 80: 10, # \"pole\"\n 81: 11, # \"traffic-sign\"\n 99: 0, # \"other-object\" to \"unlabeled\" ----------------------------mapped\n 252: 1, # \"moving-car\" to \"car\" ------------------------------------mapped\n 253: 4, # \"moving-bicyclist\" to \"bicyclist\" ------------------------mapped\n 254: 3, # \"moving-person\" to \"person\" ------------------------------mapped\n 255: 4, # \"moving-motorcyclist\" to \"motorcyclist\" ------------------mapped\n 256: 0, # \"moving-on-rails\" mapped to \"other-vehicle\" --------------mapped\n 257: 0, # \"moving-bus\" mapped to \"other-vehicle\" -------------------mapped\n 258: 0, # \"moving-truck\" to \"truck\" --------------------------------mapped\n 259: 0, # \"moving-other\"-vehicle to \"other-vehicle\" ----------------mapped\n}" } ]
import os import numpy as np import torch import random from torch.utils import data from .semantickitti_utils import LEARNING_MAP_19, LEARNING_MAP_12, LEARNING_MAP_7, LEARNING_MAP_11
3,964
ABSOLUTE_PATH = os.path.dirname(os.path.abspath(__file__)) def absoluteFilePaths(directory): for dirpath, _, filenames in os.walk(directory): for f in filenames: yield os.path.abspath(os.path.join(dirpath, f)) class SemantickittiDataset(data.Dataset): def __init__( self, data_cfgs=None, training: bool = True, class_names: list = None, root_path: str = None, logger=None, if_scribble: bool = False, ): super().__init__() self.data_cfgs = data_cfgs self.root_path = root_path self.training = training self.logger = logger self.class_names = class_names self.tta = data_cfgs.get('TTA', False) self.train_val = data_cfgs.get('TRAINVAL', False) self.augment = data_cfgs.AUGMENT self.if_scribble = if_scribble self.num_classes = data_cfgs.NUM_CLASSES if self.training and not self.train_val: self.split = 'train' else: if self.training and self.train_val: self.split = 'train_val' else: self.split = 'val' if self.tta: self.split = 'test' if self.split == 'train': self.seqs = ['00', '01', '02', '03', '04', '05', '06', '07', '09', '10'] elif self.split == 'val': self.seqs = ['08'] elif self.split == 'train_val': self.seqs = ['00', '01', '02', '03', '04', '05', '06', '07', '09', '10', '08'] elif self.split == 'test': self.seqs = ['11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21'] else: raise Exception('split must be train/val/train_val/test.') self.annos = [] for seq in self.seqs: self.annos += absoluteFilePaths('/'.join([self.root_path, str(seq).zfill(2), 'velodyne'])) self.annos.sort() self.annos_another = self.annos.copy() random.shuffle(self.annos_another) print(f'The total sample is {len(self.annos)}') self._sample_idx = np.arange(len(self.annos)) self.samples_per_epoch = self.data_cfgs.get('SAMPLES_PER_EPOCH', -1) if self.samples_per_epoch == -1 or not self.training: self.samples_per_epoch = len(self.annos) if self.training: self.resample() else: self.sample_idx = self._sample_idx # init_path = os.path.join(ABSOLUTE_PATH, 'semantickitti_init.pkl') self.scan_size = {} # if not os.path.isfile(init_path): for path in self.annos: self.scan_size[path] = np.fromfile(path, dtype=np.float32).reshape((-1, 4)).shape[0] # torch.save(self.scan_size, init_path) # else: # self.scan_size = torch.load(init_path) def __len__(self): return len(self.sample_idx) def resample(self): self.sample_idx = np.random.choice(self._sample_idx, self.samples_per_epoch) def get_kitti_points_ringID(self, points): scan_x = points[:, 0] scan_y = points[:, 1] yaw = -np.arctan2(scan_y, -scan_x) proj_x = 0.5 * (yaw / np.pi + 1.0) new_raw = np.nonzero((proj_x[1:] < 0.2) * (proj_x[:-1] > 0.8))[0] + 1 proj_y = np.zeros_like(proj_x) proj_y[new_raw] = 1 ringID = np.cumsum(proj_y) ringID = np.clip(ringID, 0, 63) return ringID def __getitem__(self, index): raw_data = np.fromfile(self.annos[index], dtype=np.float32).reshape((-1, 4)) if self.split == 'test': annotated_data = np.expand_dims(np.zeros_like(raw_data[:, 0], dtype=int), axis=1) else: if self.if_scribble: # ScribbleKITTI (weak label) annos = self.annos[index].replace('SemanticKITTI', 'ScribbleKITTI') annotated_data = np.fromfile( annos.replace('velodyne', 'scribbles')[:-3] + 'label', dtype=np.uint32 ).reshape((-1, 1)) else: # SemanticKITTI (full label) annotated_data = np.fromfile( self.annos[index].replace('velodyne', 'labels')[:-3] + 'label', dtype=np.uint32 ).reshape((-1, 1)) annotated_data = annotated_data & 0xFFFF if self.num_classes == 19: annotated_data = np.vectorize(LEARNING_MAP_19.__getitem__)(annotated_data) elif self.num_classes == 12: annotated_data = 
np.vectorize(LEARNING_MAP_12.__getitem__)(annotated_data) elif self.num_classes == 7:
# used for polarmix instance_classes = [0, 1, 2, 3, 4, 5, 6, 7] Omega = [np.random.random() * np.pi * 2 / 3, (np.random.random() + 1) * np.pi * 2 / 3] ABSOLUTE_PATH = os.path.dirname(os.path.abspath(__file__)) def absoluteFilePaths(directory): for dirpath, _, filenames in os.walk(directory): for f in filenames: yield os.path.abspath(os.path.join(dirpath, f)) class SemantickittiDataset(data.Dataset): def __init__( self, data_cfgs=None, training: bool = True, class_names: list = None, root_path: str = None, logger=None, if_scribble: bool = False, ): super().__init__() self.data_cfgs = data_cfgs self.root_path = root_path self.training = training self.logger = logger self.class_names = class_names self.tta = data_cfgs.get('TTA', False) self.train_val = data_cfgs.get('TRAINVAL', False) self.augment = data_cfgs.AUGMENT self.if_scribble = if_scribble self.num_classes = data_cfgs.NUM_CLASSES if self.training and not self.train_val: self.split = 'train' else: if self.training and self.train_val: self.split = 'train_val' else: self.split = 'val' if self.tta: self.split = 'test' if self.split == 'train': self.seqs = ['00', '01', '02', '03', '04', '05', '06', '07', '09', '10'] elif self.split == 'val': self.seqs = ['08'] elif self.split == 'train_val': self.seqs = ['00', '01', '02', '03', '04', '05', '06', '07', '09', '10', '08'] elif self.split == 'test': self.seqs = ['11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21'] else: raise Exception('split must be train/val/train_val/test.') self.annos = [] for seq in self.seqs: self.annos += absoluteFilePaths('/'.join([self.root_path, str(seq).zfill(2), 'velodyne'])) self.annos.sort() self.annos_another = self.annos.copy() random.shuffle(self.annos_another) print(f'The total sample is {len(self.annos)}') self._sample_idx = np.arange(len(self.annos)) self.samples_per_epoch = self.data_cfgs.get('SAMPLES_PER_EPOCH', -1) if self.samples_per_epoch == -1 or not self.training: self.samples_per_epoch = len(self.annos) if self.training: self.resample() else: self.sample_idx = self._sample_idx # init_path = os.path.join(ABSOLUTE_PATH, 'semantickitti_init.pkl') self.scan_size = {} # if not os.path.isfile(init_path): for path in self.annos: self.scan_size[path] = np.fromfile(path, dtype=np.float32).reshape((-1, 4)).shape[0] # torch.save(self.scan_size, init_path) # else: # self.scan_size = torch.load(init_path) def __len__(self): return len(self.sample_idx) def resample(self): self.sample_idx = np.random.choice(self._sample_idx, self.samples_per_epoch) def get_kitti_points_ringID(self, points): scan_x = points[:, 0] scan_y = points[:, 1] yaw = -np.arctan2(scan_y, -scan_x) proj_x = 0.5 * (yaw / np.pi + 1.0) new_raw = np.nonzero((proj_x[1:] < 0.2) * (proj_x[:-1] > 0.8))[0] + 1 proj_y = np.zeros_like(proj_x) proj_y[new_raw] = 1 ringID = np.cumsum(proj_y) ringID = np.clip(ringID, 0, 63) return ringID def __getitem__(self, index): raw_data = np.fromfile(self.annos[index], dtype=np.float32).reshape((-1, 4)) if self.split == 'test': annotated_data = np.expand_dims(np.zeros_like(raw_data[:, 0], dtype=int), axis=1) else: if self.if_scribble: # ScribbleKITTI (weak label) annos = self.annos[index].replace('SemanticKITTI', 'ScribbleKITTI') annotated_data = np.fromfile( annos.replace('velodyne', 'scribbles')[:-3] + 'label', dtype=np.uint32 ).reshape((-1, 1)) else: # SemanticKITTI (full label) annotated_data = np.fromfile( self.annos[index].replace('velodyne', 'labels')[:-3] + 'label', dtype=np.uint32 ).reshape((-1, 1)) annotated_data = annotated_data & 0xFFFF if 
self.num_classes == 19: annotated_data = np.vectorize(LEARNING_MAP_19.__getitem__)(annotated_data) elif self.num_classes == 12: annotated_data = np.vectorize(LEARNING_MAP_12.__getitem__)(annotated_data) elif self.num_classes == 7:
annotated_data = np.vectorize(LEARNING_MAP_7.__getitem__)(annotated_data)
2
2023-10-31 08:11:57+00:00
8k
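As an aside for the record above: its gold completion applies SemanticKITTI label remapping with np.vectorize over a LEARNING_MAP dictionary. Below is a minimal, self-contained sketch of that pattern; the toy map and sample values are assumptions for illustration only (the actual code uses LEARNING_MAP_19/12/11/7 and reads labels from .label files).

import numpy as np

# Hypothetical subset of a learning map: raw SemanticKITTI id -> compact train id.
TOY_LEARNING_MAP = {0: 0, 10: 1, 40: 2, 48: 3}

def remap_labels(raw_labels, learning_map):
    # The lower 16 bits of each raw label hold the semantic class id;
    # the upper 16 bits hold the instance id and are discarded here.
    semantic = raw_labels & 0xFFFF
    return np.vectorize(learning_map.__getitem__)(semantic)

if __name__ == "__main__":
    raw = np.array([[0], [10], [40], [48]], dtype=np.uint32)
    print(remap_labels(raw, TOY_LEARNING_MAP).ravel())  # expected: [0 1 2 3]

The dictionary lookup via __getitem__ (rather than dict.get) deliberately raises KeyError on unmapped ids, which surfaces label-file corruption early.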
thoddnn/open-datagen
opendatagen/data_generator.py
[ { "identifier": "dict_to_string", "path": "opendatagen/utils.py", "snippet": "def dict_to_string(d):\n result = []\n for key, value in d.items():\n result.append(f'#{key}#:\\n\"\"\"')\n result.append(f'{value}')\n result.append('\"\"\"')\n return '\\n'.join(result)" }, { "identifier": "load_file", "path": "opendatagen/utils.py", "snippet": "def load_file(path:str):\n # Adjust the path based on this module's location\n absolute_path = os.path.join(os.path.dirname(__file__), path)\n\n with open(absolute_path, 'r') as file:\n content = file.read()\n\n return content" }, { "identifier": "write_to_csv", "path": "opendatagen/utils.py", "snippet": "def write_to_csv(rows, filename):\n\n if not rows: # Check if rows is empty or None\n raise ValueError(\"The 'rows' argument cannot be empty.\")\n \n # Use the current working directory instead of the script's directory\n base_path = os.getcwd()\n\n if os.path.isabs(filename):\n path = filename\n else:\n path = os.path.join(base_path, filename)\n \n # Open the file and write the rows\n with open(path, 'w', newline='') as file:\n writer = csv.DictWriter(file, fieldnames=rows[0].keys())\n writer.writeheader() # Writing the headers\n writer.writerows(rows) # Writing the rows" }, { "identifier": "generate_context_from_json", "path": "opendatagen/utils.py", "snippet": "def generate_context_from_json(data, stop_field=None):\n if stop_field and list(data.keys())[0] == stop_field:\n return \"\"\n\n output = \"Given these values\\n\"\n\n for key, value in data.items():\n if key == stop_field:\n break\n output += f\"#{key} value#\\n'''{value}\\n'''\\n\"\n\n return output" }, { "identifier": "extract_website_details", "path": "opendatagen/utils.py", "snippet": "def extract_website_details(url):\n downloaded = trafilatura.fetch_url(url)\n metadata = trafilatura.metadata.extract_metadata(downloaded)\n\n title = metadata['title'] if metadata and 'title' in metadata else None\n description = metadata['description'] if metadata and 'description' in metadata else None\n\n content = trafilatura.extract(downloaded)\n\n response = {\n \"title\": title,\n \"description\": description,\n \"content\": content\n }\n\n return response" }, { "identifier": "create_type_message", "path": "opendatagen/utils.py", "snippet": "def create_type_message(comp_type, min_value, max_value):\n \"\"\"Helper function to create the type message based on the given constraints.\"\"\"\n type_msg = f\"The answer must be a {comp_type}\" if comp_type else \"\"\n\n if comp_type == \"int\":\n if min_value and max_value:\n type_msg += f\" between {min_value} and {max_value}\"\n elif max_value:\n type_msg += f\" lower than {max_value}\"\n elif min_value:\n type_msg += f\" greater than {min_value}\"\n\n return type_msg" }, { "identifier": "find_strings_in_brackets", "path": "opendatagen/utils.py", "snippet": "def find_strings_in_brackets(text):\n # This pattern matches text enclosed in { and }\n pattern = r\"\\{(.*?)\\}\"\n # Find all matches\n matches = re.findall(pattern, text)\n return matches" }, { "identifier": "snake_case_to_title_case", "path": "opendatagen/utils.py", "snippet": "def snake_case_to_title_case(snake_str):\n # Split the string at underscores\n words = snake_str.split('_')\n # Capitalize the first letter of each word and join them with a space\n title_case_str = ' '.join(word.capitalize() for word in words)\n return title_case_str" }, { "identifier": "title_case_to_snake_case", "path": "opendatagen/utils.py", "snippet": "def title_case_to_snake_case(title_str):\n # First, split the 
string by spaces\n words = title_str.split(' ')\n # Convert all the words to lowercase and join them with underscores\n snake_case_str = '_'.join(word.lower() for word in words)\n return snake_case_str" }, { "identifier": "extract_content_from_internet", "path": "opendatagen/utils.py", "snippet": "def extract_content_from_internet(keyword:str):\n\n print(f\"Browsing for the keyword {keyword}...\")\n\n result = \"\"\n\n urls = get_google_search_result(keyword)\n\n for url in urls:\n\n content = get_content_from_url(url)\n\n if content and word_counter(content) > 500:\n\n print(url)\n\n result = result + \"\\n\" + content\n\n print(\"Finish browsing...\")\n\n return result" }, { "identifier": "clean_string", "path": "opendatagen/utils.py", "snippet": "def clean_string(original_string:str):\n\n cleaned_string = re.sub(r'\\n+', '\\n\\n', original_string).strip()\n \n return cleaned_string" }, { "identifier": "Anonymizer", "path": "opendatagen/anonymizer.py", "snippet": "class Anonymizer:\n\n NER_PLACEHOLDER = {\n \"PERSON\": \"{person}\",\n \"ORG\": \"{organization}\",\n \"GPE\": \"{location}\",\n \"DATE\": \"{date}\",\n \"TIME\": \"{time}\",\n \"NORP\": \"{group}\",\n \"FAC\": \"{facility}\",\n \"LOC\": \"{location}\",\n \"PRODUCT\": \"{product}\",\n \"EVENT\": \"{event}\",\n \"WORK_OF_ART\": \"{artwork}\",\n \"LAW\": \"{law}\",\n \"LANGUAGE\": \"{language}\",\n \"MONEY\": \"{money}\",\n \"PERCENT\": \"{percentage}\",\n \"ORDINAL\": \"{ordinal}\",\n \"CARDINAL\": \"{number}\",\n # Add more if needed\n }\n\n REGEX_PATTERN = {\n \"{phone_number}\": r\"\\+?\\d{1,4}?[-.\\s]?\\(?\\d{1,3}?\\)?[-.\\s]?\\d{1,4}[-.\\s]?\\d{1,4}[-.\\s]?\\d{1,9}\",\n \"{email}\": r\"\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b\",\n \"{credit_card_pattern}\": r\"\\d{4}[-\\s]?\\d{4}[-\\s]?\\d{4}[-\\s]?\\d{4}\",\n \"{address_pattern}\": r\"\\d{1,5}\\s\\w+(\\s\\w+)*,\\s\\w+,\\s\\w+(\\s\\w+)*\",\n \"{date_pattern}\": r\"(\\d{4}[-/]\\d{1,2}[-/]\\d{1,2})|(\\d{1,2}[-/]\\d{1,2}[-/]\\d{4})\",\n \"{time_pattern}\": r\"(?:[01]\\d|2[0-3]):[0-5]\\d\",\n \"{ipv4_pattern}\": r\"\\b(?:\\d{1,3}\\.){3}\\d{1,3}\\b\",\n \"{url_pattern}\": r\"https?://(?:www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)\",\n \"{ssn_pattern}\": r\"\\d{3}-\\d{2}-\\d{4}\",\n \"{license_plate_pattern}\": r\"[A-Z0-9]{2,}-[A-Z0-9]{2,}\",\n \"{zip_code_pattern}\": r\"\\d{5}(-\\d{4})?\",\n \"{vin_pattern}\": r\"[A-HJ-NPR-Z0-9]{17}\",\n \"{iban_pattern}\": r\"[A-Z]{2}\\d{2}[A-Z0-9]{1,30}\",\n \"{driver_license_pattern}\": r\"[A-Z]{1,2}-\\d{4,9}\"\n }\n\n\n\n def __init__(self, completion_model:OpenAIChatModel):\n \n self.nlp = spacy.load(\"en_core_web_sm\")\n self.ner_prompt = load_file(\"files/ner.txt\")\n self.completion_model = completion_model\n\n def regex_anonymization(self, text: str) -> str:\n\n for replacement, pattern in self.REGEX_PATTERN.items():\n text = re.sub(pattern, replacement, text)\n \n return text\n\n def ner_anonymization(self, text: str) -> str:\n doc = self.nlp(text)\n for entity in doc.ents:\n placeholder = self.NER_PLACEHOLDER.get(entity.label_)\n if placeholder:\n text = text.replace(entity.text, placeholder)\n return text\n\n def llm_anonymization(self, text: str) -> str:\n\n completion = self.completion_model.ask(\n system_prompt=self.ner_prompt,\n user_prompt=text,\n max_tokens=126,\n temperature=0\n ) \n\n return completion\n\n def anonymize(self, text: str) -> str:\n\n text = self.regex_anonymization(text)\n text = self.ner_anonymization(text)\n return self.llm_anonymization(text)" }, { 
"identifier": "OpenAIChatModel", "path": "opendatagen/model.py", "snippet": "class OpenAIChatModel(BaseModel):\n\n name:str = \"gpt-3.5-turbo-1106\"\n system_prompt:Optional[str] = \"No verbose.\"\n max_tokens:Optional[int] = 256\n temperature:Optional[List[float]] = [1]\n json_mode:Optional[bool] = False \n seed:Optional[int] = None \n tools:Optional[list] = None \n top_p:Optional[int] = 1 \n stop:Optional[str] = None \n presence_penalty: Optional[float] = 0\n frequency_penalty: Optional[float] = 0 \n client:Optional[Type[OpenAI]] = None \n logprobs:Optional[bool] = False \n confidence_score:Optional[Dict] = {} \n \n def __init__(self, **data):\n super().__init__(**data)\n \n self.client = OpenAI()\n self.client.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n \n @retry(retry=retry_if_result(is_retryable_answer), stop=stop_after_attempt(N_RETRIES), wait=wait_exponential(multiplier=1, min=4, max=60))\n def ask(self, messages) -> str:\n \n param = {\n\n \"model\":self.name,\n \"temperature\": random.choice(self.temperature),\n \"messages\": messages,\n \"logprobs\": self.logprobs\n\n }\n\n if self.tools:\n param[\"functions\"] = self.tools\n \n if self.max_tokens:\n param[\"max_tokens\"] = self.max_tokens\n\n if self.seed:\n param[\"seed\"] = self.seed\n \n if self.max_tokens:\n param[\"max_tokens\"] = self.max_tokens\n\n if self.json_mode:\n param[\"response_format\"] = {\"type\": \"json_object\"}\n\n if self.seed:\n param[\"seed\"] = self.seed\n\n completion = self.client.chat.completions.create(**param)\n\n if self.logprobs:\n self.confidence_score = get_confidence_score(completion=completion)\n\n answer = completion.choices[0].message.content\n \n return answer" }, { "identifier": "OpenAIInstructModel", "path": "opendatagen/model.py", "snippet": "class OpenAIInstructModel(BaseModel):\n\n name:str = \"gpt-3.5-turbo-instruct\"\n max_tokens:Optional[int] = 256\n temperature:Optional[List[float]] = [1]\n messages:Optional[str] = None \n seed:Optional[int] = None \n tools:Optional[List[str]] = None \n start_with:Optional[List[str]] = None\n top_p:Optional[int] = 1 \n stop:Optional[str] = None \n presence_penalty: Optional[float] = 0\n frequency_penalty: Optional[float] = 0 \n client:Optional[Type[OpenAI]] = None \n confidence_score:Optional[Dict] = {} \n\n\n def __init__(self, **data):\n super().__init__(**data)\n\n self.client = OpenAI()\n self.client.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n \n \n @retry(stop=stop_after_attempt(N_RETRIES), wait=wait_exponential(multiplier=1, min=4, max=60))\n def ask(self, messages:str) -> str:\n\n if self.start_with:\n starter = random.choice(self.start_with)\n else:\n starter = \"\"\n\n param = {\n\n \"model\":self.name,\n \"temperature\": random.choice(self.temperature),\n \"prompt\": f\"{messages}\\n\\n{starter}\"\n\n }\n\n if self.tools:\n param[\"functions\"] = self.tools\n \n if self.max_tokens:\n param[\"max_tokens\"] = self.max_tokens\n\n if self.seed:\n param[\"seed\"] = self.seed\n\n completion = self.client.completions.create(**param)\n\n answer = completion.choices[0].text \n\n return answer" }, { "identifier": "OpenAIEmbeddingModel", "path": "opendatagen/model.py", "snippet": "class OpenAIEmbeddingModel(BaseModel):\n\n name:str = \"\"" }, { "identifier": "ModelName", "path": "opendatagen/model.py", "snippet": "class ModelName(Enum):\n GPT_35_TURBO_INSTRUCT = \"gpt-3.5-turbo-instruct\"\n TEXT_DAVINCI_INSTRUCT = \"text-davinci-003\"\n GPT_35_TURBO_CHAT = \"gpt-3.5-turbo-1106\"\n GPT_35_TURBO_16K_CHAT = \"gpt-3.5-turbo-16k\"\n GPT_4_CHAT = 
\"gpt-4\"\n GPT_4_TURBO_CHAT = \"gpt-4-1106-preview\"\n TEXT_EMBEDDING_ADA = \"text-embedding-ada-002\"\n SMARTCHUNK = \"SmartChunk-0.1-Mistral-7B\"\n MISTRAL_7B = \"Mistral-7B-v0.1\"\n LLAMA_7B = \"Llama-2-7b-chat-hf\"\n LLAMA_13B = \"Llama-2-13b-chat-hf\"\n LLAMA_70B = \"Llama-2-70b-chat-hf\"" }, { "identifier": "MistralChatModel", "path": "opendatagen/model.py", "snippet": "class MistralChatModel(BaseModel):\n\n name:str = \"mistral-tiny\"\n max_tokens:Optional[int] = 256\n temperature:Optional[List[float]] = [0.7]\n messages:Optional[str] = None \n random_seed:Optional[int] = None \n top_p:Optional[int] = 1 \n safe_mode:Optional[bool] = False \n client:Optional[Type[MistralClient]] = None \n confidence_score:Optional[Dict] = {} \n\n def __init__(self, **data):\n \n super().__init__(**data)\n api_key = os.environ[\"MISTRAL_API_KEY\"]\n self.client = MistralClient(api_key=api_key)\n \n @retry(stop=stop_after_attempt(N_RETRIES), wait=wait_exponential(multiplier=1, min=4, max=60))\n def ask(self, messages) -> str:\n \n param = {\n\n \"model\":self.name,\n \"temperature\": random.choice(self.temperature),\n \"messages\": messages\n\n }\n\n if self.max_tokens:\n param[\"max_tokens\"] = self.max_tokens\n\n if self.top_p:\n param[\"top_p\"] = self.top_p\n\n if self.random_seed:\n param[\"random_seed\"] = self.random_seed\n\n chat_response = self.client.chat(**param)\n\n answer = chat_response.choices[0].message.content\n\n return answer" }, { "identifier": "LlamaCPPModel", "path": "opendatagen/model.py", "snippet": "class LlamaCPPModel(BaseModel):\n\n path:str\n temperature:Optional[List[float]] = [0.8]\n max_tokens:Optional[int] = 256\n top_p:Optional[float] = 0.95\n min_p:Optional[float] = 0.05\n echo:Optional[bool] = False\n start_with:Optional[List[str]] = None\n confidence_score:Optional[Dict] = {} \n\n def ask(self, messages:str) -> str:\n\n llm = Llama(model_path=self.path, verbose=False)\n\n if self.start_with:\n starter = random.choice(self.start_with)\n else:\n starter = \"\"\n\n output = llm(\n prompt=f\"{messages}\\n{starter}\", \n max_tokens=self.max_tokens, \n echo=self.echo,\n temperature=random.choice(self.temperature),\n )\n\n return output[\"choices\"][0][\"text\"]" }, { "identifier": "Template", "path": "opendatagen/template.py", "snippet": "class Template(BaseModel):\n\n description: str\n prompt: str\n completion: str\n prompt_variation_number: Optional[int] = 1\n variables: Optional[Dict[str, Variable]] = None\n source_internet: Optional[RAGInternet] = None\n source_localfile: Optional[RAGLocalPath] = None\n rag_content: Optional[str] = None\n value:Optional[List[str]] = None\n decontamination: Optional[Decontomination] = None \n\n class Config:\n extra = \"forbid\" # This will raise an error for extra fields\n\n def load_internet_source(self):\n\n if self.source_internet is not None:\n self.rag_content = self.source_internet.extract_content_from_internet()\n\n def load_local_file(self):\n\n if self.source_localfile is not None and self.source_localfile.localPath is not None:\n self.rag_content = self.source_localfile.get_content_from_file()\n\n def load_local_directory(self):\n\n if self.source_localfile is not None and self.source_localfile.directoryPath is not None:\n self.rag_content = self.source_localfile.get_content_from_directory()" }, { "identifier": "Variable", "path": "opendatagen/template.py", "snippet": "class Variable(BaseModel):\n\n name: str\n models:Optional[List[Model]] = None \n generation_number: int = 1\n source_internet: Optional[RAGInternet] = 
None\n source_localfile: Optional[RAGLocalPath] = None\n source_localdirectory: Optional[RAGLocalPath] = None\n source_huggingface:Optional[RAGHuggingFace] = None\n get_value_from_huggingface:Optional[RAGHuggingFace] = None\n get_value_from_localfile:Optional[RAGLocalPath] = None\n note: Optional[List[str]] = None\n rag_content: Optional[str] = None\n validator:Optional[Validator] = None\n values:Optional[Dict[str, Variations]] = {}\n\n model_config = ConfigDict(\n protected_namespaces=('protect_me_', 'also_protect_'),\n extra = \"forbid\"\n )\n\n def load_internet_source(self):\n\n if self.source_internet is not None:\n self.rag_content = self.source_internet.extract_content_from_internet()\n\n def load_local_file(self):\n\n if self.source_localfile is not None and self.source_localfile.localPath is not None:\n self.rag_content = self.source_localfile.get_content_from_file()\n\n def load_local_directory(self):\n\n if self.source_localfile is not None and self.source_localfile.directoryPath is not None:\n self.rag_content = self.source_localfile.get_content_from_directory()\n\n def load_huggingface_dataset(self):\n\n if self.source_huggingface is not None:\n self.rag_content = self.source_huggingface.get_random_value_from_dataset()\n\n def load_value(self):\n\n if self.get_value_from_huggingface:\n self.value = self.get_value_from_huggingface.get_random_value_from_dataset(max_token=self.max_tokens)" }, { "identifier": "Variations", "path": "opendatagen/template.py", "snippet": "class Variations(BaseModel):\n\n id:str\n parent_id:Optional[str] = None\n value:str\n confidence_score:Optional[Dict] = None \n error_message:str = None\n\n class Config:\n extra = \"forbid\" # This will raise an error for extra fields" }, { "identifier": "create_variable_from_name", "path": "opendatagen/template.py", "snippet": "def create_variable_from_name(model:OpenAIChatModel, variable_name:str) -> Variable:\n\n prompt = load_file(path=\"files/variable_generation.txt\")\n\n prompt = prompt.format(variable_name=variable_name)\n\n completion = model.ask_instruct_gpt(prompt=prompt, temperature=0, max_tokens=30)\n\n return Variable(**completion)" }, { "identifier": "function_to_call", "path": "opendatagen/utils.py", "snippet": "def function_to_call(function_name, from_notebook, *args):\n\n user_function = load_user_function(function_name, from_notebook)\n\n return user_function(*args)" } ]
from dotenv import load_dotenv from urllib.parse import quote from re import findall from typing import Dict, List, Union from opendatagen.utils import dict_to_string, load_file, write_to_csv, generate_context_from_json, extract_website_details, create_type_message, find_strings_in_brackets from opendatagen.utils import snake_case_to_title_case, title_case_to_snake_case from opendatagen.utils import extract_content_from_internet, clean_string from opendatagen.anonymizer import Anonymizer from opendatagen.model import OpenAIChatModel, OpenAIInstructModel, OpenAIEmbeddingModel, ModelName, MistralChatModel, LlamaCPPModel from opendatagen.template import Template, Variable, Variations, create_variable_from_name from opendatagen.utils import function_to_call from mistralai.client import MistralClient from mistralai.models.chat_completion import ChatMessage import numpy as np import time import random import re import json import requests import uuid
5,946
load_dotenv() class DataGenerator: output_array = [] def __init__(self, template:Template): self.template = template def extract_variable_from_string(self, text:str): return findall(r'\{(.*?)\}', text) def extract_variable_dict_from_string(self, text:str): list_of_variables = findall(r'\{(.*?)\}', text) result = {} for variable_id, variable in self.template.variables.items(): if variable_id in list_of_variables: result[variable_id] = variable return result def anonymize_text(self, text_to_anonymize): # Example usage: anonymizer = Anonymizer() anonymized_text = anonymizer.anonymize(text_to_anonymize) return anonymized_text def contextual_generation(self, prompt_text:str, variables:list, current_variation_dict:dict, fixed_variables: Dict[str, Variable], completion:str=None, parent_id:str=None): # This will be the list to collect all dictionaries result = [] if not variables: # No more variables to process, generate final variation return [current_variation_dict.copy()] # Get the next variable next_var = variables[0] remaining_variables = variables[1:] if completion: formatted_template = completion.format(**{var: current_variation_dict.get(var, f'{{{var}}}').value if hasattr(current_variation_dict.get(var, f'{{{var}}}'), 'value') else current_variation_dict.get(var, f'{{{var}}}') for var in re.findall(r'\{(.*?)\}', completion)}) current_completion = formatted_template.split(f'{{{next_var}}}')[0] + f'{{{next_var}}}' current_prompt = prompt_text else: formatted_template = prompt_text.format(**{var: current_variation_dict.get(var, f'{{{var}}}').value if hasattr(current_variation_dict.get(var, f'{{{var}}}'), 'value') else current_variation_dict.get(var, f'{{{var}}}') for var in re.findall(r'\{(.*?)\}', prompt_text)}) current_prompt = formatted_template.split(f'{{{next_var}}}')[0] + f'{{{next_var}}}' current_completion = None variable = fixed_variables[next_var] variations = self.generate_variable(prompt_text=current_prompt, completion_text=current_completion, current_variable=variable, variable_id_string=next_var, parent_id=parent_id) for id, variation in variations.items(): # Update the current variations dictionary with the new variation updated_variation_dict = current_variation_dict.copy() updated_variation_dict[next_var] = variation # Recursively process the remaining variables # and extend the all_variation_dicts list with the results result.extend(self.contextual_generation( prompt_text=prompt_text, completion=completion, variables=remaining_variables, current_variation_dict=updated_variation_dict, fixed_variables=fixed_variables, parent_id=id )) # Return the list of all variation dictionaries generated return result def generate_variable(self, prompt_text:str, current_variable:Variable, variable_id_string:str, completion_text:str=None, parent_id:str=None): generation_number = current_variable.generation_number variations = {} if current_variable.get_value_from_localfile: for _ in range(generation_number): generated_value = current_variable.get_value_from_localfile.get_content_from_file() if parent_id: new_id = str(uuid.uuid4())
load_dotenv() class DataGenerator: output_array = [] def __init__(self, template:Template): self.template = template def extract_variable_from_string(self, text:str): return findall(r'\{(.*?)\}', text) def extract_variable_dict_from_string(self, text:str): list_of_variables = findall(r'\{(.*?)\}', text) result = {} for variable_id, variable in self.template.variables.items(): if variable_id in list_of_variables: result[variable_id] = variable return result def anonymize_text(self, text_to_anonymize): # Example usage: anonymizer = Anonymizer() anonymized_text = anonymizer.anonymize(text_to_anonymize) return anonymized_text def contextual_generation(self, prompt_text:str, variables:list, current_variation_dict:dict, fixed_variables: Dict[str, Variable], completion:str=None, parent_id:str=None): # This will be the list to collect all dictionaries result = [] if not variables: # No more variables to process, generate final variation return [current_variation_dict.copy()] # Get the next variable next_var = variables[0] remaining_variables = variables[1:] if completion: formatted_template = completion.format(**{var: current_variation_dict.get(var, f'{{{var}}}').value if hasattr(current_variation_dict.get(var, f'{{{var}}}'), 'value') else current_variation_dict.get(var, f'{{{var}}}') for var in re.findall(r'\{(.*?)\}', completion)}) current_completion = formatted_template.split(f'{{{next_var}}}')[0] + f'{{{next_var}}}' current_prompt = prompt_text else: formatted_template = prompt_text.format(**{var: current_variation_dict.get(var, f'{{{var}}}').value if hasattr(current_variation_dict.get(var, f'{{{var}}}'), 'value') else current_variation_dict.get(var, f'{{{var}}}') for var in re.findall(r'\{(.*?)\}', prompt_text)}) current_prompt = formatted_template.split(f'{{{next_var}}}')[0] + f'{{{next_var}}}' current_completion = None variable = fixed_variables[next_var] variations = self.generate_variable(prompt_text=current_prompt, completion_text=current_completion, current_variable=variable, variable_id_string=next_var, parent_id=parent_id) for id, variation in variations.items(): # Update the current variations dictionary with the new variation updated_variation_dict = current_variation_dict.copy() updated_variation_dict[next_var] = variation # Recursively process the remaining variables # and extend the all_variation_dicts list with the results result.extend(self.contextual_generation( prompt_text=prompt_text, completion=completion, variables=remaining_variables, current_variation_dict=updated_variation_dict, fixed_variables=fixed_variables, parent_id=id )) # Return the list of all variation dictionaries generated return result def generate_variable(self, prompt_text:str, current_variable:Variable, variable_id_string:str, completion_text:str=None, parent_id:str=None): generation_number = current_variable.generation_number variations = {} if current_variable.get_value_from_localfile: for _ in range(generation_number): generated_value = current_variable.get_value_from_localfile.get_content_from_file() if parent_id: new_id = str(uuid.uuid4())
new_value = Variations(id=new_id, parent_id=parent_id, value=generated_value)
20
2023-10-27 17:38:37+00:00
8k
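One technique the record above exercises deserves a brief, hedged sketch: contextual_generation formats the prompt with the variable values generated so far and truncates it just after the next unresolved {placeholder}, so each variable is generated with only the preceding context visible. The template and values below are invented for illustration; only the splitting trick mirrors the code in the record.

import re

def prompt_up_to(template, known_values, next_var):
    # Placeholders without a generated value yet are re-inserted literally as "{name}".
    names = re.findall(r"\{(.*?)\}", template)
    filled = {name: known_values.get(name, f"{{{name}}}") for name in names}
    formatted = template.format(**filled)
    # Keep everything up to and including the next unresolved placeholder.
    return formatted.split(f"{{{next_var}}}")[0] + f"{{{next_var}}}"

if __name__ == "__main__":
    tpl = "Write a {tone} product description for {product}."
    print(prompt_up_to(tpl, {"tone": "playful"}, "product"))
    # expected: Write a playful product description for {product}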
zhanggang001/HEDNet
pcdet/datasets/waymo/waymo_dataset.py
[ { "identifier": "roiaware_pool3d_utils", "path": "pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py", "snippet": "def points_in_boxes_cpu(points, boxes):\ndef points_in_boxes_gpu(points, boxes):\n def __init__(self, out_size, max_pts_each_voxel=128):\n def forward(self, rois, pts, pts_feature, pool_method='max'):\n def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method):\n def backward(ctx, grad_out):\nclass RoIAwarePool3d(nn.Module):\nclass RoIAwarePool3dFunction(Function):" }, { "identifier": "box_utils", "path": "pcdet/utils/box_utils.py", "snippet": "def in_hull(p, hull):\ndef boxes_to_corners_3d(boxes3d):\ndef corners_rect_to_camera(corners):\ndef mask_boxes_outside_range_numpy(boxes, limit_range, min_num_corners=1, use_center_to_filter=True):\ndef remove_points_in_boxes3d(points, boxes3d):\ndef boxes3d_kitti_camera_to_lidar(boxes3d_camera, calib):\ndef boxes3d_kitti_fakelidar_to_lidar(boxes3d_lidar):\ndef boxes3d_kitti_lidar_to_fakelidar(boxes3d_lidar):\ndef enlarge_box3d(boxes3d, extra_width=(0, 0, 0)):\ndef boxes3d_lidar_to_kitti_camera(boxes3d_lidar, calib):\ndef boxes3d_to_corners3d_kitti_camera(boxes3d, bottom_center=True):\ndef boxes3d_kitti_camera_to_imageboxes(boxes3d, calib, image_shape=None):\ndef boxes_iou_normal(boxes_a, boxes_b):\ndef boxes3d_lidar_to_aligned_bev_boxes(boxes3d):\ndef boxes3d_nearest_bev_iou(boxes_a, boxes_b):\ndef area(box) -> torch.Tensor:\ndef pairwise_iou(boxes1, boxes2) -> torch.Tensor:\ndef center_to_corner2d(center, dim):\ndef bbox3d_overlaps_diou(pred_boxes, gt_boxes):" }, { "identifier": "common_utils", "path": "pcdet/utils/common_utils.py", "snippet": "def check_numpy_to_torch(x):\ndef limit_period(val, offset=0.5, period=np.pi):\ndef drop_info_with_name(info, name):\ndef rotate_points_along_z(points, angle):\ndef angle2matrix(angle):\ndef mask_points_by_range(points, limit_range):\ndef get_voxel_centers(voxel_coords, downsample_times, voxel_size, point_cloud_range):\ndef create_logger(log_file=None, rank=0, log_level=logging.INFO):\ndef set_random_seed(seed):\ndef worker_init_fn(worker_id, seed=666):\ndef get_pad_params(desired_size, cur_size):\ndef keep_arrays_by_name(gt_names, used_classes):\ndef init_dist_slurm(tcp_port, local_rank, backend='nccl'):\ndef init_dist_pytorch(tcp_port, local_rank, backend='nccl'):\ndef get_dist_info(return_gpu_per_machine=False):\ndef merge_results_dist(result_part, size, tmpdir):\ndef scatter_point_inds(indices, point_inds, shape):\ndef generate_voxel2pinds(sparse_tensor):\ndef sa_create(name, var):\n def __init__(self):\n def reset(self):\n def update(self, val, n=1):\nclass AverageMeter(object):" }, { "identifier": "DatasetTemplate", "path": "pcdet/datasets/dataset.py", "snippet": "class DatasetTemplate(torch_data.Dataset):\n def __init__(self, dataset_cfg=None, class_names=None, training=True, root_path=None, logger=None):\n super().__init__()\n self.dataset_cfg = dataset_cfg\n self.training = training\n self.class_names = class_names\n self.logger = logger\n self.root_path = root_path if root_path is not None else Path(self.dataset_cfg.DATA_PATH)\n self.logger = logger\n if self.dataset_cfg is None or class_names is None:\n return\n\n self.point_cloud_range = np.array(self.dataset_cfg.POINT_CLOUD_RANGE, dtype=np.float32)\n self.point_feature_encoder = PointFeatureEncoder(\n self.dataset_cfg.POINT_FEATURE_ENCODING,\n point_cloud_range=self.point_cloud_range\n )\n self.data_augmentor = DataAugmentor(\n self.root_path, self.dataset_cfg.DATA_AUGMENTOR, self.class_names, 
logger=self.logger\n ) if self.training else None\n self.data_processor = DataProcessor(\n self.dataset_cfg.DATA_PROCESSOR, point_cloud_range=self.point_cloud_range,\n training=self.training, num_point_features=self.point_feature_encoder.num_point_features\n )\n\n self.grid_size = self.data_processor.grid_size\n self.voxel_size = self.data_processor.voxel_size\n self.total_epochs = 0\n self._merge_all_iters_to_one_epoch = False\n\n if hasattr(self.data_processor, \"depth_downsample_factor\"):\n self.depth_downsample_factor = self.data_processor.depth_downsample_factor\n else:\n self.depth_downsample_factor = None\n \n @property\n def mode(self):\n return 'train' if self.training else 'test'\n\n def __getstate__(self):\n d = dict(self.__dict__)\n del d['logger']\n return d\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n\n def generate_prediction_dicts(self, batch_dict, pred_dicts, class_names, output_path=None):\n \"\"\"\n Args:\n batch_dict:\n frame_id:\n pred_dicts: list of pred_dicts\n pred_boxes: (N, 7 or 9), Tensor\n pred_scores: (N), Tensor\n pred_labels: (N), Tensor\n class_names:\n output_path:\n\n Returns:\n\n \"\"\"\n \n def get_template_prediction(num_samples):\n box_dim = 9 if self.dataset_cfg.get('TRAIN_WITH_SPEED', False) else 7\n ret_dict = {\n 'name': np.zeros(num_samples), 'score': np.zeros(num_samples),\n 'boxes_lidar': np.zeros([num_samples, box_dim]), 'pred_labels': np.zeros(num_samples)\n }\n return ret_dict\n\n def generate_single_sample_dict(box_dict):\n pred_scores = box_dict['pred_scores'].cpu().numpy()\n pred_boxes = box_dict['pred_boxes'].cpu().numpy()\n pred_labels = box_dict['pred_labels'].cpu().numpy()\n pred_dict = get_template_prediction(pred_scores.shape[0])\n if pred_scores.shape[0] == 0:\n return pred_dict\n\n pred_dict['name'] = np.array(class_names)[pred_labels - 1]\n pred_dict['score'] = pred_scores\n pred_dict['boxes_lidar'] = pred_boxes\n pred_dict['pred_labels'] = pred_labels\n\n return pred_dict\n\n annos = []\n for index, box_dict in enumerate(pred_dicts):\n single_pred_dict = generate_single_sample_dict(box_dict)\n single_pred_dict['frame_id'] = batch_dict['frame_id'][index]\n if 'metadata' in batch_dict:\n single_pred_dict['metadata'] = batch_dict['metadata'][index]\n annos.append(single_pred_dict)\n\n return annos\n\n def merge_all_iters_to_one_epoch(self, merge=True, epochs=None):\n if merge:\n self._merge_all_iters_to_one_epoch = True\n self.total_epochs = epochs\n else:\n self._merge_all_iters_to_one_epoch = False\n\n def __len__(self):\n raise NotImplementedError\n\n def __getitem__(self, index):\n \"\"\"\n To support a custom dataset, implement this function to load the raw data (and labels), then transform them to\n the unified normative coordinate and call the function self.prepare_data() to process the data and send them\n to the model.\n\n Args:\n index:\n\n Returns:\n\n \"\"\"\n raise NotImplementedError\n\n def set_lidar_aug_matrix(self, data_dict):\n \"\"\"\n Get lidar augment matrix (4 x 4), which are used to recover orig point coordinates.\n \"\"\"\n lidar_aug_matrix = np.eye(4)\n if 'flip_y' in data_dict.keys():\n flip_x = data_dict['flip_x']\n flip_y = data_dict['flip_y']\n if flip_x:\n lidar_aug_matrix[:3,:3] = np.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]) @ lidar_aug_matrix[:3,:3]\n if flip_y:\n lidar_aug_matrix[:3,:3] = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]) @ lidar_aug_matrix[:3,:3]\n if 'noise_rot' in data_dict.keys():\n noise_rot = data_dict['noise_rot']\n lidar_aug_matrix[:3,:3] = 
common_utils.angle2matrix(torch.tensor(noise_rot)) @ lidar_aug_matrix[:3,:3]\n if 'noise_scale' in data_dict.keys():\n noise_scale = data_dict['noise_scale']\n lidar_aug_matrix[:3,:3] *= noise_scale\n if 'noise_translate' in data_dict.keys():\n noise_translate = data_dict['noise_translate']\n lidar_aug_matrix[:3,3:4] = noise_translate.T\n data_dict['lidar_aug_matrix'] = lidar_aug_matrix\n return data_dict\n\n def prepare_data(self, data_dict):\n \"\"\"\n Args:\n data_dict:\n points: optional, (N, 3 + C_in)\n gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]\n gt_names: optional, (N), string\n ...\n\n Returns:\n data_dict:\n frame_id: string\n points: (N, 3 + C_in)\n gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]\n gt_names: optional, (N), string\n use_lead_xyz: bool\n voxels: optional (num_voxels, max_points_per_voxel, 3 + C)\n voxel_coords: optional (num_voxels, 3)\n voxel_num_points: optional (num_voxels)\n ...\n \"\"\"\n if self.training:\n assert 'gt_boxes' in data_dict, 'gt_boxes should be provided for training'\n gt_boxes_mask = np.array([n in self.class_names for n in data_dict['gt_names']], dtype=np.bool_)\n \n if 'calib' in data_dict:\n calib = data_dict['calib']\n data_dict = self.data_augmentor.forward(\n data_dict={\n **data_dict,\n 'gt_boxes_mask': gt_boxes_mask\n }\n )\n if 'calib' in data_dict:\n data_dict['calib'] = calib\n data_dict = self.set_lidar_aug_matrix(data_dict)\n if data_dict.get('gt_boxes', None) is not None:\n selected = common_utils.keep_arrays_by_name(data_dict['gt_names'], self.class_names)\n data_dict['gt_boxes'] = data_dict['gt_boxes'][selected]\n data_dict['gt_names'] = data_dict['gt_names'][selected]\n gt_classes = np.array([self.class_names.index(n) + 1 for n in data_dict['gt_names']], dtype=np.int32)\n gt_boxes = np.concatenate((data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)\n data_dict['gt_boxes'] = gt_boxes\n\n if data_dict.get('gt_boxes2d', None) is not None:\n data_dict['gt_boxes2d'] = data_dict['gt_boxes2d'][selected]\n\n if data_dict.get('points', None) is not None:\n data_dict = self.point_feature_encoder.forward(data_dict)\n\n data_dict = self.data_processor.forward(\n data_dict=data_dict\n )\n\n if self.training and len(data_dict['gt_boxes']) == 0:\n new_index = np.random.randint(self.__len__())\n return self.__getitem__(new_index)\n\n data_dict.pop('gt_names', None)\n\n return data_dict\n\n @staticmethod\n def collate_batch(batch_list, _unused=False):\n data_dict = defaultdict(list)\n for cur_sample in batch_list:\n for key, val in cur_sample.items():\n data_dict[key].append(val)\n batch_size = len(batch_list)\n ret = {}\n batch_size_ratio = 1\n\n for key, val in data_dict.items():\n try:\n if key in ['voxels', 'voxel_num_points']:\n if isinstance(val[0], list):\n batch_size_ratio = len(val[0])\n val = [i for item in val for i in item]\n ret[key] = np.concatenate(val, axis=0)\n elif key in ['points', 'voxel_coords']:\n coors = []\n if isinstance(val[0], list):\n val = [i for item in val for i in item]\n for i, coor in enumerate(val):\n coor_pad = np.pad(coor, ((0, 0), (1, 0)), mode='constant', constant_values=i)\n coors.append(coor_pad)\n ret[key] = np.concatenate(coors, axis=0)\n elif key in ['gt_boxes']:\n max_gt = max([len(x) for x in val])\n batch_gt_boxes3d = np.zeros((batch_size, max_gt, val[0].shape[-1]), dtype=np.float32)\n for k in range(batch_size):\n batch_gt_boxes3d[k, :val[k].__len__(), :] = val[k]\n ret[key] = batch_gt_boxes3d\n\n elif key in ['roi_boxes']:\n 
max_gt = max([x.shape[1] for x in val])\n batch_gt_boxes3d = np.zeros((batch_size, val[0].shape[0], max_gt, val[0].shape[-1]), dtype=np.float32)\n for k in range(batch_size):\n batch_gt_boxes3d[k,:, :val[k].shape[1], :] = val[k]\n ret[key] = batch_gt_boxes3d\n\n elif key in ['roi_scores', 'roi_labels']:\n max_gt = max([x.shape[1] for x in val])\n batch_gt_boxes3d = np.zeros((batch_size, val[0].shape[0], max_gt), dtype=np.float32)\n for k in range(batch_size):\n batch_gt_boxes3d[k,:, :val[k].shape[1]] = val[k]\n ret[key] = batch_gt_boxes3d\n\n elif key in ['gt_boxes2d']:\n max_boxes = 0\n max_boxes = max([len(x) for x in val])\n batch_boxes2d = np.zeros((batch_size, max_boxes, val[0].shape[-1]), dtype=np.float32)\n for k in range(batch_size):\n if val[k].size > 0:\n batch_boxes2d[k, :val[k].__len__(), :] = val[k]\n ret[key] = batch_boxes2d\n elif key in [\"images\", \"depth_maps\"]:\n # Get largest image size (H, W)\n max_h = 0\n max_w = 0\n for image in val:\n max_h = max(max_h, image.shape[0])\n max_w = max(max_w, image.shape[1])\n\n # Change size of images\n images = []\n for image in val:\n pad_h = common_utils.get_pad_params(desired_size=max_h, cur_size=image.shape[0])\n pad_w = common_utils.get_pad_params(desired_size=max_w, cur_size=image.shape[1])\n pad_width = (pad_h, pad_w)\n pad_value = 0\n\n if key == \"images\":\n pad_width = (pad_h, pad_w, (0, 0))\n elif key == \"depth_maps\":\n pad_width = (pad_h, pad_w)\n\n image_pad = np.pad(image,\n pad_width=pad_width,\n mode='constant',\n constant_values=pad_value)\n\n images.append(image_pad)\n ret[key] = np.stack(images, axis=0)\n elif key in ['calib']:\n ret[key] = val\n elif key in [\"points_2d\"]:\n max_len = max([len(_val) for _val in val])\n pad_value = 0\n points = []\n for _points in val:\n pad_width = ((0, max_len-len(_points)), (0,0))\n points_pad = np.pad(_points,\n pad_width=pad_width,\n mode='constant',\n constant_values=pad_value)\n points.append(points_pad)\n ret[key] = np.stack(points, axis=0)\n elif key in ['camera_imgs']:\n ret[key] = torch.stack([torch.stack(imgs,dim=0) for imgs in val],dim=0)\n else:\n ret[key] = np.stack(val, axis=0)\n except:\n print('Error in collate_batch: key=%s' % key)\n raise TypeError\n\n ret['batch_size'] = batch_size * batch_size_ratio\n return ret" } ]
import os import pickle import copy import numpy as np import torch import multiprocessing import SharedArray import torch.distributed as dist import argparse import yaml from tqdm import tqdm from pathlib import Path from functools import partial from ...ops.roiaware_pool3d import roiaware_pool3d_utils from ...utils import box_utils, common_utils from ..dataset import DatasetTemplate from . import waymo_utils from ..kitti.kitti_object_eval_python import eval as kitti_eval from ..kitti import kitti_utils from .waymo_eval import OpenPCDetWaymoDetectionMetricsEstimator from easydict import EasyDict
5,305
# OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset # Reference https://github.com/open-mmlab/OpenPCDet # Written by Shaoshuai Shi, Chaoxu Guo # All Rights Reserved. class WaymoDataset(DatasetTemplate): def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None): super().__init__( dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger ) self.data_path = self.root_path / self.dataset_cfg.PROCESSED_DATA_TAG self.split = self.dataset_cfg.DATA_SPLIT[self.mode] split_dir = self.root_path / 'ImageSets' / (self.split + '.txt') self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()] self.infos = [] self.seq_name_to_infos = self.include_waymo_data(self.mode) self.use_shared_memory = self.dataset_cfg.get('USE_SHARED_MEMORY', False) and self.training if self.use_shared_memory: self.shared_memory_file_limit = self.dataset_cfg.get('SHARED_MEMORY_FILE_LIMIT', 0x7FFFFFFF) self.load_data_to_shared_memory() if self.dataset_cfg.get('USE_PREDBOX', False): self.pred_boxes_dict = self.load_pred_boxes_to_dict( pred_boxes_path=self.dataset_cfg.ROI_BOXES_PATH[self.mode] ) else: self.pred_boxes_dict = {} def set_split(self, split): super().__init__( dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger ) self.split = split split_dir = self.root_path / 'ImageSets' / (self.split + '.txt') self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()] self.infos = [] self.seq_name_to_infos = self.include_waymo_data(self.mode) def include_waymo_data(self, mode): self.logger.info('Loading Waymo dataset') waymo_infos = [] seq_name_to_infos = {} num_skipped_infos = 0 for k in range(len(self.sample_sequence_list)): sequence_name = os.path.splitext(self.sample_sequence_list[k])[0] info_path = self.data_path / sequence_name / ('%s.pkl' % sequence_name) info_path = self.check_sequence_name_with_all_version(info_path) if not info_path.exists(): num_skipped_infos += 1 continue with open(info_path, 'rb') as f: infos = pickle.load(f) waymo_infos.extend(infos) seq_name_to_infos[infos[0]['point_cloud']['lidar_sequence']] = infos self.infos.extend(waymo_infos[:]) self.logger.info('Total skipped info %s' % num_skipped_infos) self.logger.info('Total samples for Waymo dataset: %d' % (len(waymo_infos))) if self.dataset_cfg.SAMPLED_INTERVAL[mode] > 1: sampled_waymo_infos = [] for k in range(0, len(self.infos), self.dataset_cfg.SAMPLED_INTERVAL[mode]): sampled_waymo_infos.append(self.infos[k]) self.infos = sampled_waymo_infos self.logger.info('Total sampled samples for Waymo dataset: %d' % len(self.infos)) use_sequence_data = self.dataset_cfg.get('SEQUENCE_CONFIG', None) is not None and self.dataset_cfg.SEQUENCE_CONFIG.ENABLED if not use_sequence_data: seq_name_to_infos = None return seq_name_to_infos def load_pred_boxes_to_dict(self, pred_boxes_path): self.logger.info(f'Loading and reorganizing pred_boxes to dict from path: {pred_boxes_path}') with open(pred_boxes_path, 'rb') as f: pred_dicts = pickle.load(f) pred_boxes_dict = {} for index, box_dict in enumerate(pred_dicts): seq_name = box_dict['frame_id'][:-4].replace('training_', '').replace('validation_', '') sample_idx = int(box_dict['frame_id'][-3:]) if seq_name not in pred_boxes_dict: pred_boxes_dict[seq_name] = {} pred_labels = np.array([self.class_names.index(box_dict['name'][k]) + 1 for k in range(box_dict['name'].shape[0])]) pred_boxes = 
np.concatenate((box_dict['boxes_lidar'], box_dict['score'][:, np.newaxis], pred_labels[:, np.newaxis]), axis=-1) pred_boxes_dict[seq_name][sample_idx] = pred_boxes self.logger.info(f'Predicted boxes has been loaded, total sequences: {len(pred_boxes_dict)}') return pred_boxes_dict def load_data_to_shared_memory(self): self.logger.info(f'Loading training data to shared memory (file limit={self.shared_memory_file_limit})')
# OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset # Reference https://github.com/open-mmlab/OpenPCDet # Written by Shaoshuai Shi, Chaoxu Guo # All Rights Reserved. class WaymoDataset(DatasetTemplate): def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None): super().__init__( dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger ) self.data_path = self.root_path / self.dataset_cfg.PROCESSED_DATA_TAG self.split = self.dataset_cfg.DATA_SPLIT[self.mode] split_dir = self.root_path / 'ImageSets' / (self.split + '.txt') self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()] self.infos = [] self.seq_name_to_infos = self.include_waymo_data(self.mode) self.use_shared_memory = self.dataset_cfg.get('USE_SHARED_MEMORY', False) and self.training if self.use_shared_memory: self.shared_memory_file_limit = self.dataset_cfg.get('SHARED_MEMORY_FILE_LIMIT', 0x7FFFFFFF) self.load_data_to_shared_memory() if self.dataset_cfg.get('USE_PREDBOX', False): self.pred_boxes_dict = self.load_pred_boxes_to_dict( pred_boxes_path=self.dataset_cfg.ROI_BOXES_PATH[self.mode] ) else: self.pred_boxes_dict = {} def set_split(self, split): super().__init__( dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger ) self.split = split split_dir = self.root_path / 'ImageSets' / (self.split + '.txt') self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()] self.infos = [] self.seq_name_to_infos = self.include_waymo_data(self.mode) def include_waymo_data(self, mode): self.logger.info('Loading Waymo dataset') waymo_infos = [] seq_name_to_infos = {} num_skipped_infos = 0 for k in range(len(self.sample_sequence_list)): sequence_name = os.path.splitext(self.sample_sequence_list[k])[0] info_path = self.data_path / sequence_name / ('%s.pkl' % sequence_name) info_path = self.check_sequence_name_with_all_version(info_path) if not info_path.exists(): num_skipped_infos += 1 continue with open(info_path, 'rb') as f: infos = pickle.load(f) waymo_infos.extend(infos) seq_name_to_infos[infos[0]['point_cloud']['lidar_sequence']] = infos self.infos.extend(waymo_infos[:]) self.logger.info('Total skipped info %s' % num_skipped_infos) self.logger.info('Total samples for Waymo dataset: %d' % (len(waymo_infos))) if self.dataset_cfg.SAMPLED_INTERVAL[mode] > 1: sampled_waymo_infos = [] for k in range(0, len(self.infos), self.dataset_cfg.SAMPLED_INTERVAL[mode]): sampled_waymo_infos.append(self.infos[k]) self.infos = sampled_waymo_infos self.logger.info('Total sampled samples for Waymo dataset: %d' % len(self.infos)) use_sequence_data = self.dataset_cfg.get('SEQUENCE_CONFIG', None) is not None and self.dataset_cfg.SEQUENCE_CONFIG.ENABLED if not use_sequence_data: seq_name_to_infos = None return seq_name_to_infos def load_pred_boxes_to_dict(self, pred_boxes_path): self.logger.info(f'Loading and reorganizing pred_boxes to dict from path: {pred_boxes_path}') with open(pred_boxes_path, 'rb') as f: pred_dicts = pickle.load(f) pred_boxes_dict = {} for index, box_dict in enumerate(pred_dicts): seq_name = box_dict['frame_id'][:-4].replace('training_', '').replace('validation_', '') sample_idx = int(box_dict['frame_id'][-3:]) if seq_name not in pred_boxes_dict: pred_boxes_dict[seq_name] = {} pred_labels = np.array([self.class_names.index(box_dict['name'][k]) + 1 for k in range(box_dict['name'].shape[0])]) pred_boxes = 
np.concatenate((box_dict['boxes_lidar'], box_dict['score'][:, np.newaxis], pred_labels[:, np.newaxis]), axis=-1) pred_boxes_dict[seq_name][sample_idx] = pred_boxes self.logger.info(f'Predicted boxes has been loaded, total sequences: {len(pred_boxes_dict)}') return pred_boxes_dict def load_data_to_shared_memory(self): self.logger.info(f'Loading training data to shared memory (file limit={self.shared_memory_file_limit})')
cur_rank, num_gpus = common_utils.get_dist_info()
2
2023-10-25 02:57:35+00:00
8k
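For the record above, a short sketch of the prediction-reorganization step shown in load_pred_boxes_to_dict: frame ids ending in a three-digit sample index are parsed into (sequence, index) keys, and boxes, scores, and numeric labels are concatenated column-wise per frame. The toy prediction below is assumed data, not taken from the record, and the sequence-name cleanup (stripping 'training_'/'validation_') is omitted for brevity.

import numpy as np

def reorganize_predictions(pred_dicts, class_names):
    pred_boxes_dict = {}
    for box_dict in pred_dicts:
        seq_name = box_dict["frame_id"][:-4]          # drop the trailing "_NNN" sample suffix
        sample_idx = int(box_dict["frame_id"][-3:])   # last three characters are the frame index
        labels = np.array([class_names.index(n) + 1 for n in box_dict["name"]])
        boxes = np.concatenate(
            (box_dict["boxes_lidar"], box_dict["score"][:, None], labels[:, None]), axis=-1
        )
        pred_boxes_dict.setdefault(seq_name, {})[sample_idx] = boxes
    return pred_boxes_dict

if __name__ == "__main__":
    toy = [{
        "frame_id": "segment-0001_007",
        "name": np.array(["Vehicle"]),
        "score": np.array([0.9]),
        "boxes_lidar": np.zeros((1, 7)),
    }]
    out = reorganize_predictions(toy, ["Vehicle", "Pedestrian", "Cyclist"])
    print(out["segment-0001"][7].shape)  # expected: (1, 9)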
GaryGuTC/COMG_model
COMG_model_RL/modules/base_cmn.py
[ { "identifier": "pack_wrapper", "path": "COMG_model_RL/modules/att_model.py", "snippet": "def pack_wrapper(module, att_feats, att_masks):\n if att_masks is not None:\n packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))\n return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)\n else:\n return module(att_feats)" }, { "identifier": "AttModel", "path": "COMG_model_RL/modules/att_model.py", "snippet": "class AttModel(CaptionModel):\n def __init__(self, args, tokenizer):\n super(AttModel, self).__init__()\n self.args = args\n self.tokenizer = tokenizer\n self.vocab_size = len(tokenizer.idx2token)\n self.input_encoding_size = args.d_model\n self.rnn_size = args.d_ff\n self.num_layers = args.num_layers\n self.drop_prob_lm = args.drop_prob_lm\n self.max_seq_length = args.max_seq_length\n self.att_feat_size = args.d_vf\n self.att_hid_size = args.d_model\n\n self.bos_idx = args.bos_idx\n self.eos_idx = args.eos_idx\n self.pad_idx = args.pad_idx\n\n self.use_bn = args.use_bn\n\n self.embed = lambda x: x\n self.fc_embed = lambda x: x\n self.att_embed = nn.Sequential(*(\n ((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ()) +\n (nn.Linear(self.att_feat_size, self.input_encoding_size),\n nn.ReLU(),\n nn.Dropout(self.drop_prob_lm)) +\n ((nn.BatchNorm1d(self.input_encoding_size),) if self.use_bn == 2 else ())))\n\n def clip_att(self, att_feats, att_masks):\n # Clip the length of att_masks and att_feats to the maximum length\n if att_masks is not None:\n max_len = att_masks.data.long().sum(1).max()\n att_feats = att_feats[:, :max_len].contiguous()\n att_masks = att_masks[:, :max_len].contiguous()\n return att_feats, att_masks\n\n def _prepare_feature(self, fc_feats, att_feats, att_masks):\n att_feats, att_masks = self.clip_att(att_feats, att_masks)\n\n # embed fc and att feats\n fc_feats = self.fc_embed(fc_feats)\n att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)\n\n # Project the attention feats first to reduce memory and computation comsumptions.\n p_att_feats = self.ctx2att(att_feats)\n\n return fc_feats, att_feats, p_att_feats, att_masks\n\n def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, output_logsoftmax=1):\n # 'it' contains a word index\n xt = self.embed(it)\n\n output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks)\n if output_logsoftmax:\n logprobs = F.log_softmax(self.logit(output), dim=1)\n else:\n logprobs = self.logit(output)\n\n return logprobs, state\n\n def _sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):\n beam_size = opt.get('beam_size', 10)\n group_size = opt.get('group_size', 1)\n sample_n = opt.get('sample_n', 10)\n # when sample_n == beam_size then each beam is a sample.\n assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'\n batch_size = fc_feats.size(0)\n\n p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)\n\n assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. 
can be dealt with in future if needed'\n seq = fc_feats.new_full((batch_size * sample_n, self.max_seq_length), self.pad_idx, dtype=torch.long)\n seqLogprobs = fc_feats.new_zeros(batch_size * sample_n, self.max_seq_length, self.vocab_size + 1)\n # lets process every image independently for now, for simplicity\n\n self.done_beams = [[] for _ in range(batch_size)]\n\n state = self.init_hidden(batch_size)\n\n # first step, feed bos\n it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)\n logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)\n\n p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(beam_size,\n [p_fc_feats, p_att_feats,\n pp_att_feats, p_att_masks]\n )\n self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, opt=opt)\n for k in range(batch_size):\n if sample_n == beam_size:\n for _n in range(sample_n):\n seq_len = self.done_beams[k][_n]['seq'].shape[0]\n seq[k * sample_n + _n, :seq_len] = self.done_beams[k][_n]['seq']\n seqLogprobs[k * sample_n + _n, :seq_len] = self.done_beams[k][_n]['logps']\n else:\n seq_len = self.done_beams[k][0]['seq'].shape[0]\n seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score\n seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']\n # return the samples and their log likelihoods\n return seq, seqLogprobs\n\n def _sample(self, fc_feats, att_feats, att_masks=None, update_opts={}):\n opt = self.args.__dict__\n opt.update(**update_opts)\n\n sample_method = opt.get('sample_method', 'greedy')\n beam_size = opt.get('beam_size', 1)\n temperature = opt.get('temperature', 1.0)\n sample_n = int(opt.get('sample_n', 1))\n group_size = opt.get('group_size', 1)\n output_logsoftmax = opt.get('output_logsoftmax', 1)\n decoding_constraint = opt.get('decoding_constraint', 0)\n block_trigrams = opt.get('block_trigrams', 0)\n if beam_size > 1 and sample_method in ['greedy', 'beam_search']:\n return self._sample_beam(fc_feats, att_feats, att_masks, opt)\n if group_size > 1:\n return self._diverse_sample(fc_feats, att_feats, att_masks, opt)\n\n batch_size = fc_feats.size(0)\n state = self.init_hidden(batch_size * sample_n)\n\n p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)\n\n if sample_n > 1:\n p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,\n [p_fc_feats, p_att_feats,\n pp_att_feats, p_att_masks]\n )\n\n trigrams = [] # will be a list of batch_size dictionaries\n\n seq = fc_feats.new_full((batch_size * sample_n, self.max_seq_length), self.pad_idx, dtype=torch.long)\n seqLogprobs = fc_feats.new_zeros(batch_size * sample_n, self.max_seq_length, self.vocab_size + 1)\n for t in range(self.max_seq_length + 1):\n if t == 0: # input <bos>\n it = fc_feats.new_full([batch_size * sample_n], self.bos_idx, dtype=torch.long)\n\n logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state,\n output_logsoftmax=output_logsoftmax)\n\n if decoding_constraint and t > 0:\n tmp = logprobs.new_zeros(logprobs.size())\n tmp.scatter_(1, seq[:, t - 1].data.unsqueeze(1), float('-inf'))\n logprobs = logprobs + tmp\n\n # Mess with trigrams\n # Copy from https://github.com/lukemelas/image-paragraph-captioning\n if block_trigrams and t >= 3:\n # Store trigram generated at last step\n prev_two_batch = seq[:, t - 3:t - 1]\n for i in range(batch_size): # = seq.size(0)\n prev_two = 
(prev_two_batch[i][0].item(), prev_two_batch[i][1].item())\n current = seq[i][t - 1]\n if t == 3: # initialize\n trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}\n elif t > 3:\n if prev_two in trigrams[i]: # add to list\n trigrams[i][prev_two].append(current)\n else: # create list\n trigrams[i][prev_two] = [current]\n # Block used trigrams at next step\n prev_two_batch = seq[:, t - 2:t]\n mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size\n for i in range(batch_size):\n prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())\n if prev_two in trigrams[i]:\n for j in trigrams[i][prev_two]:\n mask[i, j] += 1\n # Apply mask to log probs\n # logprobs = logprobs - (mask * 1e9)\n alpha = 2.0 # = 4\n logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)\n\n # sample the next word\n if t == self.max_seq_length: # skip if we achieve maximum length\n break\n it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)\n\n # stop when all finished\n if t == 0:\n unfinished = it != self.eos_idx\n else:\n it[~unfinished] = self.pad_idx # This allows eos_idx not being overwritten to 0\n logprobs = logprobs * unfinished.unsqueeze(1).float()\n unfinished = unfinished * (it != self.eos_idx)\n seq[:, t] = it\n seqLogprobs[:, t] = logprobs\n # quit loop if all sequences have finished\n if unfinished.sum() == 0:\n break\n\n return seq, seqLogprobs\n\n def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):\n\n sample_method = opt.get('sample_method', 'greedy')\n beam_size = opt.get('beam_size', 1)\n temperature = opt.get('temperature', 1.0)\n group_size = opt.get('group_size', 1)\n diversity_lambda = opt.get('diversity_lambda', 0.5)\n decoding_constraint = opt.get('decoding_constraint', 0)\n block_trigrams = opt.get('block_trigrams', 0)\n\n batch_size = fc_feats.size(0)\n state = self.init_hidden(batch_size)\n\n p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)\n\n trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries\n\n seq_table = [fc_feats.new_full((batch_size, self.max_seq_length), self.pad_idx, dtype=torch.long) for _ in\n range(group_size)]\n seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.max_seq_length) for _ in range(group_size)]\n state_table = [self.init_hidden(batch_size) for _ in range(group_size)]\n\n for tt in range(self.max_seq_length + group_size):\n for divm in range(group_size):\n t = tt - divm\n seq = seq_table[divm]\n seqLogprobs = seqLogprobs_table[divm]\n trigrams = trigrams_table[divm]\n if t >= 0 and t <= self.max_seq_length - 1:\n if t == 0: # input <bos>\n it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)\n else:\n it = seq[:, t - 1] # changed\n\n logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats,\n p_att_masks, state_table[divm]) # changed\n logprobs = F.log_softmax(logprobs / temperature, dim=-1)\n\n # Add diversity\n if divm > 0:\n unaug_logprobs = logprobs.clone()\n for prev_choice in range(divm):\n prev_decisions = seq_table[prev_choice][:, t]\n logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda\n\n if decoding_constraint and t > 0:\n tmp = logprobs.new_zeros(logprobs.size())\n tmp.scatter_(1, seq[:, t - 1].data.unsqueeze(1), float('-inf'))\n logprobs = logprobs + tmp\n\n # Mess with trigrams\n if block_trigrams and t >= 3:\n # Store 
trigram generated at last step\n prev_two_batch = seq[:, t - 3:t - 1]\n for i in range(batch_size): # = seq.size(0)\n prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())\n current = seq[i][t - 1]\n if t == 3: # initialize\n trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}\n elif t > 3:\n if prev_two in trigrams[i]: # add to list\n trigrams[i][prev_two].append(current)\n else: # create list\n trigrams[i][prev_two] = [current]\n # Block used trigrams at next step\n prev_two_batch = seq[:, t - 2:t]\n mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size\n for i in range(batch_size):\n prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())\n if prev_two in trigrams[i]:\n for j in trigrams[i][prev_two]:\n mask[i, j] += 1\n # Apply mask to log probs\n # logprobs = logprobs - (mask * 1e9)\n alpha = 2.0 # = 4\n logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)\n\n it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)\n\n # stop when all finished\n if t == 0:\n unfinished = it != self.eos_idx\n else:\n unfinished = seq[:, t - 1] != self.pad_idx & seq[:, t - 1] != self.eos_idx\n it[~unfinished] = self.pad_idx\n unfinished = unfinished & (it != self.eos_idx) # changed\n seq[:, t] = it\n seqLogprobs[:, t] = sampleLogprobs.view(-1)\n\n return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table,\n 1).reshape(\n batch_size * group_size, -1)" } ]
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .att_model import pack_wrapper, AttModel
6,872
self.h = h self.linears = clones(nn.Linear(d_model, d_model), 4) self.attn = None self.dropout = nn.Dropout(p=dropout) self.topk = topk def forward(self, query, key, value, mask=None, layer_past=None): if mask is not None: mask = mask.unsqueeze(1) nbatches = query.size(0) if layer_past is not None and layer_past.shape[2] == key.shape[1] > 1: query = self.linears[0](query) key, value = layer_past[0], layer_past[1] present = torch.stack([key, value]) else: query, key, value = \ [l(x) for l, x in zip(self.linears, (query, key, value))] if layer_past is not None and not (layer_past.shape[2] == key.shape[1] > 1): past_key, past_value = layer_past[0], layer_past[1] key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) present = torch.stack([key, value]) query, key, value = \ [x.view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for x in [query, key, value]] x, self.attn = memory_querying_responding(query, key, value, mask=mask, dropout=self.dropout, topk=self.topk) x = x.transpose(1, 2).contiguous() \ .view(nbatches, -1, self.h * self.d_k) if layer_past is not None: return self.linears[-1](x), present else: return self.linears[-1](x) class MultiHeadedAttention(nn.Module): def __init__(self, h, d_model, dropout=0.1): super(MultiHeadedAttention, self).__init__() assert d_model % h == 0 self.d_k = d_model // h self.h = h self.linears = clones(nn.Linear(d_model, d_model), 4) self.attn = None self.dropout = nn.Dropout(p=dropout) def forward(self, query, key, value, mask=None, layer_past=None): if mask is not None: mask = mask.unsqueeze(1) nbatches = query.size(0) if layer_past is not None and layer_past.shape[2] == key.shape[1] > 1: query = self.linears[0](query) key, value = layer_past[0], layer_past[1] present = torch.stack([key, value]) else: query, key, value = \ [l(x) for l, x in zip(self.linears, (query, key, value))] if layer_past is not None and not (layer_past.shape[2] == key.shape[1] > 1): past_key, past_value = layer_past[0], layer_past[1] key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) present = torch.stack([key, value]) query, key, value = \ [x.view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for x in [query, key, value]] x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout) x = x.transpose(1, 2).contiguous() \ .view(nbatches, -1, self.h * self.d_k) if layer_past is not None: return self.linears[-1](x), present else: return self.linears[-1](x) class PositionwiseFeedForward(nn.Module): def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.w_2(self.dropout(F.relu(self.w_1(x)))) class Embeddings(nn.Module): def __init__(self, d_model, vocab): super(Embeddings, self).__init__() self.lut = nn.Embedding(vocab, d_model) self.d_model = d_model def forward(self, x): return self.lut(x) * math.sqrt(self.d_model) class PositionalEncoding(nn.Module): def __init__(self, d_model, dropout, max_len=5000): super(PositionalEncoding, self).__init__() self.dropout = nn.Dropout(p=dropout) pe = torch.zeros(max_len, d_model) position = torch.arange(0, max_len).unsqueeze(1).float() div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) def forward(self, x): x = 
x + self.pe[:, :x.size(1)] return self.dropout(x)
from __future__ import absolute_import from __future__ import division from __future__ import print_function def clones(module, N): return nn.ModuleList([copy.deepcopy(module) for _ in range(N)]) def subsequent_mask(size): attn_shape = (1, size, size) subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8') return torch.from_numpy(subsequent_mask) == 0 def attention(query, key, value, mask=None, dropout=None): d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: scores = scores.masked_fill(mask == 0, float('-inf')) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn def memory_querying_responding(query, key, value, mask=None, dropout=None, topk=32): d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: scores = scores.masked_fill(mask == 0, float('-inf')) selected_scores, idx = scores.topk(topk) dummy_value = value.unsqueeze(2).expand(idx.size(0), idx.size(1), idx.size(2), value.size(-2), value.size(-1)) dummy_idx = idx.unsqueeze(-1).expand(idx.size(0), idx.size(1), idx.size(2), idx.size(3), value.size(-1)) selected_value = torch.gather(dummy_value, 3, dummy_idx) p_attn = F.softmax(selected_scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn.unsqueeze(3), selected_value).squeeze(3), p_attn class Transformer(nn.Module): def __init__(self, encoder, decoder, src_embed, tgt_embed, cmn): super(Transformer, self).__init__() self.encoder = encoder self.decoder = decoder self.src_embed = src_embed self.tgt_embed = tgt_embed self.cmn = cmn def forward(self, src, tgt, src_mask, tgt_mask, memory_matrix): return self.decode(self.encode(src, src_mask), src_mask, tgt, tgt_mask, memory_matrix=memory_matrix) def encode(self, src, src_mask): return self.encoder(self.src_embed(src), src_mask) def decode(self, memory, src_mask, tgt, tgt_mask, past=None, memory_matrix=None): text_embeddings = self.tgt_embed(tgt) # Memory querying and responding for textual features dummy_memory_matrix = memory_matrix.unsqueeze(0).expand(text_embeddings.size(0), memory_matrix.size(0), memory_matrix.size(1)) responses = self.cmn(text_embeddings, dummy_memory_matrix, dummy_memory_matrix) embeddings = text_embeddings + responses # Memory querying and responding for textual features return text_embeddings, self.decoder(embeddings, memory, src_mask, tgt_mask, past=past) class Encoder(nn.Module): def __init__(self, layer, N): super(Encoder, self).__init__() self.layers = clones(layer, N) self.norm = LayerNorm(layer.size) def forward(self, x, mask): for layer in self.layers: x = layer(x, mask) return self.norm(x) class LayerNorm(nn.Module): def __init__(self, features, eps=1e-6): super(LayerNorm, self).__init__() self.a_2 = nn.Parameter(torch.ones(features)) self.b_2 = nn.Parameter(torch.zeros(features)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) # bs,98,512 => 16,98,1 std = x.std(-1, keepdim=True) # # bs,98,512 => 16,98,1 return self.a_2 * (x - mean) / (std + self.eps) + self.b_2 class SublayerConnection(nn.Module): def __init__(self, size, dropout): super(SublayerConnection, self).__init__() self.norm = LayerNorm(size) self.dropout = nn.Dropout(dropout) def forward(self, x, sublayer): _x = sublayer(self.norm(x)) if type(_x) is tuple: return x + self.dropout(_x[0]), _x[1] return x + self.dropout(_x) class EncoderLayer(nn.Module): def __init__(self, size, self_attn, 
feed_forward, dropout): super(EncoderLayer, self).__init__() self.self_attn = self_attn self.feed_forward = feed_forward self.sublayer = clones(SublayerConnection(size, dropout), 2) self.size = size def forward(self, x, mask): x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask)) return self.sublayer[1](x, self.feed_forward) class Decoder(nn.Module): def __init__(self, layer, N): super(Decoder, self).__init__() self.layers = clones(layer, N) self.norm = LayerNorm(layer.size) def forward(self, x, memory, src_mask, tgt_mask, past=None): if past is not None: present = [[], []] x = x[:, -1:] tgt_mask = tgt_mask[:, -1:] if tgt_mask is not None else None past = list(zip(past[0].split(2, dim=0), past[1].split(2, dim=0))) else: past = [None] * len(self.layers) for i, (layer, layer_past) in enumerate(zip(self.layers, past)): x = layer(x, memory, src_mask, tgt_mask, layer_past) if layer_past is not None: present[0].append(x[1][0]) present[1].append(x[1][1]) x = x[0] if past[0] is None: return self.norm(x) else: return self.norm(x), [torch.cat(present[0], 0), torch.cat(present[1], 0)] class DecoderLayer(nn.Module): def __init__(self, size, self_attn, src_attn, feed_forward, dropout): super(DecoderLayer, self).__init__() self.size = size self.self_attn = self_attn self.src_attn = src_attn self.feed_forward = feed_forward self.sublayer = clones(SublayerConnection(size, dropout), 3) def forward(self, x, memory, src_mask, tgt_mask, layer_past=None): m = memory if layer_past is None: x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask)) x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask)) return self.sublayer[2](x, self.feed_forward) else: present = [None, None] x, present[0] = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask, layer_past[0])) x, present[1] = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask, layer_past[1])) return self.sublayer[2](x, self.feed_forward), present class MultiThreadMemory(nn.Module): def __init__(self, h, d_model, dropout=0.1, topk=32): super(MultiThreadMemory, self).__init__() assert d_model % h == 0 self.d_k = d_model // h self.h = h self.linears = clones(nn.Linear(d_model, d_model), 4) self.attn = None self.dropout = nn.Dropout(p=dropout) self.topk = topk def forward(self, query, key, value, mask=None, layer_past=None): if mask is not None: mask = mask.unsqueeze(1) nbatches = query.size(0) if layer_past is not None and layer_past.shape[2] == key.shape[1] > 1: query = self.linears[0](query) key, value = layer_past[0], layer_past[1] present = torch.stack([key, value]) else: query, key, value = \ [l(x) for l, x in zip(self.linears, (query, key, value))] if layer_past is not None and not (layer_past.shape[2] == key.shape[1] > 1): past_key, past_value = layer_past[0], layer_past[1] key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) present = torch.stack([key, value]) query, key, value = \ [x.view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for x in [query, key, value]] x, self.attn = memory_querying_responding(query, key, value, mask=mask, dropout=self.dropout, topk=self.topk) x = x.transpose(1, 2).contiguous() \ .view(nbatches, -1, self.h * self.d_k) if layer_past is not None: return self.linears[-1](x), present else: return self.linears[-1](x) class MultiHeadedAttention(nn.Module): def __init__(self, h, d_model, dropout=0.1): super(MultiHeadedAttention, self).__init__() assert d_model % h == 0 self.d_k = d_model // h self.h = h self.linears = clones(nn.Linear(d_model, 
d_model), 4) self.attn = None self.dropout = nn.Dropout(p=dropout) def forward(self, query, key, value, mask=None, layer_past=None): if mask is not None: mask = mask.unsqueeze(1) nbatches = query.size(0) if layer_past is not None and layer_past.shape[2] == key.shape[1] > 1: query = self.linears[0](query) key, value = layer_past[0], layer_past[1] present = torch.stack([key, value]) else: query, key, value = \ [l(x) for l, x in zip(self.linears, (query, key, value))] if layer_past is not None and not (layer_past.shape[2] == key.shape[1] > 1): past_key, past_value = layer_past[0], layer_past[1] key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) present = torch.stack([key, value]) query, key, value = \ [x.view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for x in [query, key, value]] x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout) x = x.transpose(1, 2).contiguous() \ .view(nbatches, -1, self.h * self.d_k) if layer_past is not None: return self.linears[-1](x), present else: return self.linears[-1](x) class PositionwiseFeedForward(nn.Module): def __init__(self, d_model, d_ff, dropout=0.1): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.w_2(self.dropout(F.relu(self.w_1(x)))) class Embeddings(nn.Module): def __init__(self, d_model, vocab): super(Embeddings, self).__init__() self.lut = nn.Embedding(vocab, d_model) self.d_model = d_model def forward(self, x): return self.lut(x) * math.sqrt(self.d_model) class PositionalEncoding(nn.Module): def __init__(self, d_model, dropout, max_len=5000): super(PositionalEncoding, self).__init__() self.dropout = nn.Dropout(p=dropout) pe = torch.zeros(max_len, d_model) position = torch.arange(0, max_len).unsqueeze(1).float() div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) def forward(self, x): x = x + self.pe[:, :x.size(1)] return self.dropout(x)
class BaseCMN(AttModel):
1
2023-10-27 13:18:53+00:00
8k
OpenProteinAI/PoET
poet/models/poet.py
[ { "identifier": "Uniprot21", "path": "poet/alphabets.py", "snippet": "class Uniprot21(Alphabet):\n def __init__(\n self,\n mask=False,\n include_gap=False,\n include_startstop=False,\n distinct_startstop=False,\n ):\n chars = b\"ARNDCQEGHILKMFPSTWYV\"\n gap_token = start_token = stop_token = -1\n if include_gap:\n chars = chars + b\"-\"\n gap_token = len(chars) - 1\n if include_startstop:\n chars = chars + b\"*\"\n start_token = stop_token = len(chars) - 1\n if distinct_startstop:\n chars = chars + b\"$\"\n stop_token = len(chars) - 1\n # add the synonym tokens\n mask_token = len(chars)\n chars = chars + b\"XOUBZ\"\n\n encoding = np.arange(len(chars))\n encoding[mask_token + 1 :] = [\n 11,\n 4,\n mask_token,\n mask_token,\n ] # encode 'OUBZ' as synonyms\n missing = mask_token\n\n super(Uniprot21, self).__init__(\n chars, encoding=encoding, mask=mask, missing=missing\n )\n\n self.gap_token = gap_token\n self.start_token = start_token\n self.stop_token = stop_token\n self.mask_token = mask_token" }, { "identifier": "gelu", "path": "poet/models/modules/activation.py", "snippet": "" }, { "identifier": "MultiheadAttention", "path": "poet/models/modules/attention.py", "snippet": "class MultiheadAttention(nn.Module):\n def __init__(\n self,\n embed_dim,\n num_heads,\n bias=False,\n batch_first=True,\n dropout=0.0,\n init_scaling=1 / math.sqrt(2),\n self_attention=False,\n causal=False,\n **kwargs,\n ) -> None:\n super().__init__()\n assert batch_first\n self.batch_first = batch_first\n self.embed_dim = embed_dim\n\n self.num_heads = num_heads\n assert (\n self.embed_dim % num_heads == 0\n ), \"self.kdim must be divisible by num_heads\"\n self.head_dim = self.embed_dim // num_heads\n self.scaling = self.head_dim**-0.5\n\n self.self_attention = self_attention\n self.causal = causal\n\n self.init_scaling = init_scaling\n self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.out_proj = nn.Linear(embed_dim, embed_dim)\n\n self.dropout = dropout\n\n self.reset_parameters()\n\n def reset_parameters(self):\n # Empirically observed the convergence to be much better with\n # the scaled initialization\n nn.init.xavier_uniform_(self.k_proj.weight, gain=self.init_scaling)\n if self.k_proj.bias is not None:\n nn.init.constant_(self.k_proj.bias, 0.0)\n nn.init.xavier_uniform_(self.v_proj.weight, gain=self.init_scaling)\n if self.v_proj.bias is not None:\n nn.init.constant_(self.v_proj.bias, 0.0)\n nn.init.xavier_uniform_(self.q_proj.weight, gain=self.init_scaling)\n if self.q_proj.bias is not None:\n nn.init.constant_(self.q_proj.bias, 0.0)\n\n # nn.init.xavier_uniform_(self.out_proj.weight, gain=self.init_scaling)\n nn.init.constant_(self.out_proj.weight, 0.0)\n if self.out_proj.bias is not None:\n nn.init.constant_(self.out_proj.bias, 0.0)\n\n def _transform_qkv(\n self,\n query,\n key,\n value,\n query_positions=None,\n key_positions=None,\n transform_query=True,\n transform_key=True,\n transform_value=True,\n ):\n return query, key, value\n\n def _inner_attn(\n self,\n q,\n k,\n v,\n key_padding_mask=None,\n attn_mask=None,\n return_weights=False,\n ):\n # need to unpack inputs for usual mha attention...\n is_packed = False\n query_packed = q\n if type(q) is PackedTensorSequences:\n q = q.to_padded()\n is_packed = True\n if type(k) is PackedTensorSequences:\n # key padding mask is stored as the padding indices in the PackedTensor\n k, key_padding_mask = 
k.to_padded(return_mask=True)\n if type(v) is PackedTensorSequences:\n v = v.to_padded()\n\n dropout = self.dropout if self.training else 0\n attn, attn_weights = mha_attn(\n q,\n k,\n v,\n key_padding_mask=key_padding_mask,\n attn_mask=attn_mask,\n return_weights=return_weights,\n scaling=self.scaling,\n batch_first=self.batch_first,\n dropout=dropout,\n causal=self.causal,\n )\n\n # repack the output if the inputs were packed\n if is_packed:\n attn_packed = copy.copy(query_packed)\n attn_packed.x = attn\n attn = attn_packed\n\n return attn, attn_weights\n\n def forward_packed(\n self,\n query: PackedTensorSequences,\n key: Optional[PackedTensorSequences] = None,\n value: Optional[PackedTensorSequences] = None,\n key_padding_mask: Optional[torch.Tensor] = None,\n attn_mask: Optional[torch.Tensor] = None,\n return_weights: bool = False,\n return_projs: bool = False,\n transform_query: bool = True,\n transform_key: bool = True,\n transform_value: bool = True,\n ) -> Tuple[PackedTensorSequences, Optional[torch.Tensor]]:\n \"\"\"\n When the input is packed, we can apply the projections efficiently to only the non-padding entries.\n \"\"\"\n if self.self_attention:\n assert key is None and value is None\n key = value = query\n assert key is not None and value is not None\n\n query_positions = query.positions\n key_positions = key.positions\n\n if transform_query:\n qm = self.q_proj(query.x)\n qm = qm.view(-1, self.num_heads, self.head_dim)\n else:\n qm = None\n if transform_key:\n km = self.k_proj(key.x)\n km = km.view(-1, self.num_heads, self.head_dim)\n else:\n km = None\n if transform_value:\n vm = self.v_proj(value.x)\n vm = vm.view(-1, self.num_heads, self.head_dim)\n else:\n vm = None\n\n qm, km, vm = self._transform_qkv(\n qm,\n km,\n vm,\n query_positions=query_positions,\n key_positions=key_positions,\n transform_query=transform_query,\n transform_key=transform_key,\n transform_value=transform_value,\n )\n\n if transform_query:\n query = copy.copy(query)\n query.x = qm\n\n if transform_key:\n key = copy.copy(key)\n key.x = km\n\n if transform_value:\n value = copy.copy(value)\n value.x = vm\n\n # now calculate the attention values\n context_packed, attn_weights = self._inner_attn(\n query,\n key,\n value,\n attn_mask=attn_mask,\n return_weights=return_weights,\n )\n\n # handle packing again...\n context = context_packed.x\n context = context.view(context.size(0), self.embed_dim)\n\n output = self.out_proj(context)\n\n # repack ...\n output_packed = copy.copy(context_packed)\n output_packed.x = output\n output = output_packed\n\n if return_projs:\n return (output, attn_weights, (query, key, value))\n else:\n return output, attn_weights\n\n def forward_padded(\n self,\n query: torch.Tensor,\n key: Optional[torch.Tensor] = None,\n value: Optional[torch.Tensor] = None,\n key_padding_mask: Optional[torch.Tensor] = None,\n attn_mask: Optional[torch.Tensor] = None,\n return_weights: bool = False,\n return_projs: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:\n \"\"\"\n Normal MHA approach for padded inputs.\n \"\"\"\n if self.self_attention:\n assert key is None and value is None\n key = value = query\n assert key is not None and value is not None\n\n query = self.q_proj(query).view(\n query.size(0), query.size(1), self.num_heads, self.head_dim\n )\n key = self.k_proj(key).view(\n key.size(0), key.size(1), self.num_heads, self.head_dim\n )\n value = self.v_proj(value).view(\n value.size(0), value.size(1), self.num_heads, self.head_dim\n )\n\n query, key, value = 
self._transform_qkv(query, key, value)\n\n # now calculate the attention values\n context, attn_weights = self._inner_attn(\n query,\n key,\n value,\n attn_mask=attn_mask,\n key_padding_mask=key_padding_mask,\n return_weights=return_weights,\n )\n context = context.view(context.size(0), context.size(1), self.embed_dim)\n output = self.out_proj(context)\n\n if return_projs:\n return (output, attn_weights, (query, key, value))\n else:\n return output, attn_weights\n\n def forward(\n self,\n query: Union[torch.Tensor, PackedTensorSequences],\n key: Optional[Union[torch.Tensor, PackedTensorSequences]] = None,\n value: Optional[Union[torch.Tensor, PackedTensorSequences]] = None,\n key_padding_mask: Optional[torch.Tensor] = None,\n attn_mask: Optional[torch.Tensor] = None,\n return_weights: bool = False,\n return_projs: bool = False,\n ) -> Tuple[Union[torch.Tensor, PackedTensorSequences], Optional[torch.Tensor]]:\n # dispatch depending on whether input is Packed or unpacked\n packed_input = type(query) is PackedTensorSequences\n fn = self.forward_padded\n if packed_input:\n fn = self.forward_packed\n\n return fn(\n query, key, value, key_padding_mask, attn_mask, return_weights, return_projs\n )" }, { "identifier": "RotaryEmbedding", "path": "poet/models/modules/embedding.py", "snippet": "class RotaryEmbedding(nn.Module):\n \"\"\"\n The rotary position embeddings from RoFormer_ (Su et. al).\n A crucial insight from the method is that the query and keys are\n transformed by rotation matrices which depend on the relative positions.\n Other implementations are available in the Rotary Transformer repo_ and in\n GPT-NeoX_, GPT-NeoX was an inspiration\n .. _RoFormer: https://arxiv.org/abs/2104.09864\n .. _repo: https://github.com/ZhuiyiTechnology/roformer\n .. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox\n .. 
warning: Please note that this embedding is not registered on purpose, as it is transformative\n (it does not create the embedding dimension) and will likely be picked up (imported) on a ad-hoc basis\n \"\"\"\n\n def __init__(\n self,\n dim_model: int,\n scale: Optional[int] = None,\n force_fp32: Optional[bool] = None,\n *_,\n **__,\n ):\n super().__init__()\n self.dim_model = dim_model\n self.scale = scale or 10_000\n self.force_fp32 = force_fp32 or False\n # Generate and save the inverse frequency buffer (non trainable)\n inv_freq = self._get_inv_freq()\n if not force_fp32:\n self.register_buffer(\"inv_freq\", inv_freq)\n else:\n self.inv_freq = inv_freq\n\n self._seq_len_cached = None\n self._cos_cached = None\n self._sin_cached = None\n\n def _get_inv_freq(self):\n r = (\n torch.div(torch.arange(self.dim_model), 2, rounding_mode=\"floor\")\n * 2.0\n / self.dim_model\n )\n return 1.0 / (self.scale**r)\n\n def _update_cos_sin_tables(self, x, seq_dimension=-2):\n seq_len = x.shape[seq_dimension]\n\n # Reset the tables if the sequence length has changed,\n # or if we're on a new device (possibly due to tracing for instance)\n if (\n seq_len != self._seq_len_cached\n or self._cos_cached.device != x.device\n or self._cos_cached.dtype != x.dtype\n ):\n self._seq_len_cached = seq_len\n t = torch.arange(\n x.shape[seq_dimension], device=x.device, dtype=self.inv_freq.dtype\n )\n # Don't do einsum, it converts fp32 to fp16\n # freqs = torch.einsum(\"i,j->ij\", t, self.inv_freq)\n freqs = torch.outer(t, self.inv_freq)\n self._cos_cached = torch.cos(freqs).to(x.dtype)\n self._sin_cached = torch.sin(freqs).to(x.dtype)\n\n return self._cos_cached, self._sin_cached\n\n def get_cos_sin_tables(self, t: torch.Tensor, dtype=torch.float32):\n # t is the tensor of indices\n\n # cast self.inv_freq to force computation in single precision\n # lower precision may not be able to represent all possible values of t\n self.inv_freq = self.inv_freq.to(t.device)\n freqs = torch.outer(t, self.inv_freq.float())\n cos = torch.cos(freqs).to(dtype)\n sin = torch.sin(freqs).to(dtype)\n return cos, sin\n\n def forward(\n self,\n q: torch.Tensor,\n k: torch.Tensor,\n q_positions: Optional[torch.Tensor] = None,\n k_positions: Optional[torch.Tensor] = None,\n transform_q: bool = True,\n transform_k: bool = True,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n # q and k are either (b, s, h, d)\n # or they are packed (bs, h, d)\n\n if transform_q:\n if q_positions is None:\n # in this case, q must be (b, s, ..., d)\n s = q.size(1)\n q_positions = torch.arange(s, device=q.device)\n cos, sin = self.get_cos_sin_tables(q_positions, q.dtype)\n # apply the rotary embedding to q\n q = apply_rotary_pos_emb(q, cos, sin)\n\n if transform_k:\n if k_positions is not q_positions or not transform_q:\n # need to compute new cos, sin for k positions\n if k_positions is None:\n s = k.size(1)\n k_positions = torch.arange(s, device=k.device)\n cos, sin = self.get_cos_sin_tables(k_positions, k.dtype)\n # apply the rotary embedding to k\n k = apply_rotary_pos_emb(k, cos, sin)\n\n return q, k" }, { "identifier": "PackedTensorSequences", "path": "poet/models/modules/packed_sequence.py", "snippet": "class PackedTensorSequences:\n def __init__(\n self,\n packed_tensor: torch.Tensor,\n positions: torch.Tensor,\n indices: Optional[torch.Tensor],\n cu_seqlens: torch.Tensor,\n cu_seqlens_cpu: torch.Tensor,\n max_s: Union[torch.Tensor, int],\n batch_size: Optional[int],\n to_paddedable: bool = True,\n ):\n \"\"\"\n If to_paddedable, indicies and batch_size must 
be set to values that allow this\n object to be correctly padded.\n \"\"\"\n if to_paddedable:\n assert batch_size is not None\n\n self.x = packed_tensor\n self.positions = positions\n self.indices = indices\n self.cu_seqlens = cu_seqlens\n self.cu_seqlens_cpu = cu_seqlens_cpu\n self.max_s = max_s\n self.batch_size = batch_size\n self.to_paddedable = to_paddedable\n\n @property\n def dtype(self):\n return self.x.dtype\n\n @property\n def is_cuda(self):\n return self.x.is_cuda\n\n @property\n def device(self):\n return self.x.device\n\n @staticmethod\n def pack_input(x: torch.Tensor, positions=None, key_padding_mask=None):\n b = x.size(0)\n s = x.size(1)\n if positions is None:\n positions = (\n torch.arange(s, dtype=torch.long, device=x.device)\n .unsqueeze(0)\n .expand(b, s)\n )\n if key_padding_mask is None:\n x_packed = x.reshape(b * s, -1)\n positions = positions.reshape(b * s)\n indices = None\n cu_seqlens = torch.arange(\n 0, (b + 1) * s, step=s, dtype=torch.int32, device=x.device\n )\n cu_seqlens_cpu = torch.arange(\n 0,\n (b + 1) * s,\n step=s,\n dtype=torch.int32,\n )\n max_s = s\n else:\n # flash attention padding function expects 1 for valid and 0 for invalid positions...\n key_padding_mask_bool = ~(key_padding_mask.bool())\n x_packed, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask_bool)\n cu_seqlens_cpu = cu_seqlens.cpu()\n positions, _, _, _ = unpad_input(\n positions.unsqueeze(2), key_padding_mask_bool\n )\n positions = positions.squeeze(1)\n return PackedTensorSequences(\n x_packed, positions, indices, cu_seqlens, cu_seqlens_cpu, max_s, b\n )\n\n def to_padded(self, return_mask=False, return_positions=False):\n if not self.to_paddedable:\n raise ValueError(\"Cannot be to_padded\")\n\n s = self.max_s\n b = self.batch_size\n mask = None\n x = self.x\n pos = self.positions\n if self.indices is None:\n # we are just a flattened matrix...\n x = x.view(b, s, *x.shape[1:])\n pos = pos.view(b, s)\n else:\n dims = None\n if x.ndim > 2:\n dims = x.shape[1:]\n x = x.view(x.size(0), -1)\n x, mask = pad_input(x, self.indices, b, s, return_mask=return_mask)\n pos, _ = pad_input(pos.unsqueeze(1), self.indices, b, s)\n pos = pos.squeeze(2)\n if dims is not None:\n x = x.view(x.size(0), x.size(1), *dims)\n\n if return_mask and return_positions:\n return x, mask, pos\n elif return_mask:\n return x, mask\n elif return_positions:\n return x, pos\n else:\n return x\n\n @staticmethod\n def compute_indices(seqlens: torch.Tensor):\n indices_mask = get_mask(seqlens)\n indices = torch.nonzero(~indices_mask.flatten(), as_tuple=False).flatten()\n return indices" }, { "identifier": "get_mask", "path": "poet/models/modules/packed_sequence.py", "snippet": "def get_mask(batch_sizes: torch.Tensor) -> torch.Tensor:\n \"\"\"\n batch_sizes: (B,)\n\n Returns a bool tensor of shape n_samples x max_batch_size.\n 0s are non-masked and 1s and masked elements\n \"\"\"\n max_len = batch_sizes.max()\n # taken from https://discuss.pytorch.org/t/how-to-generate-variable-length-mask/23397/3\n mask = (\n torch.arange(max_len, device=batch_sizes.device)[None, :]\n >= batch_sizes[:, None]\n )\n return mask" }, { "identifier": "pad_input", "path": "poet/models/modules/packed_sequence.py", "snippet": "def pad_input(hidden_states, indices, batch, seqlen, return_mask=False):\n \"\"\"\n Arguments:\n hidden_states: (total_nnz, dim), where total_nnz = number of tokens in selected in attention_mask.\n indices: (total_nnz)\n Return:\n hidden_states: (batch, seqlen, dim)\n \"\"\"\n dim = hidden_states.shape[-1]\n output 
= torch.zeros(\n (batch * seqlen), dim, device=hidden_states.device, dtype=hidden_states.dtype\n )\n output[indices] = hidden_states\n output = output.view(batch, seqlen, dim)\n if return_mask:\n mask = torch.ones(\n (batch * seqlen), device=hidden_states.device, dtype=torch.bool\n )\n mask[indices] = False\n mask = mask.view(batch, seqlen)\n return output, mask\n return output, None" }, { "identifier": "unpad_input", "path": "poet/models/modules/packed_sequence.py", "snippet": "def unpad_input(hidden_states, attention_mask):\n \"\"\"\n Arguments:\n hidden_states: (batch, seqlen, dim)\n attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.\n Return:\n hidden_states: (total_nnz, dim), where total_nnz = number of tokens in selected in attention_mask.\n cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.\n max_seqlen_in_batch: int\n \"\"\"\n assert hidden_states.size(0) == attention_mask.size(0)\n # padding/unpadding is not invertible when sequence length is less than the mask size\n # because the final position(s) is masked in all sequences...\n # this causes indices to not match with the tensor given by max_seqlen_in_batch\n # there are two possible solutions:\n # 1) first remove these positions from hidden_states\n # 2) set max_seqlen_in_batch to be the number of columns even if fully masked\n # let's opt for (2), because we assume those columns are wanted for some reason\n\n seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)\n indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()\n # max_seqlen_in_batch = seqlens_in_batch.max().item()\n max_seqlen_in_batch = attention_mask.size(-1)\n cu_seqlens = F.pad(\n torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)\n )\n\n b, s, d = hidden_states.size()\n hidden_states = hidden_states.reshape(b * s, d)\n\n selected_hidden_states = torch.gather(\n hidden_states, 0, indices.unsqueeze(1).expand(indices.size(0), d)\n )\n return selected_hidden_states, indices, cu_seqlens, max_seqlen_in_batch" }, { "identifier": "TransformerEncoder", "path": "poet/models/modules/transformer.py", "snippet": "class TransformerEncoder(nn.TransformerEncoder):\n def __init__(\n self, encoder_layer, num_layers, norm=None, enable_nested_tensor=False\n ):\n super().__init__(encoder_layer, num_layers, norm, enable_nested_tensor)\n for layer in self.layers:\n layer.reset_parameters()\n\n def __len__(self):\n return len(self.layers)\n\n def __getitem__(self, i):\n return self.layers[i]\n\n def forward(\n self,\n x,\n src_mask=None,\n src_key_padding_mask=None,\n return_attention=False,\n activation_checkpointing=False,\n **kwargs,\n ):\n attn = []\n for layer in self.layers:\n if not activation_checkpointing:\n x = layer(\n x,\n src_mask=src_mask,\n src_key_padding_mask=src_key_padding_mask,\n return_attention=return_attention,\n **kwargs,\n )\n else:\n x = checkpoint.checkpoint(\n layer,\n x,\n src_mask=src_mask,\n src_key_padding_mask=src_key_padding_mask,\n return_attention=return_attention,\n **kwargs,\n use_reentrant=False,\n )\n if return_attention:\n x, a = x\n attn.append(a)\n\n if return_attention:\n return x, attn\n\n return x" }, { "identifier": "TieredRotaryTransformerEncoderLayer", "path": "poet/models/modules/transformer_rotary.py", "snippet": "class TieredRotaryTransformerEncoderLayer(TieredTransformerEncoderLayer):\n def __init__(\n self,\n *args,\n rotary_scale=None,\n rotary_force_fp32=None,\n use_multi_rotary=True,\n **kwargs,\n ):\n 
self.rotary_scale = rotary_scale\n self.rotary_force_fp32 = rotary_force_fp32\n self.use_multi_rotary = use_multi_rotary\n super().__init__(*args, **kwargs)\n\n def _init_self_mha_module(\n self,\n d_model,\n nhead,\n dropout=0,\n use_qkv_bias=False,\n batch_first=True,\n causal=False,\n ):\n \"\"\"\n Initialize the multi-head attention module used for each sequence independently.\n \"\"\"\n return RotaryFlashMultiheadAttention(\n d_model,\n nhead,\n self_attention=True,\n dropout=dropout,\n bias=use_qkv_bias,\n batch_first=batch_first,\n causal=causal,\n rotary_scale=self.rotary_scale,\n rotary_force_fp32=self.rotary_force_fp32,\n )\n\n def _init_multi_mha_module(\n self,\n d_model,\n nhead,\n dropout=0,\n use_qkv_bias=False,\n batch_first=True,\n causal=False,\n ):\n \"\"\"\n Initialize the multi-head attention module used for each sequence-of-sequences.\n \"\"\"\n Module = FlashMultiheadAttention\n if self.use_multi_rotary:\n Module = RotaryFlashMultiheadAttention\n return Module(\n d_model,\n nhead,\n self_attention=True,\n dropout=dropout,\n bias=use_qkv_bias,\n batch_first=batch_first,\n causal=causal,\n rotary_scale=self.rotary_scale,\n rotary_force_fp32=self.rotary_force_fp32,\n )" } ]
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Union
from tqdm import tqdm
from poet.alphabets import Uniprot21
from poet.models.modules.activation import gelu
from poet.models.modules.attention import MultiheadAttention
from poet.models.modules.embedding import RotaryEmbedding
from poet.models.modules.packed_sequence import (
    PackedTensorSequences,
    get_mask,
    pad_input,
    unpad_input,
)
from poet.models.modules.transformer import TransformerEncoder
from poet.models.modules.transformer_rotary import TieredRotaryTransformerEncoderLayer
7,172
def top_k_top_p_filtering( logits: torch.Tensor, top_k: Optional[int] = 0, top_p: Optional[float] = 1.0, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1, ) -> torch.Tensor: """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) if top_k > 0: keep only top k tokens with highest probability (top-k filtering). if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) Make sure we keep at least min_tokens_to_keep per batch example in the output From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 Adapted from: https://huggingface.co/transformers/v3.2.0/_modules/transformers/generation_utils.html """ if top_k is not None: top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] logits[indices_to_remove] = filter_value if top_p is not None and top_p < 1.0: sorted_logits, sorted_indices = torch.sort(logits, descending=True) cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) # Remove tokens with cumulative probability above the threshold (token with 0 are kept) sorted_indices_to_remove = cumulative_probs > top_p if min_tokens_to_keep > 1: # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) sorted_indices_to_remove[..., :min_tokens_to_keep] = 0 # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 # scatter sorted tensors to original indexing indices_to_remove = sorted_indices_to_remove.scatter( 1, sorted_indices, sorted_indices_to_remove ) logits[indices_to_remove] = filter_value return logits class LogitsAllocateMemoryMixin(object): """ Stateless mixin providing methods for preallocating memory for logits calculations. """ @classmethod def logits_allocate_memory( cls,
def top_k_top_p_filtering( logits: torch.Tensor, top_k: Optional[int] = 0, top_p: Optional[float] = 1.0, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1, ) -> torch.Tensor: """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) if top_k > 0: keep only top k tokens with highest probability (top-k filtering). if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) Make sure we keep at least min_tokens_to_keep per batch example in the output From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 Adapted from: https://huggingface.co/transformers/v3.2.0/_modules/transformers/generation_utils.html """ if top_k is not None: top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] logits[indices_to_remove] = filter_value if top_p is not None and top_p < 1.0: sorted_logits, sorted_indices = torch.sort(logits, descending=True) cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) # Remove tokens with cumulative probability above the threshold (token with 0 are kept) sorted_indices_to_remove = cumulative_probs > top_p if min_tokens_to_keep > 1: # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) sorted_indices_to_remove[..., :min_tokens_to_keep] = 0 # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 # scatter sorted tensors to original indexing indices_to_remove = sorted_indices_to_remove.scatter( 1, sorted_indices, sorted_indices_to_remove ) logits[indices_to_remove] = filter_value return logits class LogitsAllocateMemoryMixin(object): """ Stateless mixin providing methods for preallocating memory for logits calculations. """ @classmethod def logits_allocate_memory( cls,
memory: Optional[list[PackedTensorSequences]],
4
2023-10-28 01:30:26+00:00
8k
Transconnectome/SwiFT
project/module/pl_classifier.py
[ { "identifier": "load_model", "path": "project/module/models/load_model.py", "snippet": "def load_model(model_name, hparams=None):\n #number of transformer stages\n n_stages = len(hparams.depths)\n\n if hparams.precision == 16:\n to_float = False\n elif hparams.precision == 32:\n to_float = True\n\n print(to_float)\n\n if model_name == \"swin4d_ver7\":\n net = SwinTransformer4D_ver7(\n img_size=hparams.img_size,\n in_chans=hparams.in_chans,\n embed_dim=hparams.embed_dim,\n window_size=hparams.window_size,\n first_window_size=hparams.first_window_size,\n patch_size=hparams.patch_size,\n depths=hparams.depths,\n num_heads=hparams.num_heads,\n c_multiplier=hparams.c_multiplier,\n last_layer_full_MSA=hparams.last_layer_full_MSA,\n to_float = to_float,\n drop_rate=hparams.attn_drop_rate,\n drop_path_rate=hparams.attn_drop_rate,\n attn_drop_rate=hparams.attn_drop_rate\n )\n elif model_name == \"emb_mlp\":\n from .emb_mlp import mlp\n net = mlp(final_embedding_size=128, num_tokens = hparams.embed_dim * (hparams.c_multiplier ** (n_stages - 1)), use_normalization=True)\n elif model_name == \"clf_mlp\":\n if hparams.clf_head_version == 'v1':\n from .clf_mlp import mlp\n net = mlp(num_classes=2, num_tokens = hparams.embed_dim * (hparams.c_multiplier ** (n_stages - 1)))\n elif hparams.clf_head_version == 'v2':\n from .clf_mlp_v2 import mlp\n net = mlp(num_classes=2, num_tokens = hparams.embed_dim * (hparams.c_multiplier ** (n_stages - 1)))\n else:\n raise NotImplementedError\n # x -> (b, 96, 4, 4, 4, t)\n elif model_name == \"reg_mlp\":\n from .clf_mlp import mlp\n net = mlp(num_classes=1, num_tokens = hparams.embed_dim * (hparams.c_multiplier ** (n_stages - 1)))\n else:\n raise NameError(f\"{model_name} is a wrong model name\")\n\n return net" }, { "identifier": "Metrics", "path": "project/module/utils/metrics.py", "snippet": "class Metrics:\n @staticmethod\n def get_accuracy(y_hat, y):\n return (y_hat.argmax(dim=1) == y).float().mean()\n\n @staticmethod\n def get_accuracy_binary(y_hat, y):\n return ((y_hat >= 0) == y).float().mean()" }, { "identifier": "str2bool", "path": "project/module/utils/parser.py", "snippet": "def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif v.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise argparse.ArgumentTypeError(\"Boolean value expected.\")" }, { "identifier": "NTXentLoss", "path": "project/module/utils/losses.py", "snippet": "class NTXentLoss(torch.nn.Module):\n def __init__(self, device, batch_size, temperature, use_cosine_similarity):\n super(NTXentLoss, self).__init__()\n self.batch_size = batch_size\n self.temperature = temperature\n self.device = device\n self.softmax = torch.nn.Softmax(dim=-1)\n self.mask_samples_from_same_repr = self._get_correlated_mask().type(torch.bool)\n self.similarity_function = self._get_similarity_function(use_cosine_similarity)\n self.criterion = torch.nn.CrossEntropyLoss(reduction=\"sum\")\n\n def _get_similarity_function(self, use_cosine_similarity):\n if use_cosine_similarity:\n self._cosine_similarity = torch.nn.CosineSimilarity(dim=-1)\n return self._cosine_simililarity\n else:\n return self._dot_simililarity\n\n def _get_correlated_mask(self):\n diag = np.eye(2 * self.batch_size)\n l1 = np.eye((2 * self.batch_size), 2 * self.batch_size, k=-self.batch_size)\n l2 = np.eye((2 * self.batch_size), 2 * self.batch_size, k=self.batch_size)\n mask = torch.from_numpy((diag + l1 + l2))\n mask = (1 - 
mask).type(torch.bool)\n return mask.to(self.device)\n\n @staticmethod\n def _dot_simililarity(x, y):\n v = torch.tensordot(x.unsqueeze(1), y.T.unsqueeze(0), dims=2)\n # x shape: (N, 1, C)\n # y shape: (1, C, 2N)\n # v shape: (N, 2N)\n return v\n\n def _cosine_simililarity(self, x, y):\n # x shape: (N, 1, C)\n # y shape: (1, 2N, C)\n # v shape: (N, 2N)\n v = self._cosine_similarity(x.unsqueeze(1), y.unsqueeze(0))\n return v\n\n def forward(self, zis, zjs):\n representations = torch.cat([zjs, zis], dim=0)\n\n similarity_matrix = self.similarity_function(representations, representations)\n # print(f'similarity_matrix shpae is {similarity_matrix.shape}')\n\n # filter out the scores from the positive samples\n l_pos = torch.diag(similarity_matrix, self.batch_size)\n # print(f'l_pos shpae is {l_pos.shape}')\n\n r_pos = torch.diag(similarity_matrix, -self.batch_size)\n positives = torch.cat([l_pos, r_pos]).view(2 * self.batch_size, 1)\n\n negatives = similarity_matrix[self.mask_samples_from_same_repr].view(2 * self.batch_size, -1)\n\n logits = torch.cat((positives, negatives), dim=1)\n logits /= self.temperature\n\n labels = torch.zeros(2 * self.batch_size).to(self.device).long()\n loss = self.criterion(logits, labels)\n\n return loss / (2 * self.batch_size)" }, { "identifier": "global_local_temporal_contrastive", "path": "project/module/utils/losses.py", "snippet": "def global_local_temporal_contrastive(lsr,gdr, temperature):\n #lsr denotes local sparse-clip representation= representation of temporal slice of global clip\n #gdr denotes global dense-clip representation= representation of global(pooled) feature of local clip\n\n #lsr,gdr shape should be [BS,num_clips,128]\n num_clips = lsr.shape[1]\n similarity_matrix = torch.bmm(lsr, gdr.permute(0,2,1)) # [BS, num_clips, num_clips]\n # print(similarity_matrix)\n similarity_matrix = torch.cat((similarity_matrix, similarity_matrix.permute(0,2,1)),dim=0) # [BS*2, num_clips, num_clips]\n # print()\n # print(similarity_matrix)\n similarity_matrix = similarity_matrix.view(-1, num_clips) # [BS*2*num_clips, num_clips]\n # print()\n # print(similarity_matrix)\n # print()\n sample_lab = [i for i in range(num_clips)]\n label = []\n for i in range(lsr.shape[0]*2):\n label.extend(sample_lab)\n label = torch.from_numpy(np.asarray(label)).long().cuda()\n similarity_matrix /= temperature\n\n loss = nn.functional.cross_entropy(similarity_matrix, label, reduction='sum')\n return loss/ (2*lsr.shape[0])" }, { "identifier": "WarmupCosineSchedule", "path": "project/module/utils/lr_scheduler.py", "snippet": "class WarmupCosineSchedule(LambdaLR):\n \"\"\"Linear warmup and then cosine decay.\n Based on https://huggingface.co/ implementation.\n \"\"\"\n\n def __init__(\n self, optimizer: Optimizer, warmup_steps: int, t_total: int, cycles: float = 0.5, last_epoch: int = -1, restart_interval: int = -1\n ) -> None:\n \"\"\"\n Args:\n optimizer: wrapped optimizer.\n warmup_steps: number of warmup iterations.\n t_total: total number of training iterations.\n cycles: cosine cycles parameter.\n last_epoch: the index of last epoch.\n Returns:\n None\n \"\"\"\n self.warmup_steps = warmup_steps\n self.t_total = t_total \n self.cycles = cycles\n self.restart_interval = restart_interval\n super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch)\n\n def lr_lambda(self, step):\n if self.restart_interval >= 0:\n step = step % self.restart_interval\n if step < self.warmup_steps:\n return float(step) / float(max(1.0, self.warmup_steps))\n progress = float(step - 
self.warmup_steps) / float(max(1, self.restart_interval - self.warmup_steps))\n return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(self.cycles) * 2.0 * progress)))" }, { "identifier": "CosineAnnealingWarmUpRestarts", "path": "project/module/utils/lr_scheduler.py", "snippet": "class CosineAnnealingWarmUpRestarts(_LRScheduler):\n \"\"\"\n optimizer (Optimizer): Wrapped optimizer.\n first_cycle_steps (int): First cycle step size.\n cycle_mult(float): Cycle steps magnification. Default: -1.\n max_lr(float): First cycle's max learning rate. Default: 0.1.\n min_lr(float): Min learning rate. Default: 0.001.\n warmup_steps(int): Linear warmup step size. Default: 0.\n gamma(float): Decrease rate of max learning rate by cycle. Default: 1.\n last_epoch (int): The index of last epoch. Default: -1.\n \"\"\"\n \n def __init__(self,\n optimizer : torch.optim.Optimizer,\n first_cycle_steps : int,\n cycle_mult : float = 1.,\n max_lr : float = 0.1,\n min_lr : float = 0.001,\n warmup_steps : int = 0,\n gamma : float = 1.,\n last_epoch : int = -1\n ):\n assert warmup_steps < first_cycle_steps\n \n self.first_cycle_steps = first_cycle_steps # first cycle step size\n self.cycle_mult = cycle_mult # cycle steps magnification\n self.base_max_lr = max_lr # first max learning rate\n self.max_lr = max_lr # max learning rate in the current cycle\n self.min_lr = min_lr # min learning rate\n self.warmup_steps = warmup_steps # warmup step size\n self.gamma = gamma # decrease rate of max learning rate by cycle\n \n self.cur_cycle_steps = first_cycle_steps # first cycle step size\n self.cycle = 0 # cycle count\n self.step_in_cycle = last_epoch # step size of the current cycle\n \n super(CosineAnnealingWarmUpRestarts, self).__init__(optimizer, last_epoch)\n \n # set learning rate min_lr\n self.init_lr()\n \n def init_lr(self):\n self.base_lrs = []\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.min_lr\n self.base_lrs.append(self.min_lr)\n \n def get_lr(self):\n if self.step_in_cycle == -1:\n return self.base_lrs\n elif self.step_in_cycle < self.warmup_steps:\n return [(self.max_lr - base_lr)*self.step_in_cycle / self.warmup_steps + base_lr for base_lr in self.base_lrs]\n else:\n return [base_lr + (self.max_lr - base_lr) \\\n * (1 + math.cos(math.pi * (self.step_in_cycle-self.warmup_steps) \\\n / (self.cur_cycle_steps - self.warmup_steps))) / 2\n for base_lr in self.base_lrs]\n\n def step(self, epoch=None):\n if epoch is None:\n epoch = self.last_epoch + 1\n self.step_in_cycle = self.step_in_cycle + 1\n if self.step_in_cycle >= self.cur_cycle_steps:\n self.cycle += 1\n self.step_in_cycle = self.step_in_cycle - self.cur_cycle_steps\n self.cur_cycle_steps = int((self.cur_cycle_steps - self.warmup_steps) * self.cycle_mult) + self.warmup_steps\n else:\n if epoch >= self.first_cycle_steps:\n if self.cycle_mult == 1.:\n self.step_in_cycle = epoch % self.first_cycle_steps\n self.cycle = epoch // self.first_cycle_steps\n else:\n n = int(math.log((epoch / self.first_cycle_steps * (self.cycle_mult - 1) + 1), self.cycle_mult))\n self.cycle = n\n self.step_in_cycle = epoch - int(self.first_cycle_steps * (self.cycle_mult ** n - 1) / (self.cycle_mult - 1))\n self.cur_cycle_steps = self.first_cycle_steps * self.cycle_mult ** (n)\n else:\n self.cur_cycle_steps = self.first_cycle_steps\n self.step_in_cycle = epoch\n \n self.max_lr = self.base_max_lr * (self.gamma**self.cycle)\n self.last_epoch = math.floor(epoch)\n for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):\n param_group['lr'] = 
lr" } ]
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
import numpy as np
import os
import pickle
import scipy
import torchmetrics
import torchmetrics.classification
import monai.transforms as monai_t
import nibabel as nb
from torchmetrics.classification import BinaryAccuracy, BinaryAUROC, BinaryROC
from torchmetrics import PearsonCorrCoef # Accuracy,
from sklearn.metrics import accuracy_score, balanced_accuracy_score, roc_curve
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from .models.load_model import load_model
from .utils.metrics import Metrics
from .utils.parser import str2bool
from .utils.losses import NTXentLoss, global_local_temporal_contrastive
from .utils.lr_scheduler import WarmupCosineSchedule, CosineAnnealingWarmUpRestarts
from einops import rearrange
from sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler, KBinsDiscretizer
3,619
class LitClassifier(pl.LightningModule):
    def __init__(self,data_module, **kwargs):
        super().__init__()
        self.save_hyperparameters(kwargs) # save hyperparameters except data_module (data_module cannot be pickled as a checkpoint)
        # you should define target_values at the Dataset classes
        target_values = data_module.train_dataset.target_values
        if self.hparams.label_scaling_method == 'standardization':
            scaler = StandardScaler()
            normalized_target_values = scaler.fit_transform(target_values)
            print(f'target_mean:{scaler.mean_[0]}, target_std:{scaler.scale_[0]}')
        elif self.hparams.label_scaling_method == 'minmax':
            scaler = MinMaxScaler()
            normalized_target_values = scaler.fit_transform(target_values)
            print(f'target_max:{scaler.data_max_[0]},target_min:{scaler.data_min_[0]}')
        self.scaler = scaler
        print(self.hparams.model)
class LitClassifier(pl.LightningModule):
    def __init__(self,data_module, **kwargs):
        super().__init__()
        self.save_hyperparameters(kwargs) # save hyperparameters except data_module (data_module cannot be pickled as a checkpoint)
        # you should define target_values at the Dataset classes
        target_values = data_module.train_dataset.target_values
        if self.hparams.label_scaling_method == 'standardization':
            scaler = StandardScaler()
            normalized_target_values = scaler.fit_transform(target_values)
            print(f'target_mean:{scaler.mean_[0]}, target_std:{scaler.scale_[0]}')
        elif self.hparams.label_scaling_method == 'minmax':
            scaler = MinMaxScaler()
            normalized_target_values = scaler.fit_transform(target_values)
            print(f'target_max:{scaler.data_max_[0]},target_min:{scaler.data_min_[0]}')
        self.scaler = scaler
        print(self.hparams.model)
self.model = load_model(self.hparams.model, self.hparams)
0
2023-10-28 09:26:03+00:00
8k
deepsearch-ai/deepsearch
deepsearchai/app.py
[ { "identifier": "EmbeddingModelsConfig", "path": "deepsearchai/embedding_models_config.py", "snippet": "class EmbeddingModelsConfig:\n def __init__(\n self,\n image_embedding_model: Optional[BaseEmbeddingModel] = None,\n audio_embedding_model: Optional[BaseEmbeddingModel] = None,\n video_embedding_model: Optional[BaseEmbeddingModel] = None,\n image_captioning_model: Optional[BaseEmbeddingModel] = BlipImageCaptioning(),\n ):\n if not image_embedding_model:\n image_embedding_model = Clip()\n if not audio_embedding_model:\n audio_embedding_model = WhisperOpenAi()\n if not video_embedding_model:\n video_embedding_model = WhisperOpenAi()\n image_embedding_models = [image_embedding_model]\n audio_embedding_models = [audio_embedding_model]\n video_embedding_models = [video_embedding_model]\n if image_captioning_model:\n image_embedding_models.append(image_captioning_model)\n\n self.llm_models = {\n MEDIA_TYPE.AUDIO: audio_embedding_models,\n MEDIA_TYPE.IMAGE: image_embedding_models,\n MEDIA_TYPE.VIDEO: video_embedding_models,\n }\n\n def get_embedding_model(self, media_type: MEDIA_TYPE):\n return self.llm_models.get(media_type, [])" }, { "identifier": "MEDIA_TYPE", "path": "deepsearchai/enums.py", "snippet": "class MEDIA_TYPE(Enum):\n UNKNOWN = -1\n IMAGE = 1\n TEXT = 2\n AUDIO = 3\n VIDEO = 4" }, { "identifier": "BaseLLM", "path": "deepsearchai/llms/base.py", "snippet": "class BaseLLM:\n DEFAULT_PROMPT_TEMPLATE = Template(DEFAULT_PROMPT)\n\n def __init__(self):\n pass\n\n def query(\n self,\n query: str,\n contexts: Dict[MEDIA_TYPE, List[MediaData]],\n ) -> QueryResult:\n raise NotImplementedError" }, { "identifier": "OpenAi", "path": "deepsearchai/llms/openai.py", "snippet": "class OpenAi(BaseLLM):\n def __init__(self, config: OpenAiConfig = OpenAiConfig()):\n self.config = config\n super().__init__()\n\n def generate_prompt(self, input_query: str, contexts: List[str]) -> str:\n \"\"\"\n Generates a prompt based on the given query and context, ready to be\n passed to an LLM\n\n :param input_query: The query to use.\n :type input_query: str\n :param contexts: List of similar documents to the query used as context.\n :type contexts: List[str]\n :return: The prompt\n :rtype: str\n \"\"\"\n context_string = (\" | \").join(contexts)\n # basic use case, no history.\n prompt = self.DEFAULT_PROMPT_TEMPLATE.substitute(\n context=context_string, query=input_query\n )\n return prompt\n\n def query(\n self,\n query: str,\n contexts: Dict[MEDIA_TYPE, List[MediaData]],\n ) -> QueryResult:\n results = []\n for item in contexts.items():\n media_data = item[1]\n for each_response in media_data:\n results.append(each_response.get(\"document\", \"\"))\n prompt = self.generate_prompt(query, results)\n llm_response = self.get_llm_model_answer(prompt)\n query_result = {\"llm_response\": llm_response, \"documents\": contexts}\n return query_result\n\n def get_llm_model_answer(self, prompt) -> str:\n response = self._get_answer(prompt, self.config)\n return response\n\n def _get_answer(self, prompt: str, config: OpenAiConfig) -> str:\n messages = []\n messages.append(HumanMessage(content=prompt))\n kwargs = {\n \"model\": \"gpt-3.5-turbo\",\n \"max_tokens\": 1000,\n \"model_kwargs\": {},\n }\n chat = ChatOpenAI(**kwargs)\n return chat(messages).content" }, { "identifier": "SourceUtils", "path": "deepsearchai/sources/utils.py", "snippet": "class SourceUtils:\n def __init__(self):\n self.local_data_source = LocalDataSource()\n self.s3_data_source = S3DataSource()\n self.youtube_data_source = YoutubeDatasource()\n\n 
def add_data(\n self,\n source: str,\n embedding_models_config: EmbeddingModelsConfig,\n vector_database: BaseVectorDatabase,\n ) -> None:\n datasource = self._infer_type(source)\n if datasource == DataSource.S3:\n self.s3_data_source.add_data(\n source, embedding_models_config, vector_database\n )\n elif datasource == DataSource.LOCAL:\n self.local_data_source.add_data(\n source, embedding_models_config, vector_database\n )\n elif datasource == DataSource.YOUTUBE:\n self.youtube_data_source.add_data(\n source, embedding_models_config, vector_database\n )\n else:\n raise ValueError(\"Invalid data source\")\n\n def get_data(\n self,\n query: str,\n media_types: List[MEDIA_TYPE],\n embedding_models_config: EmbeddingModelsConfig,\n vector_database: BaseVectorDatabase,\n n_results: int\n ) -> Dict[MEDIA_TYPE, List[MediaData]]:\n media_data = {}\n for media_type in media_types:\n if media_type == MEDIA_TYPE.UNKNOWN:\n continue\n media_data[media_type] = []\n for embedding_model in embedding_models_config.get_embedding_model(\n media_type\n ):\n media_data[media_type].extend(\n vector_database.query(query, n_results, media_type, 0.5, embedding_model)\n )\n return media_data\n\n def _infer_type(self, source: str) -> DataSource:\n if self._is_s3_path(source):\n return DataSource.S3\n elif self._is_local_datasource(source):\n return DataSource.LOCAL\n elif self._is_youtube_datasource(source):\n return DataSource.YOUTUBE\n else:\n raise ValueError(\"Invalid data source\")\n\n def _is_s3_path(self, path: str):\n \"\"\"Checks if a supplied string is an S3 path.\"\"\"\n\n # Regex pattern for an S3 path\n # s3_path_regex = r'^s3://(?P<bucket>[A-Za-z0-9\\-\\.]+)/(?P<key>.*)$'\n s3_path_regex = r\"^s3://(?P<bucket>[A-Za-z0-9\\-\\.\\/]+)$\"\n\n # Match the path against the regex pattern\n match = re.match(s3_path_regex, path)\n\n # If the path matches the regex pattern, then it is an S3 path\n return match is not None\n\n def _is_local_datasource(self, source: str) -> bool:\n \"\"\"Checks if a supplied string is a local directory or a file.\n\n Args:\n source: The string to check.\n\n Returns:\n True if the string is a local directory or a file, False otherwise.\n \"\"\"\n if os.path.isdir(source):\n return True\n elif os.path.isfile(source):\n return True\n else:\n return False\n\n def _is_youtube_datasource(self, source: str) -> bool:\n \"\"\"Checks if a supplied string is a youtube channel id\n\n Args:\n source: The string to check.\n\n Returns:\n True if the string is a Youtube channel id\n \"\"\"\n tokens = source.split(\":\")\n if len(tokens) == 2 and tokens[0] == \"youtube\":\n return True\n else:\n return False" }, { "identifier": "MediaData", "path": "deepsearchai/types.py", "snippet": "class MediaData(TypedDict):\n document: str\n metadata: Optional[Dict[str, str]]" }, { "identifier": "QueryResult", "path": "deepsearchai/types.py", "snippet": "class QueryResult(TypedDict):\n llm_response: str\n documents: Optional[Dict[MEDIA_TYPE, List[MediaData]]]" }, { "identifier": "BaseVectorDatabase", "path": "deepsearchai/vector_databases/base.py", "snippet": "class BaseVectorDatabase:\n def __init__(self, config: BaseVectorDatabaseConfig):\n self.config = config\n pass\n\n def add(\n self,\n data: Any,\n datasource: DataSource,\n file: str,\n source: str,\n media_type: MEDIA_TYPE,\n embedding_model: BaseEmbeddingModel,\n ):\n raise NotImplementedError\n\n def query(\n self,\n query: str,\n n_results: int,\n media_type: MEDIA_TYPE,\n distance_threshold: float,\n embedding_model: BaseEmbeddingModel,\n ) -> 
List[MediaData]:\n raise NotImplementedError\n\n def get_existing_document_ids(\n self, document_ids: List[str], collection_name: str\n ) -> List[str]:\n raise NotImplementedError\n\n def _construct_metadata(\n self, metadata: List[Dict[str, Any]], source: str, document_id: str, len: int\n ):\n new_metadata = copy.deepcopy(metadata)\n is_metadata_empty = not metadata\n if is_metadata_empty:\n new_metadata = []\n\n for i in range(len):\n temp_metadata = {\n \"source_type\": DataSource.LOCAL.name,\n \"source_id\": source,\n \"document_id\": document_id,\n }\n if is_metadata_empty:\n new_metadata.append(temp_metadata)\n else:\n new_metadata[i].update(temp_metadata)\n\n return new_metadata" }, { "identifier": "ChromaDB", "path": "deepsearchai/vector_databases/chromadb.py", "snippet": "class ChromaDB(BaseVectorDatabase):\n \"\"\"Vector database using ChromaDB.\"\"\"\n\n BATCH_SIZE = 100\n\n def __init__(\n self,\n embedding_models_config: EmbeddingModelsConfig = EmbeddingModelsConfig(),\n config: Optional[ChromaDbConfig] = None,\n ):\n \"\"\"Initialize a new ChromaDB instance\n\n :param config: Configuration options for Chroma, defaults to None\n :type config: Optional[ChromaDbConfig], optional\n \"\"\"\n if config:\n self.config = config\n else:\n self.config = ChromaDbConfig()\n\n self.client = chromadb.Client(self.config.settings)\n self.embedding_models_config = embedding_models_config\n self._set_all_collections()\n super().__init__(config=self.config)\n\n def add(\n self,\n data: Any,\n datasource: DataSource,\n file: str,\n source: str,\n media_type: MEDIA_TYPE,\n embedding_model: BaseEmbeddingModel,\n ):\n encodings_json = embedding_model.get_media_encoding(\n data, media_type, datasource\n )\n embeddings = encodings_json.get(\"embedding\", None)\n documents = (\n [file]\n if not encodings_json.get(\"documents\")\n else encodings_json.get(\"documents\")\n )\n metadata = self._construct_metadata(\n encodings_json.get(\"metadata\", None), source, file, len(documents)\n )\n ids = encodings_json.get(\"ids\", [])\n size = len(documents)\n if embeddings is not None and len(embeddings) != size:\n raise ValueError(\n \"Cannot add documents to chromadb with inconsistent embeddings\"\n )\n collection = self._get_or_create_collection(\n embedding_model.get_collection_name(media_type)\n )\n # embedding would be created by the llm model used\n for i in range(0, len(documents), self.BATCH_SIZE):\n print(\n \"Inserting batches from {} to {} in chromadb\".format(\n i, min(len(documents), i + self.BATCH_SIZE)\n )\n )\n if embeddings is not None:\n collection.add(\n embeddings=embeddings[i : i + self.BATCH_SIZE],\n documents=documents[i : i + self.BATCH_SIZE],\n ids=ids[i : i + self.BATCH_SIZE],\n metadatas=metadata[i : i + self.BATCH_SIZE],\n )\n\n else:\n collection.add(\n documents=documents[i : i + self.BATCH_SIZE],\n ids=ids[i : i + self.BATCH_SIZE],\n metadatas=metadata[i : i + self.BATCH_SIZE],\n )\n\n def query(\n self,\n query: str,\n n_results: int,\n media_type: MEDIA_TYPE,\n distance_threshold: float,\n embedding_model: BaseEmbeddingModel,\n ) -> List[MediaData]:\n response = embedding_model.get_text_encoding(query)\n input_embeddings = response.get(\"embedding\", None)\n input_query = response.get(\"text\", None)\n if input_embeddings:\n query_params = {\n \"query_embeddings\": [input_embeddings],\n \"n_results\": n_results,\n }\n else:\n query_params = {\"query_texts\": [input_query], \"n_results\": n_results}\n\n media_data = []\n\n collection = self._get_or_create_collection(\n 
embedding_model.get_collection_name(media_type)\n )\n try:\n results = collection.query(**query_params)\n except InvalidDimensionException as e:\n raise InvalidDimensionException(\n e.message()\n + \". This is commonly a side-effect when an embedding function, different from the one used to\"\n \" add the embeddings, is used to retrieve an embedding from the database.\"\n ) from None\n filtered_results = self.filter_query_result_by_distance(\n results, distance_threshold\n )\n if len(filtered_results.get(\"documents\", [])) == 0:\n return media_data\n\n documents = filtered_results.get(\"documents\")[0]\n metadatas = filtered_results.get(\"metadatas\")[0]\n distances = filtered_results.get(\"distances\")[0]\n for document, metadata, distance in zip(documents, metadatas, distances):\n media_data.append({\"document\": document, \"metadata\": metadata, \"distance\": distance})\n return media_data\n\n def filter_query_result_by_distance(\n self, query_result: QueryResult, distance_threshold: float\n ) -> QueryResult:\n filtered_result: QueryResult = {\n \"ids\": [],\n \"embeddings\": [],\n \"documents\": [],\n \"metadatas\": [],\n \"distances\": [],\n }\n\n for i, ids in enumerate(query_result[\"ids\"]):\n filtered_subresult = {\n \"ids\": [],\n \"embeddings\": [],\n \"documents\": [],\n \"metadatas\": [],\n \"distances\": [],\n }\n if query_result[\"distances\"][i] is None:\n continue\n\n for j, distance in enumerate(query_result[\"distances\"][i]):\n if distance >= distance_threshold:\n filtered_subresult[\"ids\"].append(query_result[\"ids\"][i][j])\n\n if \"embeddings\" in query_result and query_result[\"embeddings\"]:\n embeddings = query_result[\"embeddings\"][i]\n filtered_subresult[\"embeddings\"].append(embeddings[j])\n\n if \"documents\" in query_result and query_result[\"documents\"]:\n documents = query_result[\"documents\"][i]\n filtered_subresult[\"documents\"].append(documents[j])\n\n if \"metadatas\" in query_result and query_result[\"metadatas\"]:\n metadatas = query_result[\"metadatas\"][i]\n filtered_subresult[\"metadatas\"].append(metadatas[j])\n\n filtered_subresult[\"distances\"].append(distance)\n\n if filtered_subresult[\"ids\"]:\n filtered_result[\"ids\"].append(filtered_subresult[\"ids\"])\n filtered_result[\"distances\"].append(filtered_subresult[\"distances\"])\n\n if filtered_subresult[\"embeddings\"]:\n filtered_result[\"embeddings\"].append(\n filtered_subresult[\"embeddings\"]\n )\n\n if filtered_subresult[\"documents\"]:\n filtered_result[\"documents\"].append(filtered_subresult[\"documents\"])\n\n if filtered_subresult[\"metadatas\"]:\n filtered_result[\"metadatas\"].append(filtered_subresult[\"metadatas\"])\n\n return filtered_result\n\n def get_existing_document_ids(\n self, metadata_filters, collection_name: str\n ) -> List[str]:\n query_args = {\"where\": self._generate_where_clause(metadata_filters)}\n collection = self._get_or_create_collection(collection_name)\n\n results = []\n offset = 0\n first_iteration = True\n while offset != -1 or first_iteration:\n first_iteration = False\n query_result = collection.get(\n **query_args, offset=offset, limit=self.BATCH_SIZE\n )\n metadatas = query_result.get(\"metadatas\", [])\n document_ids = list(\n map(lambda metadata: metadata.get(\"document_id\", []), metadatas)\n )\n results.extend(document_ids)\n offset = offset + min(self.BATCH_SIZE, len(query_result.get(\"ids\")))\n if len(query_result.get(\"ids\")) == 0:\n break\n return results\n\n def count(self) -> Dict[str, int]:\n \"\"\"\n Count number of 
documents/chunks embedded in the database.\n\n :return: number of documents\n \"\"\"\n return self._get_collection_count()\n\n def delete(self, where, media_type: Optional[MEDIA_TYPE] = None):\n if not media_type or media_type == MEDIA_TYPE.AUDIO:\n media_collections = self.collections.get(MEDIA_TYPE.AUDIO)\n for collection in media_collections:\n collection.delete(where=where)\n if not media_type or media_type == MEDIA_TYPE.IMAGE:\n media_collections = self.collections.get(MEDIA_TYPE.IMAGE)\n for collection in media_collections:\n collection.delete(where=where)\n if not media_type or media_type == MEDIA_TYPE.VIDEO:\n media_collections = self.collections.get(MEDIA_TYPE.VIDEO)\n for collection in media_collections:\n collection.delete(where=where)\n\n def reset(self):\n \"\"\"\n Resets the database. Deletes all embeddings irreversibly.\n \"\"\"\n # Delete all data from the collection\n for media_collections in self.collections.values():\n for collection in media_collections:\n try:\n self.client.delete_collection(collection.name)\n except ValueError:\n raise ValueError(\n \"For safety reasons, resetting is disabled. \"\n \"Please enable it by setting `allow_reset=True` in your ChromaDbConfig\"\n ) from None\n self._set_all_collections()\n\n def _get_or_create_collection(\n self,\n collection_name: str,\n ):\n return self.client.get_or_create_collection(\n name=collection_name,\n embedding_function=self.config.embedding_function,\n metadata={\"hnsw:space\": \"cosine\"},\n )\n\n def _generate_where_clause(self, where_clause: Dict[str, any]):\n # If only one filter is supplied, return it as is\n # (no need to wrap in $and based on chroma docs)\n if not where_clause:\n return {}\n if len(where_clause.keys()) == 1:\n value = list(where_clause.values())[0]\n key = list(where_clause.keys())[0]\n if isinstance(value, list):\n where_filter = {key: {\"$in\": value}}\n else:\n where_filter = {key: value}\n return where_filter\n where_filters = []\n for k, v in where_clause.items():\n if isinstance(v, list):\n where_filters.append({k: {\"$in\": v}})\n if isinstance(v, str):\n where_filters.append({k: v})\n return {\"$and\": where_filters}\n\n def _set_all_collections(self):\n collections = {}\n for item in self.embedding_models_config.llm_models.items():\n collections[item[0]] = []\n for embedding_model in item[1]:\n media_type = item[0]\n collection_name = embedding_model.get_collection_name(media_type)\n collections[media_type].append(\n self._get_or_create_collection(collection_name)\n )\n self.collections = collections\n\n def _get_collection_count(self):\n collections_count = {}\n for media_collections in self.collections.values():\n for collection in media_collections:\n collections_count[collection.name] = collection.count()\n return collections_count" } ]
from typing import Dict, List, Optional

from deepsearchai.embedding_models_config import EmbeddingModelsConfig
from deepsearchai.enums import MEDIA_TYPE
from deepsearchai.llms.base import BaseLLM
from deepsearchai.llms.openai import OpenAi
from deepsearchai.sources.utils import SourceUtils
from deepsearchai.types import MediaData, QueryResult
from deepsearchai.vector_databases.base import BaseVectorDatabase
from deepsearchai.vector_databases.chromadb import ChromaDB
import os
import subprocess
import streamlit
5,101
class App:
    def __init__(
        self,
        embedding_models_config: Optional[EmbeddingModelsConfig] = None,
        vector_database: Optional[BaseVectorDatabase] = None,
        llm: Optional[BaseLLM] = None,
    ):
        self.embedding_models_config = (
            embedding_models_config
            if embedding_models_config
            else EmbeddingModelsConfig()
        )
        self.vector_database = (
            vector_database
            if vector_database
            else ChromaDB(embedding_models_config=self.embedding_models_config)
        )
        self.llm = llm if llm else OpenAi(self.vector_database)
        self.source_utils = SourceUtils()

    def add_data(self, source: str):
        self.source_utils.add_data(
            source, self.embedding_models_config, self.vector_database
        )

    def query(
        self,
        query: str,
        media_types: List[MEDIA_TYPE] = [MEDIA_TYPE.IMAGE],
        n_results: int = 1
    ) -> QueryResult:
        data = self.get_data(query, media_types, n_results)
        response = self.llm.query(query, data)
        return response

    def get_data(
        self,
        query: str,
        media_types: List[MEDIA_TYPE] = [MEDIA_TYPE.IMAGE],
        n_results: int = 1
class App:
    def __init__(
        self,
        embedding_models_config: Optional[EmbeddingModelsConfig] = None,
        vector_database: Optional[BaseVectorDatabase] = None,
        llm: Optional[BaseLLM] = None,
    ):
        self.embedding_models_config = (
            embedding_models_config
            if embedding_models_config
            else EmbeddingModelsConfig()
        )
        self.vector_database = (
            vector_database
            if vector_database
            else ChromaDB(embedding_models_config=self.embedding_models_config)
        )
        self.llm = llm if llm else OpenAi(self.vector_database)
        self.source_utils = SourceUtils()

    def add_data(self, source: str):
        self.source_utils.add_data(
            source, self.embedding_models_config, self.vector_database
        )

    def query(
        self,
        query: str,
        media_types: List[MEDIA_TYPE] = [MEDIA_TYPE.IMAGE],
        n_results: int = 1
    ) -> QueryResult:
        data = self.get_data(query, media_types, n_results)
        response = self.llm.query(query, data)
        return response

    def get_data(
        self,
        query: str,
        media_types: List[MEDIA_TYPE] = [MEDIA_TYPE.IMAGE],
        n_results: int = 1
) -> Dict[MEDIA_TYPE, List[MediaData]]:
5
2023-10-27 06:46:22+00:00
8k
Paulo-Lopes-Estevao/ci-generator
cigen/core/github/go_action_test.py
[ { "identifier": "GoActionSteps", "path": "cigen/core/github/go_action.py", "snippet": "class GoActionSteps:\n def __init__(self, version) -> None:\n if version is None:\n raise TypeError('Version is required')\n self.version = version\n\n @staticmethod\n def step_checkout():\n return {\n 'name': 'Checkout',\n 'uses': 'actions/checkout@v4'\n }\n\n def step_setup_go(self):\n if self.version is list:\n raise Exception('Version size must be 1 using Version range')\n\n return {\n 'name': 'Setup Go',\n 'uses': 'actions/setup-go@v4',\n 'with': {\n 'go-version': self.version\n }\n }\n\n def step_setup_go_with_version_list(self):\n return {\n 'strategy': {\n 'matrix': {\n 'go-version': self.version if self.version is None else self.version_list_default()\n }\n },\n 'steps': [\n self.step_checkout(),\n {\n 'name': 'Setup Go',\n 'uses': 'actions/setup-go@v4',\n 'with': {\n 'go-version': '${{ matrix.go-version }}'\n }\n },\n ]\n }\n\n @staticmethod\n def step_setup_go_with_versions_matrix():\n return {\n 'name': 'Setup Go',\n 'uses': 'actions/setup-go@v4',\n 'with': {\n 'go-version': '${{ matrix.go-version }}'\n }\n }\n\n @staticmethod\n def step_run_cache():\n return {\n 'name': 'Cache',\n 'uses': 'actions/cache@v2',\n 'with': {\n 'path': '~/.cache/go-build',\n 'key': '${{ runner.os }}-go-${{ hashFiles(\\'**/go.sum\\') }}'\n }\n }\n\n @staticmethod\n def step_run_install_dependencies():\n return {\n 'name': 'Install Dependencies',\n 'run': 'go mod download'\n }\n\n @staticmethod\n def step_run_tests():\n return {\n 'name': 'Run Tests',\n 'run': 'go test ./...'\n }\n\n @staticmethod\n def step_run_tests_and_coverage():\n return {\n 'name': 'Run Tests and Coverage',\n 'run': 'go test ./... -coverprofile=coverage.out'\n }\n\n @staticmethod\n def step_run_tests_and_coverage_with_coverage():\n return {\n 'name': 'Run Tests and Coverage',\n 'run': 'go test ./... -coverprofile=coverage.out && go tool cover -func=coverage.out'\n }\n\n @staticmethod\n def step_run_tests_and_coverage_with_coverage_and_html():\n return {\n 'name': 'Run Tests and Coverage',\n 'run': 'go test ./... -coverprofile=coverage.out && go tool cover -html=coverage.out'\n }\n\n @staticmethod\n def step_run_tests_and_coverage_with_coverage_and_html_and_upload():\n return {\n 'name': 'Run Tests and Coverage',\n 'run': 'go test ./... 
-coverprofile=coverage.out && go tool cover -html=coverage.out && bash <(curl -s '\n 'https://codecov.io/bash)'\n }\n\n @staticmethod\n def step_run_build():\n return {\n 'name': 'Build',\n 'run': 'go build ./...'\n }\n\n @staticmethod\n def version_list_default():\n return ['1.19', '1.20', '1.21.x']" }, { "identifier": "ActionCIGenGolang", "path": "cigen/core/github/go_action.py", "snippet": "class ActionCIGenGolang:\n\n def __init__(self):\n self._builder = None\n\n def __int__(self):\n self._builder = None\n\n @property\n def builder(self) -> GoActionBuilder:\n return self._builder\n\n @builder.setter\n def builder(self, builder: GoActionBuilder) -> None:\n self._builder = builder\n\n def action_build_base(self):\n return self.builder.base()\n\n def action_build_base_with_version_list(self):\n return self.builder.base_version_list()\n\n def _list_steps(self):\n return self.builder.list_steps()\n\n def _action_steps_run_build(self):\n self.builder.add_steps(self.builder.step_run_build())" }, { "identifier": "GoActionBuilderImpl", "path": "cigen/core/github/go_action.py", "snippet": "class GoActionBuilderImpl(GoActionBuilder):\n def __init__(self, name, version, on, env=None) -> None:\n self._steps = None\n self._build = None\n self.name = name\n self.version = version\n self.on = on\n self.env = env\n self.step = Steps([])\n self.reset()\n self.reset_steps()\n\n def reset(self):\n self._build = Action(self.name, self.version, self.on, self.step, self.env)\n\n def reset_steps(self):\n self._steps = GoActionSteps(self.version)\n\n @property\n def build_steps(self):\n build_steps = self._steps\n self.reset_steps()\n return build_steps\n\n def add_steps(self, step):\n return self.step.add(step)\n\n @property\n def build(self):\n build = self._build\n self.reset()\n return build\n\n def base(self):\n return self._build.base()\n\n def base_version_list(self):\n return self._build.base_version_list()\n\n def base_to_yaml(self):\n return self._build.base_to_yaml()\n\n def run(self):\n return self._build.run()\n\n def run_with_env(self):\n self.step.add(self._build.run_with_env())\n\n def step_checkout(self):\n self.step.add_at(self._steps.step_checkout(), 0)\n\n def step_setup_go(self):\n self.step.add_at(self._steps.step_setup_go(), 1)\n\n def step_setup_go_with_version_list(self):\n self.step.add(self._steps.step_setup_go_with_version_list())\n\n def step_setup_go_with_versions_matrix(self):\n self.step.add_at(self._steps.step_setup_go_with_versions_matrix(), 1)\n\n def step_run_cache(self):\n self.step.add(self._steps.step_run_cache())\n\n def step_run_install_dependencies(self):\n self.step.add(self._steps.step_run_install_dependencies())\n\n def step_run_tests(self):\n self.step.add(self._steps.step_run_tests())\n\n def step_run_tests_and_coverage(self):\n self.step.add(self._steps.step_run_tests_and_coverage())\n\n def step_run_tests_and_coverage_with_coverage(self):\n self.step.add(self._steps.step_run_tests_and_coverage_with_coverage())\n\n def step_run_tests_and_coverage_with_coverage_and_html(self):\n self.step.add(self._steps.step_run_tests_and_coverage_with_coverage_and_html())\n\n def step_run_tests_and_coverage_with_coverage_and_html_and_upload(self):\n self.step.add(self._steps.step_run_tests_and_coverage_with_coverage_and_html_and_upload())\n\n def step_run_build(self):\n self.step.add(self._steps.step_run_build())\n\n def version_list_default(self):\n self._steps.version_list_default()\n\n def set_version(self, param):\n self._steps.version = param" }, { "identifier": "On", 
"path": "cigen/core/github/github_action.py", "snippet": "class On(OnEvent):\n push: Push\n pull_request: PullRequest\n\n def __init__(self, push: Push, pull_request: PullRequest) -> None:\n self.push = push\n self.pull_request = pull_request\n\n def to_dict(self) -> dict:\n return {\n 'push': self.push.to_dict(),\n 'pull_request': self.pull_request.to_dict()\n }\n\n def on_push(self) -> dict:\n return {\n 'push': self.push.to_dict()\n }\n\n def on_pull_request(self) -> dict:\n return {\n 'pull_request': self.pull_request.to_dict()\n }\n\n def on_push_and_pull_request(self) -> dict:\n return {\n **self.on_push(),\n **self.on_pull_request()\n }\n\n def to_yaml(self):\n return yaml.dump(self.to_dict())" }, { "identifier": "Steps", "path": "cigen/core/github/github_action.py", "snippet": "class Steps:\n def __init__(self, steps: list[dict]) -> None:\n self.steps = steps\n\n def to_dict(self) -> list[dict]:\n return self.steps\n\n def add(self, step: dict) -> None:\n self.steps.append(step)\n\n def add_at(self, step: dict, index: int) -> None:\n self.steps.insert(index, step)\n\n def add_all(self, steps: list[dict]) -> None:\n self.steps.extend(steps)\n\n def to_yaml(self):\n return yaml.dump(self.to_dict())" }, { "identifier": "Push", "path": "cigen/core/github/github_action.py", "snippet": "class Push:\n branches: list[str]\n tags: list[str]\n\n def __init__(self, branches: list[str], tags=None) -> None:\n if tags is None:\n tags = []\n self.branches = branches\n self.tags = tags\n\n def to_dict(self) -> dict:\n if len(self.tags) == 0:\n return {\n 'branches': self.branches\n }\n else:\n return {\n 'branches': self.branches,\n 'tags': self.tags\n }\n\n def to_yaml(self) -> str | bytes:\n return yaml.dump(self.to_dict())" }, { "identifier": "PullRequest", "path": "cigen/core/github/github_action.py", "snippet": "class PullRequest:\n branches: list[str]\n\n def __init__(self, branches: list[str]) -> None:\n self.branches = branches\n\n def to_dict(self) -> dict:\n return {\n 'branches': self.branches\n }\n\n def to_yaml(self) -> str | bytes:\n return yaml.dump(self.to_dict())" }, { "identifier": "OnEventFactory", "path": "cigen/core/github/github_action.py", "snippet": "class OnEventFactory:\n @staticmethod\n def create(on: OnEvent) -> OnEvent:\n if inspect.isclass(on):\n return on()\n else:\n return on\n\n @staticmethod\n def create_push(branches: list[str], tags=None) -> OnEvent:\n return OnPush(Push(branches, tags))\n\n @staticmethod\n def create_pull_request(branches: list[str]) -> OnEvent:\n return OnPullRequest(PullRequest(branches))\n\n @staticmethod\n def create_events(events: dict) -> dict:\n on_events = []\n\n if 'push' in events:\n on_events.append(OnPush(Push(events['push']['branches'])))\n if 'pull_request' in events:\n on_events.append(OnPullRequest(PullRequest(events['pull_request']['branches'])))\n return events" }, { "identifier": "Action", "path": "cigen/core/github/github_action.py", "snippet": "class Action:\n on: OnEvent\n steps: Steps\n\n def __init__(self, name, version, on, steps: Steps, env=None) -> None:\n self.name = name\n self.version = version\n self.on = on\n self.steps = steps\n self.env = env\n\n def base(self):\n return base_action(self.name, self.on, self.steps)\n\n def base_version_list(self):\n return base_version_list_action(self.name, self.on, self.steps, self.version)\n\n def base_to_yaml(self):\n return yaml.dump(self.base())\n\n def run(self):\n return self.base()\n\n def run_with_env(self):\n return {\n **self.base(),\n 'env': self.env\n }" } ]
import unittest

from cigen.core.github.go_action import GoActionSteps, ActionCIGenGolang, GoActionBuilderImpl
from cigen.core.github.github_action import On, Steps, Push, PullRequest, OnEventFactory, Action
3,649
        self.assertEqual(go_action.base(), {
            'name': 'Go Action',
            'on': {
                'push': {
                    'branches': ['main']
                },
                'pull_request': {
                    'branches': ['main']
                }
            },
            'jobs': {
                'build': {
                    'name': 'Build',
                    'runs-on': 'ubuntu-latest',
                    'steps': [
                        go_action_steps.step_checkout(),
                        go_action_steps.step_setup_go(),
                        go_action_steps.step_run_build(),
                        go_action_steps.step_run_tests(),
                    ]
                }
            }
        })

    def test_base_push(self):
        on = On(
            Push(['main']),
            PullRequest(['main'])
        )
        go_action_steps = GoActionSteps(['1.17'])
        steps = Steps([
            go_action_steps.step_checkout(),
            go_action_steps.step_setup_go(),
            go_action_steps.step_run_build(),
            go_action_steps.step_run_tests(),
        ])
        go_action = Action(
            'Go Action',
            go_action_steps.version,
            on.on_push(),
            steps,
            {
                'GO_VERSION': '1.17'
            }
        )
        self.assertEqual(go_action.base(), {
            'name': 'Go Action',
            'on': {
                'push': {
                    'branches': ['main']
                }
            },
            'jobs': {
                'build': {
                    'name': 'Build',
                    'runs-on': 'ubuntu-latest',
                    'steps': [
                        go_action_steps.step_checkout(),
                        go_action_steps.step_setup_go(),
                        go_action_steps.step_run_build(),
                        go_action_steps.step_run_tests(),
                    ]
                }
            }
        })

    def test_go_runVersionWithRange(self):
        on = On(
            Push(['main']),
            PullRequest(['main'])
        )
        go_action_steps = GoActionSteps(['1.19'])
        steps = Steps([
            go_action_steps.step_checkout(),
            go_action_steps.step_setup_go_with_versions_matrix(),
            go_action_steps.step_run_build(),
            go_action_steps.step_run_tests(),
        ])
        go_action = Action(
            'Go Action',
            go_action_steps.version,
            on.on_push(),
            steps,
        )
        self.assertEqual(go_action.base_version_list(), {
            'name': 'Go Action',
            'on': {
                'push': {
                    'branches': ['main']
                }
            },
            'jobs': {
                'build': {
                    'name': 'Build',
                    'runs-on': 'ubuntu-latest',
                    'strategy': {
                        'matrix': {
                            'go-version': go_action_steps.version
                        }
                    },
                    'steps': [
                        go_action_steps.step_checkout(),
                        go_action_steps.step_setup_go_with_versions_matrix(),
                        go_action_steps.step_run_build(),
                        go_action_steps.step_run_tests(),
                    ]
                }
            }
        })

    def test_action_ci_base(self):
        action_ciGen_golang = ActionCIGenGolang()
class GoActionTestCase(unittest.TestCase):
    def test_something(self):
        self.assertNotEqual(True, False) # add assertion here

    def test_base(self):
        on = On(
            Push(['main']),
            PullRequest(['main'])
        )
        go_action_steps = GoActionSteps('1.17')
        steps = Steps([
            go_action_steps.step_checkout(),
            go_action_steps.step_setup_go(),
            go_action_steps.step_run_build(),
            go_action_steps.step_run_tests(),
        ])
        go_action = Action(
            'Go Action',
            go_action_steps.version,
            on.to_dict(),
            steps,
            {
                'GO_VERSION': '1.17'
            }
        )
        self.assertEqual(go_action.base(), {
            'name': 'Go Action',
            'on': {
                'push': {
                    'branches': ['main']
                },
                'pull_request': {
                    'branches': ['main']
                }
            },
            'jobs': {
                'build': {
                    'name': 'Build',
                    'runs-on': 'ubuntu-latest',
                    'steps': [
                        go_action_steps.step_checkout(),
                        go_action_steps.step_setup_go(),
                        go_action_steps.step_run_build(),
                        go_action_steps.step_run_tests(),
                    ]
                }
            }
        })

    def test_base_push(self):
        on = On(
            Push(['main']),
            PullRequest(['main'])
        )
        go_action_steps = GoActionSteps(['1.17'])
        steps = Steps([
            go_action_steps.step_checkout(),
            go_action_steps.step_setup_go(),
            go_action_steps.step_run_build(),
            go_action_steps.step_run_tests(),
        ])
        go_action = Action(
            'Go Action',
            go_action_steps.version,
            on.on_push(),
            steps,
            {
                'GO_VERSION': '1.17'
            }
        )
        self.assertEqual(go_action.base(), {
            'name': 'Go Action',
            'on': {
                'push': {
                    'branches': ['main']
                }
            },
            'jobs': {
                'build': {
                    'name': 'Build',
                    'runs-on': 'ubuntu-latest',
                    'steps': [
                        go_action_steps.step_checkout(),
                        go_action_steps.step_setup_go(),
                        go_action_steps.step_run_build(),
                        go_action_steps.step_run_tests(),
                    ]
                }
            }
        })

    def test_go_runVersionWithRange(self):
        on = On(
            Push(['main']),
            PullRequest(['main'])
        )
        go_action_steps = GoActionSteps(['1.19'])
        steps = Steps([
            go_action_steps.step_checkout(),
            go_action_steps.step_setup_go_with_versions_matrix(),
            go_action_steps.step_run_build(),
            go_action_steps.step_run_tests(),
        ])
        go_action = Action(
            'Go Action',
            go_action_steps.version,
            on.on_push(),
            steps,
        )
        self.assertEqual(go_action.base_version_list(), {
            'name': 'Go Action',
            'on': {
                'push': {
                    'branches': ['main']
                }
            },
            'jobs': {
                'build': {
                    'name': 'Build',
                    'runs-on': 'ubuntu-latest',
                    'strategy': {
                        'matrix': {
                            'go-version': go_action_steps.version
                        }
                    },
                    'steps': [
                        go_action_steps.step_checkout(),
                        go_action_steps.step_setup_go_with_versions_matrix(),
                        go_action_steps.step_run_build(),
                        go_action_steps.step_run_tests(),
                    ]
                }
            }
        })

    def test_action_ci_base(self):
        action_ciGen_golang = ActionCIGenGolang()
on_event_push = OnEventFactory.create_push(['main', 'master']).to_dict()
7
2023-10-31 03:36:36+00:00
8k
TheCompAce/ShellSpeak
modules/shellSpeak.py
[ { "identifier": "CommandResult", "path": "modules/command_result.py", "snippet": "class CommandResult:\n def __init__(self, stdout, stderr):\n self.out = stdout\n self.err = stderr" }, { "identifier": "LLM", "path": "modules/llm.py", "snippet": "class LLM:\n def __init__(self, model_type, use_cache=False, cache_file=None):\n self.ClearModel(model_type)\n self.use_cache = use_cache\n if use_cache:\n self.cache = ResponseCache(cache_file)\n\n def ClearModel(self, model_type):\n self.model = ModelTypes(model_type)\n self.modelObj = None\n self.tokenizerObj = None\n self.pipeObj = None\n\n def SetupModel(self):\n if self.model == ModelTypes.Mistral:\n return self._setup_mistral()\n elif self.model == ModelTypes.StableBeluga7B:\n return self._setup_beluga_7b()\n elif self.model == ModelTypes.Zephyr7bAlpha:\n return self._setup_zephyr_7b()\n elif self.model == ModelTypes.Zephyr7bBeta:\n return self._setup_zephyr_7bB()\n\n async def async_ask(llm, system_prompt, user_prompt, model_type=None, max_tokens=4096, return_type=\"text\"):\n loop = asyncio.get_event_loop()\n response = await loop.run_in_executor(executor, llm.ask, system_prompt, user_prompt, model_type, max_tokens, return_type)\n return response\n\n def ask(self, system_prompt, user_prompt, model_type=None, max_tokens=4096, return_type=\"text\"):\n if self.use_cache:\n cached_response = self.cache.get(system_prompt, user_prompt)\n if cached_response:\n return cached_response\n response = self._ask(system_prompt, user_prompt, model_type, max_tokens, return_type)\n if self.use_cache:\n self.cache.set(system_prompt, user_prompt, response)\n return response\n\n def _ask(self, system_prompt, user_prompt, model_type = None, max_tokens=4096, return_type=\"text\"):\n \n if model_type is None:\n model_type = self.model\n elif model_type is not self.model:\n self.ClearModel(model_type)\n if model_type == ModelTypes.OpenAI:\n return self._ask_openai(system_prompt, user_prompt, max_tokens=16000, return_type=return_type)\n elif model_type == ModelTypes.OpenAI4:\n return self._ask_openai(system_prompt, user_prompt, model=\"gpt-4-1106-preview\", max_tokens=140000, return_type=return_type)\n elif model_type == ModelTypes.Mistral:\n return self._ask_mistral(system_prompt, user_prompt)\n elif model_type == ModelTypes.StableBeluga7B:\n return self._ask_stable_beluga_7b(system_prompt, user_prompt)\n elif model_type == ModelTypes.Zephyr7bAlpha:\n return self._ask_zephyr_7b(system_prompt, user_prompt)\n elif model_type == ModelTypes.Zephyr7bBeta:\n return self._ask_zephyr_7bB(system_prompt, user_prompt)\n elif model_type == ModelTypes.Falcon7BInst:\n return self._ask_falcon_7b_instruct(system_prompt, user_prompt)\n\n def _ask_openai(self, system_prompt, user_prompt, model = \"gpt-3.5-turbo-1106\", max_tokens=16000, return_type=\"text\"):\n # Placeholder for actual OpenAI API request\n # Uncomment and complete the following code in your local environment\n api_key = os.environ.get(\"OPENAI_API_KEY\", \"your-default-openai-api-key-here\")\n api_url = \"https://api.openai.com/v1/chat/completions\"\n token_ct = 0\n token_ct = max_tokens - int(get_token_count(system_prompt + \"\\n\" + user_prompt) + 20)\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Bearer {api_key}\",\n }\n # \"max_tokens\": token_ct,\n data ={\n \"model\" : model,\n \"response_format\": { \"type\": return_type},\n \"messages\" : [\n {\n \"role\": \"system\",\n \"content\": system_prompt\n },\n {\n \"role\": \"user\",\n \"content\": user_prompt\n }\n ]\n }\n\n 
tries = 2\n response = None\n is_error = False\n while tries > 0:\n try:\n response = requests.post(api_url, headers=headers, json=data, timeout=(2, 60))\n tries = 0\n except requests.Timeout:\n tries -= 1\n if tries == 0:\n is_error = True\n response = \"Timeout\"\n except requests.exceptions.RequestException as e:\n is_error = True\n response = e.response\n tries -= 1\n\n try:\n response = requests.post(api_url, headers=headers, json=data, timeout=(2, 60))\n if response.status_code == 200:\n response_data = response.json()\n return response_data.get(\"choices\", [{}])[0].get(\"message\", {}).get(\"content\", \"\")\n elif response.status_code == 401:\n return \"Error: Unauthorized - Invalid API key (OPENAI_API_KEY).\"\n else:\n return f\"Error: Received HTTP status {response.status_code} - {response.text}\"\n except requests.Timeout:\n return \"Error: Timeout occurred while contacting OpenAI API.\"\n except requests.exceptions.RequestException as e:\n return f\"Error: An error occurred during the request - {str(e)}\"\n\n\n def _ask_mistral(self, system_prompt, user_prompt):\n if self.tokenizerObj is None or self.modelObj is None:\n self._setup_mistral()\n prompt = f\"<s>[INST] {system_prompt} {user_prompt} [/INST]\"\n inputs = self.tokenizerObj(prompt, return_tensors=\"pt\")\n outputs = self.modelObj.generate(**inputs, max_new_tokens=4096)\n decoded = self.tokenizerObj.decode(outputs[0], skip_special_tokens=True)\n return decoded\n \n def _setup_mistral(self):\n if self.modelObj is None or self.tokenizerObj is None:\n self.modelObj = AutoModelForCausalLM.from_pretrained(\"mistralai/Mistral-7B-Instruct-v0.1\")\n self.tokenizerObj = AutoTokenizer.from_pretrained(\"mistralai/Mistral-7B-Instruct-v0.1\")\n\n def _setup_beluga_7b(self):\n if self.modelObj is None or self.tokenizerObj is None:\n self.modelObj = AutoModelForCausalLM.from_pretrained(\"stabilityai/StableBeluga-7B\", torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map=\"auto\")\n self.tokenizerObj = AutoTokenizer.from_pretrained(\"stabilityai/StableBeluga-7B\", use_fast=False)\n\n\n def _ask_stable_beluga_7b(self, system_prompt, user_prompt):\n if self.tokenizerObj is None or self.modelObj is None:\n self._setup_beluga_7b()\n prompt = f\"### System: {system_prompt}\\\\n\\\\n### User: {user_prompt}\\\\n\\\\n### Assistant:\\\\n\"\n inputs = self.tokenizerObj(prompt, return_tensors=\"pt\").to(\"cuda\")\n output = self.modelObj.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=4096)\n return self.tokenizerObj.decode(output[0], skip_special_tokens=True)\n\n def _ask_zephyr_7b(self, system_prompt, user_prompt):\n if self.pipeObj is None:\n self._setup_zephyr_7b()\n messages = [\n {\n \"role\": \"system\",\n \"content\": system_prompt,\n },\n {\"role\": \"user\", \"content\": user_prompt},\n ]\n prompt = self.pipeObj.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n outputs = self.pipeObj(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)\n return outputs[0][\"generated_text\"]\n\n def _setup_zephyr_7b(self):\n if self.pipeObj is None:\n self.pipeObj= pipeline(\"text-generation\", model=\"HuggingFaceH4/zephyr-7b-alpha\", torch_dtype=torch.bfloat16, device_map=\"auto\")\n\n def _ask_zephyr_7bB(self, system_prompt, user_prompt):\n if self.pipeObj is None:\n self._setup_zephyr_7bB()\n messages = [\n {\n \"role\": \"system\",\n \"content\": system_prompt,\n },\n {\"role\": \"user\", \"content\": user_prompt},\n ]\n prompt = 
self.pipeObj.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n outputs = self.pipeObj(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)\n return outputs[0][\"generated_text\"]\n\n def _setup_zephyr_7bB(self):\n if self.pipeObj is None:\n self.pipeObj= pipeline(\"text-generation\", model=\"HuggingFaceH4/zephyr-7b-beta\", torch_dtype=torch.bfloat16, device_map=\"auto\")\n\n def _setup_falcon_7b_instruct(self):\n if self.modelObj is None or self.tokenizerObj is None:\n self.modelObj = AutoModelForCausalLM.from_pretrained(\"tiiuae/falcon-7b-instruct\").to(\"cuda\")\n self.tokenizerObj = AutoTokenizer.from_pretrained(\"tiiuae/falcon-7b-instruct\")\n\n\n\n def _ask_falcon_7b_instruct(self, system_prompt, user_prompt):\n if self.tokenizerObj is None or self.modelObj is None:\n self._setup_falcon_7b_instruct()\n device = 0 # This assumes that you have at least one GPU and it's device 0\n pipeline = transformers.pipeline(\n \"text-generation\",\n model=self.modelObj,\n tokenizer=self.tokenizerObj,\n torch_dtype=torch.bfloat16,\n trust_remote_code=True,\n device=device, # Specify the device here\n )\n sequences = pipeline(\n f\"{system_prompt}\\n{user_prompt}\",\n max_length=200,\n do_sample=True,\n top_k=10,\n num_return_sequences=1,\n eos_token_id=self.tokenizerObj.eos_token_id,\n )\n return sequences[0]['generated_text']\n\n\n\n def __repr__(self):\n return f\"LLMBase(model={self.model})\"" }, { "identifier": "ModelTypes", "path": "modules/llm.py", "snippet": "class ModelTypes(Enum):\n OpenAI = \"OpenAI\"\n OpenAI4 = \"OpenAI4\"\n Mistral = \"Mistral\"\n StableBeluga7B = \"StableBeluga7B\"\n Zephyr7bAlpha = \"Zephyr7bAlpha\"\n Zephyr7bBeta = \"Zephyr7bBeta\"\n Falcon7BInst = \"Falcon7BInst\"" }, { "identifier": "CommandRunner", "path": "modules/run_command.py", "snippet": "class CommandRunner:\n def __init__(self, shell_speak):\n self.shell_speak = shell_speak\n self.collected_output = \"\"\n self.collected_history = \"\"\n self.pause_time = 0.5\n self.use_input = False\n\n async def run(self, command):\n self.collected_output = \"\"\n self.collected_history = \"\"\n\n my_error = {\n \"err\": False,\n \"desc\": \"\"\n }\n \n process = await asyncio.create_subprocess_shell(\n command,\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE\n )\n\n async def read_lines(stream, timeout=1.0):\n lines = []\n while True:\n try:\n line = await asyncio.wait_for(stream.readline(), timeout)\n if line:\n lines.append(line)\n else:\n # print(\"No more output from stream.\")\n break\n except asyncio.TimeoutError:\n # print(\"Readline timed out. 
Process might be waiting for input or has finished.\")\n break\n return lines\n\n async def read_stream(stream, callback):\n while True:\n await asyncio.sleep(self.pause_time) \n lines = await read_lines(stream)\n for line in lines:\n self.use_input = False\n if line:\n if line != b'':\n decode_line = line.decode('utf-8').strip()\n if decode_line != \":WAIT_FOR_INPUT:\":\n self.collected_output += \"\\n\" + decode_line\n self.collected_history += \"\\n\" + decode_line\n else:\n self.use_input = True\n\n # Check if the process is still running\n return_code = process.returncode # None if the process is still running\n if return_code is not None:\n # The process has finished, so just return the collected output\n break\n\n async def write_stream():\n # Allow some time for the process to complete\n await asyncio.sleep(self.pause_time) \n \n while True:\n try:\n # Wait for a short period to see if new output arrives\n await asyncio.sleep(self.pause_time) \n\n # Check if the process is still running\n return_code = process.returncode # None if the process is still running\n if return_code is not None:\n # The process has finished, so just return the collected output\n break\n\n # Check for new output again.\n if self.collected_output:\n translated_output = self.shell_speak.translate_output(self.collected_output, True).strip()\n\n # Encase the 'translated_output' is empty from LLM, fix with orginal text.\n if translated_output == \"\":\n translated_output = self.collected_output\n\n self.shell_speak.display_output(translated_output)\n self.collected_history += \"\\n\" + self.collected_output\n self.collected_output = \"\"\n else: \n # No new output, so prompt for user input\n user_input = None\n if self.use_input:\n user_input = await asyncio.to_thread(input, self.collected_output)\n self.use_input = False\n \n if user_input:\n process.stdin.write(user_input.encode() + b'\\n')\n else:\n process.stdin.close() # Signal EOF to the subprocess\n except EOFError:\n # Handle Ctrl-Z (EOF) to cancel if needed\n my_error[\"err\"] = True\n my_error[\"desc\"] = \"Ctrl-Z\"\n print(\"Ctrl-Z detected, exiting...\")\n break\n except Exception as e:\n # Log or handle other exceptions\n my_error[\"err\"] = True\n my_error[\"desc\"] = e\n break # Optionally break out of the loop on any exception\n\n # Optionally add a delay to avoid busy-waiting\n # await asyncio.sleep(0.1)\n\n\n await asyncio.gather(\n read_stream(process.stdout, self.handle_stdout),\n read_stream(process.stderr, self.handle_stderr),\n write_stream()\n )\n\n # await asyncio.sleep(self.pause_time) \n # stdout, stderr = await process.communicate()\n\n stderr = \"\"\n\n if my_error[\"err\"]:\n stderr = my_error[\"desc\"]\n\n\n # print(f\"self.collected_history = {self.collected_history}\")\n return self.collected_output, stderr if not my_error[\"err\"] else stderr\n\n\n def handle_stdout(self, line):\n if line.strip() != \"\" and line != \":WAIT_FOR_INPUT:\":\n self.collected_history += line + \"\\n\"\n self.collected_output += line + \"\\n\"\n\n def handle_stderr(self, line):\n formatted_error = self.shell_speak.translate_output(line, True)\n self.shell_speak.display_output(formatted_error)" }, { "identifier": "get_file_size", "path": "modules/utils.py", "snippet": "def get_file_size(filepath):\n try:\n return os.path.getsize(filepath)\n except FileNotFoundError:\n return 0\n except PermissionError:\n return \"Permission denied.\"\n except Exception as e:\n return f\"An error occurred: {e}\"" }, { "identifier": "is_valid_filename", "path": 
"modules/utils.py", "snippet": "def is_valid_filename(filename):\n # Normalize unicode characters\n filename = unicodedata.normalize('NFC', filename)\n\n # Common invalid characters across *nix and Windows\n invalid_chars = r'[<>:\"/\\\\|?*\\x00-\\x1F]'\n if any(char in invalid_chars for char in filename):\n return False # Contains invalid characters\n if len(filename.encode('utf-8')) > 255:\n return False # Exceeds length restrictions when encoded in UTF-8\n \n # Windows-specific checks\n if platform.system() == \"Windows\":\n # Windows does not allow filenames to end with a dot or a space\n if filename.endswith('.') or filename.endswith(' '):\n return False\n # Check for valid drive letter\n if re.match(r'^[a-zA-Z]:\\\\', filename):\n return False\n # Windows reserved filenames\n reserved_names = (\n \"CON\", \"PRN\", \"AUX\", \"NUL\",\n \"COM1\", \"COM2\", \"COM3\", \"COM4\", \"COM5\", \"COM6\", \"COM7\", \"COM8\", \"COM9\",\n \"LPT1\", \"LPT2\", \"LPT3\", \"LPT4\", \"LPT5\", \"LPT6\", \"LPT7\", \"LPT8\", \"LPT9\"\n )\n basename, _, ext = filename.rpartition('.')\n if basename.upper() in reserved_names:\n if not ext or basename.upper() != filename.upper():\n return False\n\n # *nix-specific checks (optional)\n # For example, disallowing hidden files (starting with a dot)\n # if filename.startswith('.'):\n # return False\n\n return True" }, { "identifier": "list_files_and_folders_with_sizes", "path": "modules/utils.py", "snippet": "def list_files_and_folders_with_sizes(start_path):\n entries = os.scandir(start_path)\n files_and_folders = []\n\n for entry in entries:\n # This is a check for the entry being a file or a folder at the top level only\n if entry.is_dir(follow_symlinks=False):\n entry_type = 'Folder'\n size = 0 # Do not sum up sizes within the folder\n elif entry.is_file(follow_symlinks=False):\n entry_type = 'File'\n size = get_size(entry.path) # Get the size of the file\n else:\n entry_type = 'Other' # For symbolic links, sockets, etc.\n size = 0 # Other types do not have a size\n\n files_and_folders.append({\n 'name': entry.name,\n 'type': entry_type,\n 'size': size # Size is in bytes\n })\n return files_and_folders" }, { "identifier": "load_settings", "path": "modules/utils.py", "snippet": "def load_settings(filepath):\n try:\n with open(os.path.join(filepath, \"settings.json\"), 'r') as f:\n settings = json.load(f)\n chk_file = os.path.join(filepath, settings['command_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 'r') as f:\n settings['command_prompt'] = f.read()\n \n chk_file = os.path.join(filepath, settings['display_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 'r') as f:\n settings['display_prompt'] = f.read()\n\n chk_file = os.path.join(filepath, settings['user_command_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 'r') as f:\n settings['user_command_prompt'] = f.read()\n\n chk_file = os.path.join(filepath, settings['python_command_prompt'])\n if os.path.isfile(chk_file):\n with open(chk_file, 'r') as f:\n settings['python_command_prompt'] = f.read()\n\n return settings\n except FileNotFoundError:\n return {}" }, { "identifier": "map_possible_commands", "path": "modules/utils.py", "snippet": "def map_possible_commands():\n # Get the operating system name\n os_name = platform.system().lower()\n \n # Get the PATH environment variable\n path_variable = os.environ.get('PATH', '')\n \n # Split it into individual directories\n directories = path_variable.split(os.pathsep)\n \n # Initialize a set to store unique command 
names\n unique_commands = set()\n \n # List of wanted file extensions for Windows\n windows_wanted_extensions = ['.exe', '.bat', '.com', '.sh']\n \n for directory in directories:\n try:\n # List all files in the directory\n files = os.listdir(directory)\n \n # Filter out executable files and add them to the set\n for file in files:\n file_path = os.path.join(directory, file)\n \n # Get the file extension\n _, extension = os.path.splitext(file)\n \n if os.access(file_path, os.X_OK):\n if os_name == 'windows':\n if extension.lower() in windows_wanted_extensions:\n file = file.replace(f'{windows_wanted_extensions}', '')\n unique_commands.add(file)\n else:\n # On Unix-like systems, rely on executable permission\n unique_commands.add(file)\n \n except FileNotFoundError:\n # Directory in PATH does not exist, skip\n continue\n except PermissionError:\n # Don't have permission to access directory, skip\n continue\n \n commands_str = ','.join(unique_commands)\n return commands_str" }, { "identifier": "get_os_name", "path": "modules/utils.py", "snippet": "def get_os_name():\n return platform.system()" }, { "identifier": "print_colored_text", "path": "modules/utils.py", "snippet": "def print_colored_text(text, end_newline=True):\n try:\n end = \"\\n\" if end_newline else \"\"\n console.print(text, end=end)\n except Exception as e:\n print(text)" }, { "identifier": "capture_styled_input", "path": "modules/utils.py", "snippet": "def capture_styled_input(prompt):\n # Print the prompt without a newline at the end\n print_colored_text(prompt, end_newline=False)\n \n # Capture and return user input\n return input()" }, { "identifier": "read_file", "path": "modules/utils.py", "snippet": "def read_file(filepath):\n print(f\"Reading file {filepath}.\")\n try:\n with open(filepath, 'r') as f:\n return f.read()\n except FileNotFoundError:\n return \"File not found.\"\n except PermissionError:\n return \"Permission denied.\"\n except Exception as e:\n return f\"An error occurred: {e}\"" }, { "identifier": "redact_json_values", "path": "modules/utils.py", "snippet": "def redact_json_values(story, keys_to_redact):\n # Find all JSON objects in the string\n json_objects = re.findall(r'\\{.*?\\}', story, re.DOTALL)\n \n for json_obj in json_objects:\n # Load the JSON object into a Python dictionary\n try:\n data = json.loads(json_obj)\n except json.JSONDecodeError:\n continue # Skip if it's not valid JSON\n \n # Recursive function to redact specified keys\n def redact(data):\n if isinstance(data, dict):\n for key in data:\n if key in keys_to_redact:\n data[key] = \"...\"\n else:\n redact(data[key])\n elif isinstance(data, list):\n for item in data:\n redact(item)\n\n # Redact the necessary keys\n redact(data)\n \n # Convert the dictionary back to a JSON string\n redacted_json = json.dumps(data, indent=2)\n \n # Replace the original JSON string in the story\n story = story.replace(json_obj, redacted_json)\n \n return story" }, { "identifier": "replace_placeholders", "path": "modules/utils.py", "snippet": "def replace_placeholders(text, **kwargs):\n \"\"\"\n Replaces placeholders in the given text with the values provided.\n\n Parameters:\n - text (str): The text containing placeholders.\n - **kwargs: The values to replace the placeholders with.\n\n Returns:\n - str: The text with placeholders replaced.\n \"\"\"\n\n # Define a regular expression pattern to match placeholders like {placeholder_name}\n pattern = re.compile(r'\\{(\\w+)\\}')\n\n def replacement(match):\n # Extract the placeholder name from the match 
object\n placeholder_name = match.group(1)\n\n # If the placeholder name is found in kwargs, replace it with the corresponding value\n if placeholder_name in kwargs:\n return kwargs[placeholder_name]\n\n # If the placeholder name is not found in kwargs, keep the original placeholder text\n return match.group(0)\n\n # Use the re.sub() function to replace all occurrences of the pattern in the text\n return pattern.sub(replacement, text)" }, { "identifier": "get_token_count", "path": "modules/utils.py", "snippet": "def get_token_count(text, token_adjust=1):\n # Define the maximum length for a text chunk\n max_length = 1000000\n\n # Initialize the total token count\n total_token_count = 0\n\n # Split the text into chunks of up to max_length characters\n for start in range(0, len(text), max_length):\n # Get a chunk of text\n chunk = text[start:start + max_length]\n\n # Process the chunk with the NLP tool\n doc = nlp(chunk)\n\n # Update the total token count\n total_token_count += int(len(doc) * token_adjust)\n\n # Return the total token count\n return total_token_count" }, { "identifier": "trim_to_right_token_count", "path": "modules/utils.py", "snippet": "def trim_to_right_token_count(text, max_tokens):\n adjust_tokens = int(max_tokens / token_adjust)\n doc = nlp(text)\n start = len(doc) - adjust_tokens if len(doc) > adjust_tokens else 0\n trimmed_text = \" \".join(token.text for token in doc[start:])\n return trimmed_text" }, { "identifier": "trim_to_token_count", "path": "modules/utils.py", "snippet": "def trim_to_token_count(text, max_tokens):\n adjust_tokens = int(max_tokens / token_adjust)\n doc = nlp(text)\n trimmed_text = \" \".join(token.text for token in doc[:adjust_tokens])\n return trimmed_text" } ]
import asyncio
import datetime
import json
import os
import platform
import queue
import re
import subprocess
import logging
import signal
import base64
import threading
import spacy
from pygments import lexers
from modules.command_result import CommandResult
from modules.llm import LLM, ModelTypes
from modules.run_command import CommandRunner
from modules.utils import get_file_size, is_valid_filename, list_files_and_folders_with_sizes, load_settings, map_possible_commands, get_os_name, print_colored_text, capture_styled_input, read_file, redact_json_values, replace_placeholders, get_token_count, trim_to_right_token_count, trim_to_token_count
from functools import partial
from multiprocessing import Pool, TimeoutError
6,759
# Import necessary modules
# Load English tokenizer, POS tagger, parser, NER and word vectors
nlp = spacy.load("en_core_web_sm")

logging.basicConfig(filename='app.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

class ShellSpeak:
    def __init__(self, settings, base_path, vectorDb):
        self.llm_len = int(settings.get("llm_size", 14000))
        self.llm_history_len = int(settings.get("llm_history_size", 4000))
        self.llm_file_len = int(settings.get("llm_file_size", 4000))
        self.llm_folder_len = int(settings.get("llm_folder_size", 4000))
        self.llm_slide_len = int(settings.get("llm_slide_len", 120))
        self.temp_file = settings.get("temp_file", "temp")
        self.llm_output_size = int(settings.get("llm_output_size", 4097))
        self.use_cache = settings.get("use_cache", False)
        self.cache_file = settings.get("cache_file", None)
        self.vector_for_commands = settings.get("vector_for_commands", False)
        self.vector_for_history = settings.get("vector_for_history", True)
        self.vector_for_folders = settings.get("vector_for_folders", True)
        self.data_file = 'path_to_your_data_file.json'
        self.use_indexing = settings.get('use_indexing', False)
        self.vector_db = vectorDb
        self.settings = settings
        self.command_history = ""
        self.settingsRoot = base_path
        self.files = []
# Import necessary modules # Load English tokenizer, POS tagger, parser, NER and word vectors nlp = spacy.load("en_core_web_sm") logging.basicConfig(filename='app.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') class ShellSpeak: def __init__(self, settings, base_path, vectorDb): self.llm_len = int(settings.get("llm_size", 14000)) self.llm_history_len = int(settings.get("llm_history_size", 4000)) self.llm_file_len = int(settings.get("llm_file_size", 4000)) self.llm_folder_len = int(settings.get("llm_folder_size", 4000)) self.llm_slide_len = int(settings.get("llm_slide_len", 120)) self.temp_file = settings.get("temp_file", "temp") self.llm_output_size = int(settings.get("llm_output_size", 4097)) self.use_cache = settings.get("use_cache", False) self.cache_file = settings.get("cache_file", None) self.vector_for_commands = settings.get("vector_for_commands", False) self.vector_for_history = settings.get("vector_for_history", True) self.vector_for_folders = settings.get("vector_for_folders", True) self.data_file = 'path_to_your_data_file.json' self.use_indexing = settings.get('use_indexing', False) self.vector_db = vectorDb self.settings = settings self.command_history = "" self.settingsRoot = base_path self.files = []
self.llm = LLM(model_type=ModelTypes(self.settings.get('model', "OpenAI")), use_cache=self.use_cache, cache_file=self.cache_file) #Zephyr7bBeta
2
2023-10-31 23:35:19+00:00
8k
qym7/SparseDiff
sparse_diffusion/metrics/spectre_utils.py
[ { "identifier": "SparsePlaceHolder", "path": "sparse_diffusion/utils.py", "snippet": "class SparsePlaceHolder:\n def __init__(\n self, node, edge_index, edge_attr, y, ptr=None, batch=None, charge=None\n ):\n self.node = node # (N, dx)\n self.edge_index = edge_index # (2, M)\n self.edge_attr = edge_attr # (M, de)\n self.y = y # (n, dy)\n self.batch = batch # (n)\n self.ptr = ptr\n self.charge = charge\n\n def type_as(self, x: torch.Tensor):\n \"\"\"Changes the device and dtype of X, E, y.\"\"\"\n self.node = self.node.type_as(x)\n self.edge_index = self.edge_index.type_as(x)\n self.edge_attr = self.edge_attr.type_as(x)\n self.y = self.y.type_as(x)\n\n self.ptr = self.ptr if self.ptr is None else self.ptr.type_as(x)\n self.batch = self.batch if self.batch is None else self.batch.type_as(x)\n self.charge = self.charge if self.charge is None else self.charge.type_as(x)\n\n return self\n\n def to_device(self, device: str):\n \"\"\"Changes the device and device of X, E, y.\"\"\"\n self.node = self.node.to(device)\n self.edge_index = self.edge_index.to(device)\n self.edge_attr = self.edge_attr.to(device)\n self.y = self.y.to(device)\n\n self.ptr = self.ptr if self.ptr is None else self.ptr.to(device)\n self.batch = self.batch if self.batch is None else self.batch.to(device)\n self.charge = self.charge if self.charge is None else self.charge.to(device)\n\n return self\n\n def coalesce(self):\n self.edge_index, self.edge_attr = coalesce(\n self.edge_index.long(), self.edge_attr\n )\n return self\n\n def symmetry(self):\n \"\"\"ecover directed graph to undirected graph\"\"\"\n self.edge_index, self.edge_attr = to_undirected(self.edge_index, self.edge_attr)\n return self\n\n def collapse(self, collapse_charge=None):\n self.node = torch.argmax(self.node, dim=-1)\n self.edge_attr = torch.argmax(self.edge_attr, dim=-1)" }, { "identifier": "compute_mmd", "path": "sparse_diffusion/analysis/dist_helper.py", "snippet": "def compute_mmd(samples1, samples2, kernel, is_hist=True, *args, **kwargs):\n \"\"\"MMD between two samples\"\"\"\n # normalize histograms into pmf\n if is_hist:\n samples1 = [s1 / (np.sum(s1) + 1e-6) for s1 in samples1]\n samples2 = [s2 / (np.sum(s2) + 1e-6) for s2 in samples2]\n return (\n disc(samples1, samples1, kernel, *args, **kwargs)\n + disc(samples2, samples2, kernel, *args, **kwargs)\n - 2 * disc(samples1, samples2, kernel, *args, **kwargs)\n )" }, { "identifier": "gaussian_emd", "path": "sparse_diffusion/analysis/dist_helper.py", "snippet": "def gaussian_emd(x, y, sigma=1.0, distance_scaling=1.0):\n \"\"\"Gaussian kernel with squared distance in exponential term replaced by EMD\n Args:\n x, y: 1D pmf of two distributions with the same support\n sigma: standard deviation\n \"\"\"\n support_size = max(len(x), len(y))\n d_mat = toeplitz(range(support_size)).astype(np.float64)\n distance_mat = d_mat / distance_scaling\n\n # convert histogram values x and y to float, and make them equal len\n x = x.astype(np.float64)\n y = y.astype(np.float64)\n if len(x) < len(y):\n x = np.hstack((x, [0.0] * (support_size - len(x))))\n elif len(y) < len(x):\n y = np.hstack((y, [0.0] * (support_size - len(y))))\n\n emd = pyemd.emd(x, y, distance_mat)\n return np.exp(-emd * emd / (2 * sigma * sigma))" }, { "identifier": "gaussian", "path": "sparse_diffusion/analysis/dist_helper.py", "snippet": "def gaussian(x, y, sigma=1.0):\n support_size = max(len(x), len(y))\n # convert histogram values x and y to float, and make them equal len\n x = x.astype(np.float64)\n y = y.astype(np.float64)\n if len(x) < 
len(y):\n x = np.hstack((x, [0.0] * (support_size - len(x))))\n elif len(y) < len(x):\n y = np.hstack((y, [0.0] * (support_size - len(y))))\n\n dist = np.linalg.norm(x - y, 2)\n return np.exp(-dist * dist / (2 * sigma * sigma))" }, { "identifier": "emd", "path": "sparse_diffusion/analysis/dist_helper.py", "snippet": "def emd(x, y, distance_scaling=1.0):\n support_size = max(len(x), len(y))\n d_mat = toeplitz(range(support_size)).astype(np.float64)\n distance_mat = d_mat / distance_scaling\n\n # convert histogram values x and y to float, and make them equal len\n x = x.astype(np.float64)\n y = y.astype(np.float64)\n if len(x) < len(y):\n x = np.hstack((x, [0.0] * (support_size - len(x))))\n elif len(y) < len(x):\n y = np.hstack((y, [0.0] * (support_size - len(y))))\n\n emd = pyemd.emd(x, y, distance_mat)\n return emd" }, { "identifier": "gaussian_tv", "path": "sparse_diffusion/analysis/dist_helper.py", "snippet": "def gaussian_tv(x, y, sigma=1.0):\n support_size = max(len(x), len(y))\n # convert histogram values x and y to float, and make them equal len\n x = x.astype(np.float64)\n y = y.astype(np.float64)\n if len(x) < len(y):\n x = np.hstack((x, [0.0] * (support_size - len(x))))\n elif len(y) < len(x):\n y = np.hstack((y, [0.0] * (support_size - len(y))))\n\n dist = np.abs(x - y).sum() / 2.0\n return np.exp(-dist * dist / (2 * sigma * sigma))" }, { "identifier": "disc", "path": "sparse_diffusion/analysis/dist_helper.py", "snippet": "def disc(samples1, samples2, kernel, is_parallel=True, *args, **kwargs):\n \"\"\"Discrepancy between 2 samples\"\"\"\n d = 0\n\n if not is_parallel:\n for s1 in samples1:\n for s2 in samples2:\n d += kernel(s1, s2, *args, **kwargs)\n else:\n with concurrent.futures.ThreadPoolExecutor() as executor:\n for dist in executor.map(\n kernel_parallel_worker,\n [(s1, samples2, partial(kernel, *args, **kwargs)) for s1 in samples1],\n ):\n d += dist\n if len(samples1) * len(samples2) > 0:\n d /= len(samples1) * len(samples2)\n else:\n d = 1e6\n return d" }, { "identifier": "FIDEvaluation", "path": "sparse_diffusion/metrics/neural_metrics.py", "snippet": "class FIDEvaluation(GINMetric):\n # https://github.com/mseitzer/pytorch-fid\n @time_function\n def evaluate(self, generated_dataset=None, reference_dataset=None):\n if not isinstance(generated_dataset, torch.Tensor) and not isinstance(generated_dataset, np.ndarray):\n generated_dataset, reference_dataset, _ = self.get_activations(generated_dataset, reference_dataset)\n\n mu_ref, cov_ref = self.__calculate_dataset_stats(reference_dataset)\n mu_generated, cov_generated = self.__calculate_dataset_stats(generated_dataset)\n # print(np.max(mu_generated), np.max(cov_generated), 'mu, cov fid')\n fid = self.compute_FID(mu_ref, mu_generated, cov_ref, cov_generated)\n return {'fid': fid}\n\n def __calculate_dataset_stats(self, activations):\n # print('activation mean -----------------------------------------', activations.mean())\n mu = np.mean(activations, axis = 0)\n cov = np.cov(activations, rowvar = False)\n\n return mu, cov\n\n def compute_FID(self, mu1, mu2, cov1, cov2, eps = 1e-6):\n \"\"\"Numpy implementation of the Frechet Distance.\n The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)\n and X_2 ~ N(mu_2, C_2) is\n d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).\n Stable version by Dougal J. 
Sutherland.\n\n Params:\n -- mu1 : Numpy array containing the activations of a layer of the\n inception net (like returned by the function 'get_predictions')\n for generated samples.\n -- mu2 : The sample mean over activations, precalculated on an\n representative data set.\n -- sigma1: The covariance matrix over activations for generated samples.\n -- sigma2: The covariance matrix over activations, precalculated on an\n representative data set.\n Returns:\n -- : The Frechet Distance.\n \"\"\"\n assert mu1.shape == mu2.shape, \\\n 'Training and test mean vectors have different lengths'\n assert cov1.shape == cov2.shape, \\\n 'Training and test covariances have different dimensions'\n\n diff = mu1 - mu2\n # Product might be almost singular\n covmean, _ = linalg.sqrtm(cov1.dot(cov2), disp=False)\n # print(np.max(covmean), 'covmean')\n if not np.isfinite(covmean).all():\n msg = ('fid calculation produces singular product; '\n 'adding %s to diagonal of cov estimates') % eps\n print(msg)\n offset = np.eye(cov1.shape[0]) * eps\n covmean = linalg.sqrtm((cov1 + offset).dot(cov2 + offset))\n\n # Numerical error might give slight imaginary component\n if np.iscomplexobj(covmean):\n if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):\n m = np.max(np.abs(covmean.imag))\n #raise ValueError('Imaginary component {}'.format(m))\n covmean = covmean.real\n\n tr_covmean = np.trace(covmean)\n # print(tr_covmean, 'tr_covmean')\n\n return (diff.dot(diff) + np.trace(cov1) +\n np.trace(cov2) - 2 * tr_covmean)" }, { "identifier": "MMDEvaluation", "path": "sparse_diffusion/metrics/neural_metrics.py", "snippet": "class MMDEvaluation(GINMetric):\n def __init__(self, model, kernel='rbf', sigma='range', multiplier='mean'):\n super().__init__(model)\n\n if multiplier == 'mean':\n self.__get_sigma_mult_factor = self.__mean_pairwise_distance\n elif multiplier == 'median':\n self.__get_sigma_mult_factor = self.__median_pairwise_distance\n elif multiplier is None:\n self.__get_sigma_mult_factor = lambda *args, **kwargs: 1\n else:\n raise Exception(multiplier)\n\n if 'rbf' in kernel:\n if sigma == 'range':\n self.base_sigmas = np.array([\n 0.01, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0])\n\n if multiplier == 'mean':\n self.name = 'mmd_rbf'\n elif multiplier == 'median':\n self.name = 'mmd_rbf_adaptive_median'\n else:\n self.name = 'mmd_rbf_adaptive'\n\n elif sigma == 'one':\n self.base_sigmas = np.array([1])\n\n if multiplier == 'mean':\n self.name = 'mmd_rbf_single_mean'\n elif multiplier == 'median':\n self.name = 'mmd_rbf_single_median'\n else:\n self.name = 'mmd_rbf_single'\n\n else:\n raise Exception(sigma)\n\n self.evaluate = self.calculate_MMD_rbf_quadratic\n\n elif 'linear' in kernel:\n self.evaluate = self.calculate_MMD_linear_kernel\n\n else:\n raise Exception()\n\n def __get_pairwise_distances(self, generated_dataset, reference_dataset):\n return pairwise_distances(\n reference_dataset, generated_dataset,\n metric='euclidean', n_jobs=8) ** 2\n\n def __mean_pairwise_distance(self, dists_GR):\n return np.sqrt(dists_GR.mean())\n\n def __median_pairwise_distance(self, dists_GR):\n return np.sqrt(np.median(dists_GR))\n\n def get_sigmas(self, dists_GR):\n mult_factor = self.__get_sigma_mult_factor(dists_GR)\n return self.base_sigmas * mult_factor\n\n @time_function\n def calculate_MMD_rbf_quadratic(self, generated_dataset=None, reference_dataset=None):\n # https://github.com/djsutherland/opt-mmd/blob/master/two_sample/mmd.py\n if not isinstance(generated_dataset, torch.Tensor) and not 
isinstance(generated_dataset, np.ndarray):\n (generated_dataset, reference_dataset), _ = self.get_activations(generated_dataset, reference_dataset)\n\n GG = self.__get_pairwise_distances(generated_dataset, generated_dataset)\n GR = self.__get_pairwise_distances(generated_dataset, reference_dataset)\n RR = self.__get_pairwise_distances(reference_dataset, reference_dataset)\n\n max_mmd = 0\n sigmas = self.get_sigmas(GR)\n for sigma in sigmas:\n gamma = 1 / (2 * sigma**2)\n\n K_GR = np.exp(-gamma * GR)\n K_GG = np.exp(-gamma * GG)\n K_RR = np.exp(-gamma * RR)\n\n mmd = K_GG.mean() + K_RR.mean() - 2 * K_GR.mean()\n max_mmd = mmd if mmd > max_mmd else max_mmd\n\n return {self.name: max_mmd}\n\n @time_function\n def calculate_MMD_linear_kernel(self, generated_dataset=None, reference_dataset=None):\n # https://github.com/djsutherland/opt-mmd/blob/master/two_sample/mmd.py\n if not isinstance(generated_dataset, torch.Tensor) and not isinstance(generated_dataset, np.ndarray):\n generated_dataset, reference_dataset, _ = self.get_activations(generated_dataset, reference_dataset)\n\n G_bar = generated_dataset.mean(axis=0)\n R_bar = reference_dataset.mean(axis=0)\n Z_bar = G_bar - R_bar\n mmd = Z_bar.dot(Z_bar)\n mmd = mmd if mmd >= 0 else 0\n return {'mmd_linear': mmd}" }, { "identifier": "load_feature_extractor", "path": "sparse_diffusion/metrics/neural_metrics.py", "snippet": "def load_feature_extractor(\n device, num_layers=3, hidden_dim=35, neighbor_pooling_type='sum',\n graph_pooling_type='sum', input_dim=1, edge_feat_dim=0,\n dont_concat=False, num_mlp_layers=2, output_dim=1,\n node_feat_loc='attr', edge_feat_loc='attr', init='orthogonal',\n **kwargs):\n\n model = GIN(\n num_layers=num_layers, hidden_dim=hidden_dim,\n neighbor_pooling_type=neighbor_pooling_type,\n graph_pooling_type=graph_pooling_type, input_dim=input_dim,\n edge_feat_dim=edge_feat_dim, num_mlp_layers=num_mlp_layers,\n output_dim=output_dim, init=init)\n\n model.node_feat_loc = node_feat_loc\n model.edge_feat_loc = edge_feat_loc\n\n use_pretrained = kwargs.get('use_pretrained', False)\n if use_pretrained:\n model_path = kwargs.get('model_path')\n assert model_path is not None, 'Please pass model_path if use_pretrained=True'\n print('loaded', model_path)\n saved_model = torch.load(model_path)\n model.load_state_dict(saved_model['model_state_dict'])\n\n model.eval()\n\n if dont_concat:\n model.forward = model.get_graph_embed_no_cat\n else:\n model.forward = model.get_graph_embed\n\n model.device = device\n return model.to(device)" }, { "identifier": "SparsePlaceHolder", "path": "sparse_diffusion/utils.py", "snippet": "class SparsePlaceHolder:\n def __init__(\n self, node, edge_index, edge_attr, y, ptr=None, batch=None, charge=None\n ):\n self.node = node # (N, dx)\n self.edge_index = edge_index # (2, M)\n self.edge_attr = edge_attr # (M, de)\n self.y = y # (n, dy)\n self.batch = batch # (n)\n self.ptr = ptr\n self.charge = charge\n\n def type_as(self, x: torch.Tensor):\n \"\"\"Changes the device and dtype of X, E, y.\"\"\"\n self.node = self.node.type_as(x)\n self.edge_index = self.edge_index.type_as(x)\n self.edge_attr = self.edge_attr.type_as(x)\n self.y = self.y.type_as(x)\n\n self.ptr = self.ptr if self.ptr is None else self.ptr.type_as(x)\n self.batch = self.batch if self.batch is None else self.batch.type_as(x)\n self.charge = self.charge if self.charge is None else self.charge.type_as(x)\n\n return self\n\n def to_device(self, device: str):\n \"\"\"Changes the device and device of X, E, y.\"\"\"\n self.node = 
self.node.to(device)\n self.edge_index = self.edge_index.to(device)\n self.edge_attr = self.edge_attr.to(device)\n self.y = self.y.to(device)\n\n self.ptr = self.ptr if self.ptr is None else self.ptr.to(device)\n self.batch = self.batch if self.batch is None else self.batch.to(device)\n self.charge = self.charge if self.charge is None else self.charge.to(device)\n\n return self\n\n def coalesce(self):\n self.edge_index, self.edge_attr = coalesce(\n self.edge_index.long(), self.edge_attr\n )\n return self\n\n def symmetry(self):\n \"\"\"ecover directed graph to undirected graph\"\"\"\n self.edge_index, self.edge_attr = to_undirected(self.edge_index, self.edge_attr)\n return self\n\n def collapse(self, collapse_charge=None):\n self.node = torch.argmax(self.node, dim=-1)\n self.edge_attr = torch.argmax(self.edge_attr, dim=-1)" } ]
import os import copy import random import dgl import wandb import pygsp as pg import secrets import torch import torch.nn as nn import numpy as np import networkx as nx import subprocess as sp import concurrent.futures import graph_tool.all as gt from datetime import datetime from scipy.linalg import eigvalsh from scipy.stats import chi2 from string import ascii_uppercase, digits from torch_geometric.utils import to_dense_adj, is_undirected, to_networkx from sparse_diffusion.utils import SparsePlaceHolder from sparse_diffusion.analysis.dist_helper import ( compute_mmd, gaussian_emd, gaussian, emd, gaussian_tv, disc, ) from sparse_diffusion.metrics.neural_metrics import ( FIDEvaluation, MMDEvaluation, load_feature_extractor ) from sparse_diffusion.utils import SparsePlaceHolder
7,134
def eval_fraction_isomorphic(fake_graphs, train_graphs): count = 0 for fake_g in fake_graphs: for train_g in train_graphs: if nx.faster_could_be_isomorphic(fake_g, train_g): if nx.is_isomorphic(fake_g, train_g): count += 1 break return count / float(len(fake_graphs)) def eval_fraction_unique(fake_graphs, precise=False): count_non_unique = 0 fake_evaluated = [] for fake_g in fake_graphs: unique = True if not fake_g.number_of_nodes() == 0: for fake_old in fake_evaluated: if precise: if nx.faster_could_be_isomorphic(fake_g, fake_old): if nx.is_isomorphic(fake_g, fake_old): count_non_unique += 1 unique = False break else: if nx.faster_could_be_isomorphic(fake_g, fake_old): if nx.could_be_isomorphic(fake_g, fake_old): count_non_unique += 1 unique = False break if unique: fake_evaluated.append(fake_g) frac_unique = (float(len(fake_graphs)) - count_non_unique) / float( len(fake_graphs) ) # Fraction of distinct isomorphism classes in the fake graphs return frac_unique def eval_fraction_unique_non_isomorphic_valid( fake_graphs, train_graphs, validity_func=(lambda x: True) ): count_valid = 0 count_isomorphic = 0 count_non_unique = 0 fake_evaluated = [] for fake_g in fake_graphs: unique = True for fake_old in fake_evaluated: if nx.faster_could_be_isomorphic(fake_g, fake_old): if nx.is_isomorphic(fake_g, fake_old): count_non_unique += 1 unique = False break if unique: fake_evaluated.append(fake_g) non_isomorphic = True for train_g in train_graphs: if nx.faster_could_be_isomorphic(fake_g, train_g): if nx.is_isomorphic(fake_g, train_g): count_isomorphic += 1 non_isomorphic = False break if non_isomorphic: if validity_func(fake_g): count_valid += 1 frac_unique = (float(len(fake_graphs)) - count_non_unique) / float( len(fake_graphs) ) # Fraction of distinct isomorphism classes in the fake graphs frac_unique_non_isomorphic = ( float(len(fake_graphs)) - count_non_unique - count_isomorphic ) / float( len(fake_graphs) ) # Fraction of distinct isomorphism classes in the fake graphs that are not in the training set frac_unique_non_isomorphic_valid = count_valid / float( len(fake_graphs) ) # Fraction of distinct isomorphism classes in the fake graphs that are not in the training set and are valid return frac_unique, frac_unique_non_isomorphic, frac_unique_non_isomorphic_valid class SpectreSamplingMetrics(nn.Module): def __init__(self, dataloaders, compute_emd, metrics_list): super().__init__() self.train_graphs = self.loader_to_nx(dataloaders["train"]) self.val_graphs = self.loader_to_nx(dataloaders["val"]) self.test_graphs = self.loader_to_nx(dataloaders["test"]) self.num_graphs_test = len(self.test_graphs) self.num_graphs_val = len(self.val_graphs) print('num_train_graphs is', len(self.train_graphs)) print('num_graphs_test is', self.num_graphs_test) print('num_graphs_val is', self.num_graphs_val) self.compute_emd = compute_emd self.metrics_list = metrics_list def loader_to_nx(self, loader): networkx_graphs = [] for i, batch in enumerate(loader): data_list = batch.to_data_list() for j, data in enumerate(data_list): networkx_graphs.append( to_networkx( data, node_attrs=None, edge_attrs=None, to_undirected=True, remove_self_loops=True, ) ) return networkx_graphs def neural_metrics(self, generated): # Neural metrics gin_model = load_feature_extractor(device='cpu') # take a gin-model with predefined params and random weights fid_evaluator = FIDEvaluation(model=gin_model)
############################################################################### # # Adapted from https://github.com/lrjconan/GRAN/ which in turn is adapted from https://github.com/JiaxuanYou/graph-generation # ############################################################################### ##Navigate to the ./util/orca directory and compile orca.cpp # g++ -O2 -std=c++11 -o orca orca.cpp try: except ModuleNotFoundError: print("Graph tool could not be loaded") PRINT_TIME = False __all__ = [ "degree_stats", "clustering_stats", "orbit_stats_all", "spectral_stats", "eval_acc_lobster_graph", ] def degree_worker(G): return np.array(nx.degree_histogram(G)) def degree_stats(graph_ref_list, graph_pred_list, is_parallel=True, compute_emd=False): """Compute the distance between the degree distributions of two unordered sets of graphs. Args: graph_ref_list, graph_target_list: two lists of networkx graphs to be evaluated """ sample_ref = [] sample_pred = [] # in case an empty graph is generated graph_pred_list_remove_empty = [ G for G in graph_pred_list if not G.number_of_nodes() == 0 ] prev = datetime.now() if is_parallel: with concurrent.futures.ThreadPoolExecutor() as executor: for deg_hist in executor.map(degree_worker, graph_ref_list): sample_ref.append(deg_hist) with concurrent.futures.ThreadPoolExecutor() as executor: for deg_hist in executor.map(degree_worker, graph_pred_list_remove_empty): sample_pred.append(deg_hist) else: for i in range(len(graph_ref_list)): degree_temp = np.array(nx.degree_histogram(graph_ref_list[i])) sample_ref.append(degree_temp) for i in range(len(graph_pred_list_remove_empty)): degree_temp = np.array(nx.degree_histogram(graph_pred_list_remove_empty[i])) sample_pred.append(degree_temp) # mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian_emd) # mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=emd) if compute_emd: # EMD option uses the same computation as GraphRNN, the alternative is MMD as computed by GRAN # mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=emd) mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian_emd) else: mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian_tv) # mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian) elapsed = datetime.now() - prev if PRINT_TIME: print("Time computing degree mmd: ", elapsed) return mmd_dist ############################################################################### def spectral_worker(G, n_eigvals=-1): # eigs = nx.laplacian_spectrum(G) try: eigs = eigvalsh(nx.normalized_laplacian_matrix(G).todense()) except: eigs = np.zeros(G.number_of_nodes()) if n_eigvals > 0: eigs = eigs[1 : n_eigvals + 1] spectral_pmf, _ = np.histogram(eigs, bins=200, range=(-1e-5, 2), density=False) spectral_pmf = spectral_pmf / spectral_pmf.sum() return spectral_pmf def get_spectral_pmf(eigs, max_eig): spectral_pmf, _ = np.histogram( np.clip(eigs, 0, max_eig), bins=200, range=(-1e-5, max_eig), density=False ) spectral_pmf = spectral_pmf / spectral_pmf.sum() return spectral_pmf def eigval_stats( eig_ref_list, eig_pred_list, max_eig=20, is_parallel=True, compute_emd=False ): """Compute the distance between the degree distributions of two unordered sets of graphs. 
Args: graph_ref_list, graph_target_list: two lists of networkx graphs to be evaluated """ sample_ref = [] sample_pred = [] prev = datetime.now() if is_parallel: with concurrent.futures.ThreadPoolExecutor() as executor: for spectral_density in executor.map( get_spectral_pmf, eig_ref_list, [max_eig for i in range(len(eig_ref_list))], ): sample_ref.append(spectral_density) with concurrent.futures.ThreadPoolExecutor() as executor: for spectral_density in executor.map( get_spectral_pmf, eig_pred_list, [max_eig for i in range(len(eig_ref_list))], ): sample_pred.append(spectral_density) else: for i in range(len(eig_ref_list)): spectral_temp = get_spectral_pmf(eig_ref_list[i]) sample_ref.append(spectral_temp) for i in range(len(eig_pred_list)): spectral_temp = get_spectral_pmf(eig_pred_list[i]) sample_pred.append(spectral_temp) # mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian_emd) if compute_emd: mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=emd) else: mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian_tv) # mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian) elapsed = datetime.now() - prev if PRINT_TIME: print("Time computing eig mmd: ", elapsed) return mmd_dist def eigh_worker(G): L = nx.normalized_laplacian_matrix(G).todense() try: eigvals, eigvecs = np.linalg.eigh(L) except: eigvals = np.zeros(L[0, :].shape) eigvecs = np.zeros(L.shape) return (eigvals, eigvecs) def compute_list_eigh(graph_list, is_parallel=False): eigval_list = [] eigvec_list = [] if is_parallel: with concurrent.futures.ThreadPoolExecutor() as executor: for e_U in executor.map(eigh_worker, graph_list): eigval_list.append(e_U[0]) eigvec_list.append(e_U[1]) else: for i in range(len(graph_list)): e_U = eigh_worker(graph_list[i]) eigval_list.append(e_U[0]) eigvec_list.append(e_U[1]) return eigval_list, eigvec_list def get_spectral_filter_worker(eigvec, eigval, filters, bound=1.4): ges = filters.evaluate(eigval) linop = [] for ge in ges: linop.append(eigvec @ np.diag(ge) @ eigvec.T) linop = np.array(linop) norm_filt = np.sum(linop**2, axis=2) hist_range = [0, bound] hist = np.array( [np.histogram(x, range=hist_range, bins=100)[0] for x in norm_filt] ) # NOTE: change number of bins return hist.flatten() def spectral_filter_stats( eigvec_ref_list, eigval_ref_list, eigvec_pred_list, eigval_pred_list, is_parallel=False, compute_emd=False, ): """Compute the distance between the eigvector sets. 
Args: graph_ref_list, graph_target_list: two lists of networkx graphs to be evaluated """ prev = datetime.now() class DMG(object): """Dummy Normalized Graph""" lmax = 2 n_filters = 12 filters = pg.filters.Abspline(DMG, n_filters) bound = np.max(filters.evaluate(np.arange(0, 2, 0.01))) sample_ref = [] sample_pred = [] if is_parallel: with concurrent.futures.ThreadPoolExecutor() as executor: for spectral_density in executor.map( get_spectral_filter_worker, eigvec_ref_list, eigval_ref_list, [filters for i in range(len(eigval_ref_list))], [bound for i in range(len(eigval_ref_list))], ): sample_ref.append(spectral_density) with concurrent.futures.ThreadPoolExecutor() as executor: for spectral_density in executor.map( get_spectral_filter_worker, eigvec_pred_list, eigval_pred_list, [filters for i in range(len(eigval_ref_list))], [bound for i in range(len(eigval_ref_list))], ): sample_pred.append(spectral_density) else: for i in range(len(eigval_ref_list)): try: spectral_temp = get_spectral_filter_worker( eigvec_ref_list[i], eigval_ref_list[i], filters, bound ) sample_ref.append(spectral_temp) except: pass for i in range(len(eigval_pred_list)): try: spectral_temp = get_spectral_filter_worker( eigvec_pred_list[i], eigval_pred_list[i], filters, bound ) sample_pred.append(spectral_temp) except: pass if compute_emd: # EMD option uses the same computation as GraphRNN, the alternative is MMD as computed by GRAN # mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=emd) mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian_emd) else: mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian_tv) elapsed = datetime.now() - prev if PRINT_TIME: print("Time computing spectral filter stats: ", elapsed) return mmd_dist def spectral_stats( graph_ref_list, graph_pred_list, is_parallel=True, n_eigvals=-1, compute_emd=False ): """Compute the distance between the degree distributions of two unordered sets of graphs. 
Args: graph_ref_list, graph_target_list: two lists of networkx graphs to be evaluated """ sample_ref = [] sample_pred = [] # in case an empty graph is generated graph_pred_list_remove_empty = [ G for G in graph_pred_list if not G.number_of_nodes() == 0 ] prev = datetime.now() if is_parallel: with concurrent.futures.ThreadPoolExecutor() as executor: for spectral_density in executor.map( spectral_worker, graph_ref_list, [n_eigvals for i in graph_ref_list] ): sample_ref.append(spectral_density) with concurrent.futures.ThreadPoolExecutor() as executor: for spectral_density in executor.map( spectral_worker, graph_pred_list_remove_empty, [n_eigvals for i in graph_ref_list], ): sample_pred.append(spectral_density) else: for i in range(len(graph_ref_list)): spectral_temp = spectral_worker(graph_ref_list[i], n_eigvals) sample_ref.append(spectral_temp) for i in range(len(graph_pred_list_remove_empty)): spectral_temp = spectral_worker(graph_pred_list_remove_empty[i], n_eigvals) sample_pred.append(spectral_temp) # mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian_emd) # mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=emd) if compute_emd: # EMD option uses the same computation as GraphRNN, the alternative is MMD as computed by GRAN # mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=emd) mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian_emd) else: mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian_tv) # mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=gaussian) elapsed = datetime.now() - prev if PRINT_TIME: print("Time computing degree mmd: ", elapsed) return mmd_dist ############################################################################### def clustering_worker(param): G, bins = param clustering_coeffs_list = list(nx.clustering(G).values()) hist, _ = np.histogram( clustering_coeffs_list, bins=bins, range=(0.0, 1.0), density=False ) return hist def clustering_stats( graph_ref_list, graph_pred_list, bins=100, is_parallel=True, compute_emd=False ): sample_ref = [] sample_pred = [] graph_pred_list_remove_empty = [ G for G in graph_pred_list if not G.number_of_nodes() == 0 ] prev = datetime.now() if is_parallel: with concurrent.futures.ThreadPoolExecutor() as executor: for clustering_hist in executor.map( clustering_worker, [(G, bins) for G in graph_ref_list] ): sample_ref.append(clustering_hist) with concurrent.futures.ThreadPoolExecutor() as executor: for clustering_hist in executor.map( clustering_worker, [(G, bins) for G in graph_pred_list_remove_empty] ): sample_pred.append(clustering_hist) # check non-zero elements in hist # total = 0 # for i in range(len(sample_pred)): # nz = np.nonzero(sample_pred[i])[0].shape[0] # total += nz # print(total) else: for i in range(len(graph_ref_list)): clustering_coeffs_list = list(nx.clustering(graph_ref_list[i]).values()) hist, _ = np.histogram( clustering_coeffs_list, bins=bins, range=(0.0, 1.0), density=False ) sample_ref.append(hist) for i in range(len(graph_pred_list_remove_empty)): clustering_coeffs_list = list( nx.clustering(graph_pred_list_remove_empty[i]).values() ) hist, _ = np.histogram( clustering_coeffs_list, bins=bins, range=(0.0, 1.0), density=False ) sample_pred.append(hist) if compute_emd: # EMD option uses the same computation as GraphRNN, the alternative is MMD as computed by GRAN # mmd_dist = compute_mmd(sample_ref, sample_pred, kernel=emd, sigma=1.0 / 10) mmd_dist = compute_mmd( sample_ref, sample_pred, kernel=gaussian_emd, sigma=1.0 / 10, distance_scaling=bins, ) else: mmd_dist = 
compute_mmd( sample_ref, sample_pred, kernel=gaussian_tv, sigma=1.0 / 10 ) elapsed = datetime.now() - prev if PRINT_TIME: print("Time computing clustering mmd: ", elapsed) return mmd_dist # maps motif/orbit name string to its corresponding list of indices from orca output motif_to_indices = { "3path": [1, 2], "4cycle": [8], } COUNT_START_STR = "orbit counts:" def edge_list_reindexed(G): idx = 0 id2idx = dict() for u in G.nodes(): id2idx[str(u)] = idx idx += 1 edges = [] for u, v in G.edges(): edges.append((id2idx[str(u)], id2idx[str(v)])) return edges def orca(graph): # tmp_fname = f'analysis/orca/tmp_{"".join(secrets.choice(ascii_uppercase + digits) for i in range(8))}.txt' tmp_fname = f'../analysis/orca/tmp_{"".join(secrets.choice(ascii_uppercase + digits) for i in range(8))}.txt' tmp_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)), tmp_fname) # print(tmp_fname, flush=True) f = open(tmp_fname, "w") f.write(str(graph.number_of_nodes()) + " " + str(graph.number_of_edges()) + "\n") for u, v in edge_list_reindexed(graph): f.write(str(u) + " " + str(v) + "\n") f.close() output = sp.check_output( [ str( os.path.join( os.path.dirname(os.path.realpath(__file__)), "../analysis/orca/orca" ) ), "node", "4", tmp_fname, "std", ] ) output = output.decode("utf8").strip() idx = output.find(COUNT_START_STR) + len(COUNT_START_STR) + 2 output = output[idx:] node_orbit_counts = np.array( [ list(map(int, node_cnts.strip().split(" "))) for node_cnts in output.strip("\n").split("\n") ] ) try: os.remove(tmp_fname) except OSError: pass return node_orbit_counts def motif_stats( graph_ref_list, graph_pred_list, motif_type="4cycle", ground_truth_match=None, bins=100, compute_emd=False, ): # graph motif counts (int for each graph) # normalized by graph size total_counts_ref = [] total_counts_pred = [] num_matches_ref = [] num_matches_pred = [] graph_pred_list_remove_empty = [ G for G in graph_pred_list if not G.number_of_nodes() == 0 ] indices = motif_to_indices[motif_type] for G in graph_ref_list: orbit_counts = orca(G) motif_counts = np.sum(orbit_counts[:, indices], axis=1) if ground_truth_match is not None: match_cnt = 0 for elem in motif_counts: if elem == ground_truth_match: match_cnt += 1 num_matches_ref.append(match_cnt / G.number_of_nodes()) # hist, _ = np.histogram( # motif_counts, bins=bins, density=False) motif_temp = np.sum(motif_counts) / G.number_of_nodes() total_counts_ref.append(motif_temp) for G in graph_pred_list_remove_empty: orbit_counts = orca(G) motif_counts = np.sum(orbit_counts[:, indices], axis=1) if ground_truth_match is not None: match_cnt = 0 for elem in motif_counts: if elem == ground_truth_match: match_cnt += 1 num_matches_pred.append(match_cnt / G.number_of_nodes()) motif_temp = np.sum(motif_counts) / G.number_of_nodes() total_counts_pred.append(motif_temp) total_counts_ref = np.array(total_counts_ref)[:, None] total_counts_pred = np.array(total_counts_pred)[:, None] if compute_emd: # EMD option uses the same computation as GraphRNN, the alternative is MMD as computed by GRAN # mmd_dist = compute_mmd(total_counts_ref, total_counts_pred, kernel=emd, is_hist=False) mmd_dist = compute_mmd( total_counts_ref, total_counts_pred, kernel=gaussian, is_hist=False ) else: mmd_dist = compute_mmd( total_counts_ref, total_counts_pred, kernel=gaussian, is_hist=False ) return mmd_dist def orbit_stats_all(graph_ref_list, graph_pred_list, compute_emd=False): total_counts_ref = [] total_counts_pred = [] graph_pred_list_remove_empty = [ G for G in graph_pred_list if not 
G.number_of_nodes() == 0 ] for G in graph_ref_list: orbit_counts = orca(G) orbit_counts_graph = np.sum(orbit_counts, axis=0) / G.number_of_nodes() total_counts_ref.append(orbit_counts_graph) for G in graph_pred_list: orbit_counts = orca(G) orbit_counts_graph = np.sum(orbit_counts, axis=0) / G.number_of_nodes() total_counts_pred.append(orbit_counts_graph) total_counts_ref = np.array(total_counts_ref) total_counts_pred = np.array(total_counts_pred) # mmd_dist = compute_mmd( # total_counts_ref, # total_counts_pred, # kernel=gaussian, # is_hist=False, # sigma=30.0) # mmd_dist = compute_mmd( # total_counts_ref, # total_counts_pred, # kernel=gaussian_tv, # is_hist=False, # sigma=30.0) if compute_emd: # mmd_dist = compute_mmd(total_counts_ref, total_counts_pred, kernel=emd, sigma=30.0) # EMD option uses the same computation as GraphRNN, the alternative is MMD as computed by GRAN mmd_dist = compute_mmd( total_counts_ref, total_counts_pred, kernel=gaussian, is_hist=False, sigma=30.0, ) else: mmd_dist = compute_mmd( total_counts_ref, total_counts_pred, kernel=gaussian_tv, is_hist=False, sigma=30.0, ) return mmd_dist def eval_acc_lobster_graph(G_list): G_list = [copy.deepcopy(gg) for gg in G_list] count = 0 for gg in G_list: if is_lobster_graph(gg): count += 1 return count / float(len(G_list)) def eval_acc_tree_graph(G_list): count = 0 for gg in G_list: if nx.is_tree(gg): count += 1 return count / float(len(G_list)) def eval_acc_grid_graph(G_list, grid_start=10, grid_end=20): count = 0 for gg in G_list: if is_grid_graph(gg): count += 1 return count / float(len(G_list)) def eval_acc_sbm_graph( G_list, p_intra=0.3, p_inter=0.005, strict=True, refinement_steps=1000, is_parallel=True, ): count = 0.0 if is_parallel: with concurrent.futures.ThreadPoolExecutor() as executor: for prob in executor.map( is_sbm_graph, [gg for gg in G_list], [p_intra for i in range(len(G_list))], [p_inter for i in range(len(G_list))], [strict for i in range(len(G_list))], [refinement_steps for i in range(len(G_list))], ): count += prob else: for gg in G_list: count += is_sbm_graph( gg, p_intra=p_intra, p_inter=p_inter, strict=strict, refinement_steps=refinement_steps, ) return count / float(len(G_list)) def eval_acc_planar_graph(G_list): count = 0 for gg in G_list: if is_planar_graph(gg): count += 1 return count / float(len(G_list)) def is_planar_graph(G): return nx.is_connected(G) and nx.check_planarity(G)[0] def is_lobster_graph(G): """ Check a given graph is a lobster graph or not Removing leaf nodes twice: lobster -> caterpillar -> path """ ### Check if G is a tree if nx.is_tree(G): G = G.copy() ### Check if G is a path after removing leaves twice leaves = [n for n, d in G.degree() if d == 1] G.remove_nodes_from(leaves) leaves = [n for n, d in G.degree() if d == 1] G.remove_nodes_from(leaves) num_nodes = len(G.nodes()) num_degree_one = [d for n, d in G.degree() if d == 1] num_degree_two = [d for n, d in G.degree() if d == 2] if sum(num_degree_one) == 2 and sum(num_degree_two) == 2 * (num_nodes - 2): return True elif sum(num_degree_one) == 0 and sum(num_degree_two) == 0: return True else: return False else: return False def is_grid_graph(G): """ Check if the graph is grid, by comparing with all the real grids with the same node count """ all_grid_file = f"data/all_grids.pt" if os.path.isfile(all_grid_file): all_grids = torch.load(all_grid_file) else: all_grids = {} for i in range(2, 20): for j in range(2, 20): G_grid = nx.grid_2d_graph(i, j) n_nodes = f"{len(G_grid.nodes())}" all_grids[n_nodes] = all_grids.get(n_nodes, []) + 
[G_grid] torch.save(all_grids, all_grid_file) n_nodes = f"{len(G.nodes())}" if n_nodes in all_grids: for G_grid in all_grids[n_nodes]: if nx.faster_could_be_isomorphic(G, G_grid): if nx.is_isomorphic(G, G_grid): return True return False else: return False def is_sbm_graph(G, p_intra=0.3, p_inter=0.005, strict=True, refinement_steps=1000): """ Check if how closely given graph matches a SBM with given probabilites by computing mean probability of Wald test statistic for each recovered parameter """ adj = nx.adjacency_matrix(G).toarray() idx = adj.nonzero() g = gt.Graph() g.add_edge_list(np.transpose(idx)) try: state = gt.minimize_blockmodel_dl(g) except ValueError: if strict: return False else: return 0.0 # Refine using merge-split MCMC for i in range(refinement_steps): state.multiflip_mcmc_sweep(beta=np.inf, niter=10) b = state.get_blocks() b = gt.contiguous_map(state.get_blocks()) state = state.copy(b=b) e = state.get_matrix() n_blocks = state.get_nonempty_B() node_counts = state.get_nr().get_array()[:n_blocks] edge_counts = e.todense()[:n_blocks, :n_blocks] if strict: if ( (node_counts > 40).sum() > 0 or (node_counts < 20).sum() > 0 or n_blocks > 5 or n_blocks < 2 ): return False max_intra_edges = node_counts * (node_counts - 1) est_p_intra = np.diagonal(edge_counts) / (max_intra_edges + 1e-6) max_inter_edges = node_counts.reshape((-1, 1)) @ node_counts.reshape((1, -1)) np.fill_diagonal(edge_counts, 0) est_p_inter = edge_counts / (max_inter_edges + 1e-6) W_p_intra = (est_p_intra - p_intra) ** 2 / (est_p_intra * (1 - est_p_intra) + 1e-6) W_p_inter = (est_p_inter - p_inter) ** 2 / (est_p_inter * (1 - est_p_inter) + 1e-6) W = W_p_inter.copy() np.fill_diagonal(W, W_p_intra) p = 1 - chi2.cdf(abs(W), 1) p = p.mean() if strict: return p > 0.9 # p value < 10 % else: return p def eval_fraction_isomorphic(fake_graphs, train_graphs): count = 0 for fake_g in fake_graphs: for train_g in train_graphs: if nx.faster_could_be_isomorphic(fake_g, train_g): if nx.is_isomorphic(fake_g, train_g): count += 1 break return count / float(len(fake_graphs)) def eval_fraction_unique(fake_graphs, precise=False): count_non_unique = 0 fake_evaluated = [] for fake_g in fake_graphs: unique = True if not fake_g.number_of_nodes() == 0: for fake_old in fake_evaluated: if precise: if nx.faster_could_be_isomorphic(fake_g, fake_old): if nx.is_isomorphic(fake_g, fake_old): count_non_unique += 1 unique = False break else: if nx.faster_could_be_isomorphic(fake_g, fake_old): if nx.could_be_isomorphic(fake_g, fake_old): count_non_unique += 1 unique = False break if unique: fake_evaluated.append(fake_g) frac_unique = (float(len(fake_graphs)) - count_non_unique) / float( len(fake_graphs) ) # Fraction of distinct isomorphism classes in the fake graphs return frac_unique def eval_fraction_unique_non_isomorphic_valid( fake_graphs, train_graphs, validity_func=(lambda x: True) ): count_valid = 0 count_isomorphic = 0 count_non_unique = 0 fake_evaluated = [] for fake_g in fake_graphs: unique = True for fake_old in fake_evaluated: if nx.faster_could_be_isomorphic(fake_g, fake_old): if nx.is_isomorphic(fake_g, fake_old): count_non_unique += 1 unique = False break if unique: fake_evaluated.append(fake_g) non_isomorphic = True for train_g in train_graphs: if nx.faster_could_be_isomorphic(fake_g, train_g): if nx.is_isomorphic(fake_g, train_g): count_isomorphic += 1 non_isomorphic = False break if non_isomorphic: if validity_func(fake_g): count_valid += 1 frac_unique = (float(len(fake_graphs)) - count_non_unique) / float( len(fake_graphs) ) # 
Fraction of distinct isomorphism classes in the fake graphs frac_unique_non_isomorphic = ( float(len(fake_graphs)) - count_non_unique - count_isomorphic ) / float( len(fake_graphs) ) # Fraction of distinct isomorphism classes in the fake graphs that are not in the training set frac_unique_non_isomorphic_valid = count_valid / float( len(fake_graphs) ) # Fraction of distinct isomorphism classes in the fake graphs that are not in the training set and are valid return frac_unique, frac_unique_non_isomorphic, frac_unique_non_isomorphic_valid class SpectreSamplingMetrics(nn.Module): def __init__(self, dataloaders, compute_emd, metrics_list): super().__init__() self.train_graphs = self.loader_to_nx(dataloaders["train"]) self.val_graphs = self.loader_to_nx(dataloaders["val"]) self.test_graphs = self.loader_to_nx(dataloaders["test"]) self.num_graphs_test = len(self.test_graphs) self.num_graphs_val = len(self.val_graphs) print('num_train_graphs is', len(self.train_graphs)) print('num_graphs_test is', self.num_graphs_test) print('num_graphs_val is', self.num_graphs_val) self.compute_emd = compute_emd self.metrics_list = metrics_list def loader_to_nx(self, loader): networkx_graphs = [] for i, batch in enumerate(loader): data_list = batch.to_data_list() for j, data in enumerate(data_list): networkx_graphs.append( to_networkx( data, node_attrs=None, edge_attrs=None, to_undirected=True, remove_self_loops=True, ) ) return networkx_graphs def neural_metrics(self, generated): # Neural metrics gin_model = load_feature_extractor(device='cpu') # take a gin-model with predefined params and random weights fid_evaluator = FIDEvaluation(model=gin_model)
rbf_evaluator = MMDEvaluation(model=gin_model, kernel='rbf', sigma='range', multiplier='mean')
8
2023-10-30 12:12:16+00:00
8k
cxyfreedom/website-hot-hub
main.py
[ { "identifier": "WebsiteSSPai", "path": "website_sspai.py", "snippet": "class WebsiteSSPai:\n @staticmethod\n def get_raw() -> dict:\n ret = {}\n try:\n with request_session() as s:\n resp = s.get(url)\n ret = resp.json()\n except:\n logger.exception(\"get data failed\")\n return ret\n\n @staticmethod\n def clean_raw(raw_data: dict) -> typing.List[typing.Dict[str, typing.Any]]:\n ret: typing.List[typing.Dict[str, typing.Any]] = []\n for item in raw_data[\"data\"]:\n ret.append(\n {\n \"title\": item[\"title\"],\n \"url\": f\"https://sspai.com/post/{item['id']}\",\n }\n )\n return ret\n\n @staticmethod\n def read_already_download(\n full_path: str,\n ) -> typing.List[typing.Dict[str, typing.Any]]:\n content: typing.List[typing.Dict[str, typing.Any]] = []\n if pathlib.Path(full_path).exists():\n with open(full_path) as fd:\n content = json.loads(fd.read())\n return content\n\n @staticmethod\n def create_list(content: typing.List[typing.Dict[str, typing.Any]]) -> str:\n topics = []\n template = \"\"\"<!-- BEGIN SSPAI -->\n<!-- 最后更新时间 {update_time} -->\n{topics}\n<!-- END SSPAI -->\"\"\"\n\n for item in content:\n topics.append(f\"1. [{item['title']}]({item['url']})\")\n template = template.replace(\"{update_time}\", current_time())\n template = template.replace(\"{topics}\", \"\\n\".join(topics))\n return template\n\n @staticmethod\n def create_raw(full_path: str, raw: str) -> None:\n write_text_file(full_path, raw)\n\n @staticmethod\n def merge_data(\n cur: typing.List[typing.Dict[str, typing.Any]],\n another: typing.List[typing.Dict[str, typing.Any]],\n ):\n merged_dict: typing.Dict[str, typing.Any] = {}\n for item in chain(cur, another):\n merged_dict[item[\"url\"]] = item[\"title\"]\n\n return [{\"url\": k, \"title\": v} for k, v in merged_dict.items()]\n\n def update_readme(self, content: typing.List[typing.Dict[str, typing.Any]]) -> str:\n with open(\"./README.md\", \"r\") as fd:\n readme = fd.read()\n return re.sub(\n r\"<!-- BEGIN SSPAI -->[\\W\\w]*<!-- END SSPAI -->\",\n self.create_list(content),\n readme,\n )\n\n def create_archive(\n self, content: typing.List[typing.Dict[str, typing.Any]], date: str\n ) -> str:\n return f\"# {date}\\n\\n共 {len(content)} 条\\n\\n{self.create_list(content)}\"\n\n def run(self):\n dir_name = \"sspai\"\n\n raw_data = self.get_raw()\n cleaned_data = self.clean_raw(raw_data)\n\n cur_date = current_date()\n # 写入原始数据\n raw_path = f\"./raw/{dir_name}/{cur_date}.json\"\n already_download_data = self.read_already_download(raw_path)\n merged_data = self.merge_data(cleaned_data, already_download_data)\n\n self.create_raw(raw_path, json.dumps(merged_data, ensure_ascii=False))\n\n # 更新 README\n readme_text = self.update_readme(merged_data)\n readme_path = \"./README.md\"\n write_text_file(readme_path, readme_text)\n\n # 更新 archive\n archive_text = self.create_archive(merged_data, cur_date)\n archive_path = f\"./archives/{dir_name}/{cur_date}.md\"\n write_text_file(archive_path, archive_text)" }, { "identifier": "WebSite36Kr", "path": "website_36kr.py", "snippet": "class WebSite36Kr:\n @staticmethod\n def get_raw() -> dict:\n ret = {}\n try:\n payload = {\n \"partner_id\": \"wap\",\n \"param\": {\"siteId\": 1, \"platformId\": 2},\n \"timestamp\": int(time.time()),\n }\n with request_session() as s:\n resp = s.post(url, json=payload)\n ret = resp.json()\n except:\n logger.exception(\"get data failed\")\n return ret\n\n @staticmethod\n def clean_raw(raw_data: dict) -> typing.List[typing.Dict[str, typing.Any]]:\n ret: typing.List[typing.Dict[str, typing.Any]] = 
[]\n for item in raw_data[\"data\"][\"hotRankList\"]:\n ret.append(\n {\n \"title\": item[\"templateMaterial\"][\"widgetTitle\"],\n \"url\": f\"https://36kr.com/p/{item['itemId']}\",\n }\n )\n return ret\n\n @staticmethod\n def read_already_download(\n full_path: str,\n ) -> typing.List[typing.Dict[str, typing.Any]]:\n content: typing.List[typing.Dict[str, typing.Any]] = []\n if pathlib.Path(full_path).exists():\n with open(full_path) as fd:\n content = json.loads(fd.read())\n return content\n\n @staticmethod\n def create_list(content: typing.List[typing.Dict[str, typing.Any]]) -> str:\n topics = []\n template = \"\"\"<!-- BEGIN 36KR -->\n<!-- 最后更新时间 {update_time} -->\n{topics}\n<!-- END 36KR -->\"\"\"\n\n for item in content:\n topics.append(f\"1. [{item['title']}]({item['url']})\")\n template = template.replace(\"{update_time}\", current_time())\n template = template.replace(\"{topics}\", \"\\n\".join(topics))\n return template\n\n @staticmethod\n def create_raw(full_path: str, raw: str) -> None:\n write_text_file(full_path, raw)\n\n @staticmethod\n def merge_data(\n cur: typing.List[typing.Dict[str, typing.Any]],\n another: typing.List[typing.Dict[str, typing.Any]],\n ):\n merged_dict: typing.Dict[str, typing.Any] = {}\n for item in chain(cur, another):\n merged_dict[item[\"url\"]] = item[\"title\"]\n\n return [{\"url\": k, \"title\": v} for k, v in merged_dict.items()]\n\n def update_readme(self, content: typing.List[typing.Dict[str, typing.Any]]) -> str:\n with open(\"./README.md\", \"r\") as fd:\n readme = fd.read()\n return re.sub(\n r\"<!-- BEGIN 36KR -->[\\W\\w]*<!-- END 36KR -->\",\n self.create_list(content),\n readme,\n )\n\n def create_archive(\n self, content: typing.List[typing.Dict[str, typing.Any]], date: str\n ) -> str:\n return f\"# {date}\\n\\n共 {len(content)} 条\\n\\n{self.create_list(content)}\"\n\n def run(self):\n dir_name = \"36kr\"\n\n raw_data = self.get_raw()\n cleaned_data = self.clean_raw(raw_data)\n\n cur_date = current_date()\n # 写入原始数据\n raw_path = f\"./raw/{dir_name}/{cur_date}.json\"\n already_download_data = self.read_already_download(raw_path)\n merged_data = self.merge_data(cleaned_data, already_download_data)\n\n self.create_raw(raw_path, json.dumps(merged_data, ensure_ascii=False))\n\n # 更新 README\n readme_text = self.update_readme(merged_data)\n readme_path = \"./README.md\"\n write_text_file(readme_path, readme_text)\n\n # 更新 archive\n archive_text = self.create_archive(merged_data, cur_date)\n archive_path = f\"./archives/{dir_name}/{cur_date}.md\"\n write_text_file(archive_path, archive_text)" }, { "identifier": "WebSiteBilibili", "path": "website_bilibili.py", "snippet": "class WebSiteBilibili:\n @staticmethod\n def get_raw() -> dict:\n ret = {}\n try:\n with request_session() as s:\n resp = s.get(url)\n ret = resp.json()\n except:\n logger.exception(\"get data failed\")\n return ret\n\n @staticmethod\n def clean_raw(raw_data: dict) -> typing.List[typing.Dict[str, typing.Any]]:\n ret: typing.List[typing.Dict[str, typing.Any]] = []\n for item in raw_data[\"data\"][\"list\"]:\n ret.append(\n {\n \"title\": item[\"title\"],\n \"url\": item[\"short_link_v2\"] or f\"https://b23.tv/{item['bvid']}\",\n }\n )\n return ret\n\n @staticmethod\n def read_already_download(\n full_path: str,\n ) -> typing.List[typing.Dict[str, typing.Any]]:\n content: typing.List[typing.Dict[str, typing.Any]] = []\n if pathlib.Path(full_path).exists():\n with open(full_path) as fd:\n content = json.loads(fd.read())\n return content\n\n @staticmethod\n def create_list(content: 
typing.List[typing.Dict[str, typing.Any]]) -> str:\n topics = []\n template = \"\"\"<!-- BEGIN BILIBILI -->\n<!-- 最后更新时间 {update_time} -->\n{topics}\n<!-- END BILIBILI -->\"\"\"\n\n for item in content:\n topics.append(f\"1. [{item['title']}]({item['url']})\")\n template = template.replace(\"{update_time}\", current_time())\n template = template.replace(\"{topics}\", \"\\n\".join(topics))\n return template\n\n @staticmethod\n def create_raw(full_path: str, raw: str) -> None:\n write_text_file(full_path, raw)\n\n @staticmethod\n def merge_data(\n cur: typing.List[typing.Dict[str, typing.Any]],\n another: typing.List[typing.Dict[str, typing.Any]],\n ):\n merged_dict: typing.Dict[str, typing.Any] = {}\n for item in chain(cur, another):\n merged_dict[item[\"url\"]] = item[\"title\"]\n\n return [{\"url\": k, \"title\": v} for k, v in merged_dict.items()]\n\n def update_readme(self, content: typing.List[typing.Dict[str, typing.Any]]) -> str:\n with open(\"./README.md\", \"r\") as fd:\n readme = fd.read()\n return re.sub(\n r\"<!-- BEGIN BILIBILI -->[\\W\\w]*<!-- END BILIBILI -->\",\n self.create_list(content),\n readme,\n )\n\n def create_archive(\n self, content: typing.List[typing.Dict[str, typing.Any]], date: str\n ) -> str:\n return f\"# {date}\\n\\n共 {len(content)} 条\\n\\n{self.create_list(content)}\"\n\n def run(self):\n dir_name = \"bilibili\"\n\n raw_data = self.get_raw()\n cleaned_data = self.clean_raw(raw_data)\n\n cur_date = current_date()\n # 写入原始数据\n raw_path = f\"./raw/{dir_name}/{cur_date}.json\"\n already_download_data = self.read_already_download(raw_path)\n merged_data = self.merge_data(cleaned_data, already_download_data)\n\n self.create_raw(raw_path, json.dumps(merged_data, ensure_ascii=False))\n\n # 更新 README\n readme_text = self.update_readme(merged_data)\n readme_path = \"./README.md\"\n write_text_file(readme_path, readme_text)\n\n # 更新 archive\n archive_text = self.create_archive(merged_data, cur_date)\n archive_path = f\"./archives/{dir_name}/{cur_date}.md\"\n write_text_file(archive_path, archive_text)" }, { "identifier": "WebSiteDouYin", "path": "website_douyin.py", "snippet": "class WebSiteDouYin:\n\n @staticmethod\n def get_raw(cookie: str) -> dict:\n ret = {}\n try:\n with request_session() as s:\n s.headers.update({\"Cookie\": f\"passport_csrf_token={cookie}\"})\n resp = s.get(url)\n ret = resp.json()\n except:\n logger.exception(\"get data failed\")\n return ret\n\n @staticmethod\n def get_cookie() -> str:\n cookie_data = \"\"\n try:\n with request_session() as s:\n resp = s.get(cookie_url)\n regex = re.compile(r\"passport_csrf_token=(.*?); Path=/;\")\n result = re.match(regex, resp.headers[\"Set-Cookie\"])\n if result:\n cookie_data = result.group(1)\n return cookie_data\n except:\n logger.exception(\"get douyin cookie failed\")\n return cookie_data\n\n @staticmethod\n def clean_raw(raw_data: dict) -> typing.List[typing.Dict[str, typing.Any]]:\n ret: typing.List[typing.Dict[str, typing.Any]] = []\n for item in raw_data[\"data\"][\"word_list\"]:\n ret.append(\n {\n \"title\": item[\"word\"],\n \"url\": f\"https://www.douyin.com/hot/{item['sentence_id']}\",\n }\n )\n return ret\n\n @staticmethod\n def read_already_download(\n full_path: str,\n ) -> typing.List[typing.Dict[str, typing.Any]]:\n content: typing.List[typing.Dict[str, typing.Any]] = []\n if pathlib.Path(full_path).exists():\n with open(full_path) as fd:\n content = json.loads(fd.read())\n return content\n\n @staticmethod\n def create_list(content: typing.List[typing.Dict[str, typing.Any]]) -> str:\n 
topics = []\n template = \"\"\"<!-- BEGIN DOUYIN -->\n<!-- 最后更新时间 {update_time} -->\n{topics}\n<!-- END DOUYIN -->\"\"\"\n\n for item in content:\n topics.append(f\"1. [{item['title']}]({item['url']})\")\n template = template.replace(\"{update_time}\", current_time())\n template = template.replace(\"{topics}\", \"\\n\".join(topics))\n return template\n\n @staticmethod\n def create_raw(full_path: str, raw: str) -> None:\n write_text_file(full_path, raw)\n\n @staticmethod\n def merge_data(\n cur: typing.List[typing.Dict[str, typing.Any]],\n another: typing.List[typing.Dict[str, typing.Any]],\n ):\n merged_dict: typing.Dict[str, typing.Any] = {}\n for item in chain(cur, another):\n merged_dict[item[\"url\"]] = item[\"title\"]\n\n return [{\"url\": k, \"title\": v} for k, v in merged_dict.items()]\n\n def update_readme(self, content: typing.List[typing.Dict[str, typing.Any]]) -> str:\n with open(\"./README.md\", \"r\") as fd:\n readme = fd.read()\n return re.sub(\n r\"<!-- BEGIN DOUYIN -->[\\W\\w]*<!-- END DOUYIN -->\",\n self.create_list(content),\n readme,\n )\n\n def create_archive(\n self, content: typing.List[typing.Dict[str, typing.Any]], date: str\n ) -> str:\n return f\"# {date}\\n\\n共 {len(content)} 条\\n\\n{self.create_list(content)}\"\n\n def run(self):\n dir_name = \"douyin\"\n cookie_data = self.get_cookie()\n\n raw_data = self.get_raw(cookie_data)\n cleaned_data = self.clean_raw(raw_data)\n\n cur_date = current_date()\n # 写入原始数据\n raw_path = f\"./raw/{dir_name}/{cur_date}.json\"\n already_download_data = self.read_already_download(raw_path)\n merged_data = self.merge_data(cleaned_data, already_download_data)\n\n self.create_raw(raw_path, json.dumps(merged_data, ensure_ascii=False))\n\n # 更新 README\n readme_text = self.update_readme(merged_data)\n readme_path = \"./README.md\"\n write_text_file(readme_path, readme_text)\n\n # 更新 archive\n archive_text = self.create_archive(merged_data, cur_date)\n archive_path = f\"./archives/{dir_name}/{cur_date}.md\"\n write_text_file(archive_path, archive_text)" }, { "identifier": "WebSiteJueJin", "path": "website_juejin.py", "snippet": "class WebSiteJueJin:\n @staticmethod\n def get_raw() -> dict:\n ret = {}\n try:\n with request_session() as s:\n resp = s.get(url)\n ret = resp.json()\n except:\n logger.exception(\"get data failed\")\n return ret\n\n @staticmethod\n def clean_raw(raw_data: dict) -> typing.List[typing.Dict[str, typing.Any]]:\n ret: typing.List[typing.Dict[str, typing.Any]] = []\n for item in raw_data[\"data\"]:\n ret.append(\n {\n \"title\": item[\"content\"][\"title\"],\n \"url\": f\"https://juejin.cn/post/{item['content']['content_id']}\",\n }\n )\n return ret\n\n @staticmethod\n def read_already_download(\n full_path: str,\n ) -> typing.List[typing.Dict[str, typing.Any]]:\n content: typing.List[typing.Dict[str, typing.Any]] = []\n if pathlib.Path(full_path).exists():\n with open(full_path) as fd:\n content = json.loads(fd.read())\n return content\n\n @staticmethod\n def create_list(content: typing.List[typing.Dict[str, typing.Any]]) -> str:\n topics = []\n template = \"\"\"<!-- BEGIN JUEJIN -->\n<!-- 最后更新时间 {update_time} -->\n{topics}\n<!-- END JUEJIN -->\"\"\"\n\n for item in content:\n topics.append(f\"1. 
[{item['title']}]({item['url']})\")\n template = template.replace(\"{update_time}\", current_time())\n template = template.replace(\"{topics}\", \"\\n\".join(topics))\n return template\n\n @staticmethod\n def create_raw(full_path: str, raw: str) -> None:\n write_text_file(full_path, raw)\n\n @staticmethod\n def merge_data(\n cur: typing.List[typing.Dict[str, typing.Any]],\n another: typing.List[typing.Dict[str, typing.Any]],\n ):\n merged_dict: typing.Dict[str, typing.Any] = {}\n for item in chain(cur, another):\n merged_dict[item[\"url\"]] = item[\"title\"]\n\n return [{\"url\": k, \"title\": v} for k, v in merged_dict.items()]\n\n def update_readme(self, content: typing.List[typing.Dict[str, typing.Any]]) -> str:\n with open(\"./README.md\", \"r\") as fd:\n readme = fd.read()\n return re.sub(\n r\"<!-- BEGIN JUEJIN -->[\\W\\w]*<!-- END JUEJIN -->\",\n self.create_list(content),\n readme,\n )\n\n def create_archive(\n self, content: typing.List[typing.Dict[str, typing.Any]], date: str\n ) -> str:\n return f\"# {date}\\n\\n共 {len(content)} 条\\n\\n{self.create_list(content)}\"\n\n def run(self):\n dir_name = \"juejin\"\n\n raw_data = self.get_raw()\n cleaned_data = self.clean_raw(raw_data)\n\n cur_date = current_date()\n # 写入原始数据\n raw_path = f\"./raw/{dir_name}/{cur_date}.json\"\n already_download_data = self.read_already_download(raw_path)\n merged_data = self.merge_data(cleaned_data, already_download_data)\n\n self.create_raw(raw_path, json.dumps(merged_data, ensure_ascii=False))\n\n # 更新 README\n readme_text = self.update_readme(merged_data)\n readme_path = \"./README.md\"\n write_text_file(readme_path, readme_text)\n\n # 更新 archive\n archive_text = self.create_archive(merged_data, cur_date)\n archive_path = f\"./archives/{dir_name}/{cur_date}.md\"\n write_text_file(archive_path, archive_text)" }, { "identifier": "WebsiteWeRead", "path": "website_weread.py", "snippet": "class WebsiteWeRead:\n @staticmethod\n def get_raw() -> dict:\n ret = {}\n try:\n with request_session() as s:\n resp = s.get(url)\n ret = resp.json()\n except:\n logger.exception(\"get data failed\")\n return ret\n\n @staticmethod\n def clean_raw(raw_data: dict) -> typing.List[typing.Dict[str, typing.Any]]:\n ret: typing.List[typing.Dict[str, typing.Any]] = []\n for item in raw_data[\"books\"]:\n ret.append(\n {\n \"title\": item[\"bookInfo\"][\"title\"],\n \"url\": f\"https://weread.qq.com/web/bookDetail/{get_weread_id(item['bookInfo']['bookId'])}\",\n }\n )\n return ret\n\n @staticmethod\n def read_already_download(\n full_path: str,\n ) -> typing.List[typing.Dict[str, typing.Any]]:\n content: typing.List[typing.Dict[str, typing.Any]] = []\n if pathlib.Path(full_path).exists():\n with open(full_path) as fd:\n content = json.loads(fd.read())\n return content\n\n @staticmethod\n def create_list(content: typing.List[typing.Dict[str, typing.Any]]) -> str:\n topics = []\n template = \"\"\"<!-- BEGIN WEREAD -->\n<!-- 最后更新时间 {update_time} -->\n{topics}\n<!-- END WEREAD -->\"\"\"\n\n for item in content:\n topics.append(f\"1. 
[{item['title']}]({item['url']})\")\n template = template.replace(\"{update_time}\", current_time())\n template = template.replace(\"{topics}\", \"\\n\".join(topics))\n return template\n\n @staticmethod\n def create_raw(full_path: str, raw: str) -> None:\n write_text_file(full_path, raw)\n\n @staticmethod\n def merge_data(\n cur: typing.List[typing.Dict[str, typing.Any]],\n another: typing.List[typing.Dict[str, typing.Any]],\n ):\n merged_dict: typing.Dict[str, typing.Any] = {}\n for item in chain(cur, another):\n merged_dict[item[\"url\"]] = item[\"title\"]\n\n return [{\"url\": k, \"title\": v} for k, v in merged_dict.items()]\n\n def update_readme(self, content: typing.List[typing.Dict[str, typing.Any]]) -> str:\n with open(\"./README.md\", \"r\") as fd:\n readme = fd.read()\n return re.sub(\n r\"<!-- BEGIN WEREAD -->[\\W\\w]*<!-- END WEREAD -->\",\n self.create_list(content),\n readme,\n )\n\n def create_archive(\n self, content: typing.List[typing.Dict[str, typing.Any]], date: str\n ) -> str:\n return f\"# {date}\\n\\n共 {len(content)} 条\\n\\n{self.create_list(content)}\"\n\n def run(self):\n dir_name = \"weread\"\n\n raw_data = self.get_raw()\n\n cleaned_data = self.clean_raw(raw_data)\n\n cur_date = current_date()\n # 写入原始数据\n raw_path = f\"./raw/{dir_name}/{cur_date}.json\"\n already_download_data = self.read_already_download(raw_path)\n merged_data = self.merge_data(cleaned_data, already_download_data)\n\n self.create_raw(raw_path, json.dumps(merged_data, ensure_ascii=False))\n\n # 更新 README\n readme_text = self.update_readme(merged_data)\n readme_path = \"./README.md\"\n write_text_file(readme_path, readme_text)\n\n # 更新 archive\n archive_text = self.create_archive(merged_data, cur_date)\n archive_path = f\"./archives/{dir_name}/{cur_date}.md\"\n write_text_file(archive_path, archive_text)" }, { "identifier": "WebsiteKuaiShou", "path": "website_kuaishou.py", "snippet": "class WebsiteKuaiShou:\n @staticmethod\n def get_raw() -> dict:\n ret = {}\n try:\n with request_session() as s:\n resp = s.get(url)\n content = resp.text\n regex = r\"window.__APOLLO_STATE__=(.*);\\(function\\(\\)\"\n result = re.search(regex, content, re.DOTALL)\n if result:\n ret = json.loads(result.group(1))[\"defaultClient\"]\n except:\n logger.exception(\"get data failed\")\n return ret\n\n @staticmethod\n def clean_raw(raw_data: dict) -> typing.List[typing.Dict[str, typing.Any]]:\n ret: typing.List[typing.Dict[str, typing.Any]] = []\n for item in raw_data['$ROOT_QUERY.visionHotRank({\"page\":\"home\"})'][\"items\"]:\n image = raw_data[item[\"id\"]][\"poster\"]\n _id = re.search(r\"clientCacheKey=([A-Za-z0-9]+)\", image).group(1)\n ret.append(\n {\n \"title\": raw_data[item[\"id\"]][\"name\"],\n \"url\": f\"https://www.kuaishou.com/short-video/{_id}\",\n }\n )\n return ret\n\n @staticmethod\n def read_already_download(\n full_path: str,\n ) -> typing.List[typing.Dict[str, typing.Any]]:\n content: typing.List[typing.Dict[str, typing.Any]] = []\n if pathlib.Path(full_path).exists():\n with open(full_path) as fd:\n content = json.loads(fd.read())\n return content\n\n @staticmethod\n def create_list(content: typing.List[typing.Dict[str, typing.Any]]) -> str:\n topics = []\n template = \"\"\"<!-- BEGIN KUAISHOU -->\n<!-- 最后更新时间 {update_time} -->\n{topics}\n<!-- END KUAISHOU -->\"\"\"\n\n for item in content:\n topics.append(f\"1. 
[{item['title']}]({item['url']})\")\n template = template.replace(\"{update_time}\", current_time())\n template = template.replace(\"{topics}\", \"\\n\".join(topics))\n return template\n\n @staticmethod\n def create_raw(full_path: str, raw: str) -> None:\n write_text_file(full_path, raw)\n\n @staticmethod\n def merge_data(\n cur: typing.List[typing.Dict[str, typing.Any]],\n another: typing.List[typing.Dict[str, typing.Any]],\n ):\n merged_dict: typing.Dict[str, typing.Any] = {}\n for item in chain(cur, another):\n merged_dict[item[\"url\"]] = item[\"title\"]\n\n return [{\"url\": k, \"title\": v} for k, v in merged_dict.items()]\n\n def update_readme(self, content: typing.List[typing.Dict[str, typing.Any]]) -> str:\n with open(\"./README.md\", \"r\") as fd:\n readme = fd.read()\n return re.sub(\n r\"<!-- BEGIN KUAISHOU -->[\\W\\w]*<!-- END KUAISHOU -->\",\n self.create_list(content),\n readme,\n )\n\n def create_archive(\n self, content: typing.List[typing.Dict[str, typing.Any]], date: str\n ) -> str:\n return f\"# {date}\\n\\n共 {len(content)} 条\\n\\n{self.create_list(content)}\"\n\n def run(self):\n dir_name = \"kuaishou\"\n\n raw_data = self.get_raw()\n cleaned_data = self.clean_raw(raw_data)\n\n cur_date = current_date()\n # 写入原始数据\n raw_path = f\"./raw/{dir_name}/{cur_date}.json\"\n already_download_data = self.read_already_download(raw_path)\n merged_data = self.merge_data(cleaned_data, already_download_data)\n\n self.create_raw(raw_path, json.dumps(merged_data, ensure_ascii=False))\n\n # 更新 README\n readme_text = self.update_readme(merged_data)\n readme_path = \"./README.md\"\n write_text_file(readme_path, readme_text)\n\n # 更新 archive\n archive_text = self.create_archive(merged_data, cur_date)\n archive_path = f\"./archives/{dir_name}/{cur_date}.md\"\n write_text_file(archive_path, archive_text)" } ]
import concurrent.futures from website_sspai import WebsiteSSPai from website_36kr import WebSite36Kr from website_bilibili import WebSiteBilibili from website_douyin import WebSiteDouYin from website_juejin import WebSiteJueJin from website_weread import WebsiteWeRead from website_kuaishou import WebsiteKuaiShou
6,897
# -*- coding: utf-8 -*- def run_task(func, *args): with concurrent.futures.ThreadPoolExecutor() as executor: executor.submit(func, *args) def main(): website_sspai_obj = WebsiteSSPai() website_36kr_obj = WebSite36Kr() website_bilibili_obj = WebSiteBilibili() website_douyin_obj = WebSiteDouYin() website_juejin_obj = WebSiteJueJin()
# -*- coding: utf-8 -*- def run_task(func, *args): with concurrent.futures.ThreadPoolExecutor() as executor: executor.submit(func, *args) def main(): website_sspai_obj = WebsiteSSPai() website_36kr_obj = WebSite36Kr() website_bilibili_obj = WebSiteBilibili() website_douyin_obj = WebSiteDouYin() website_juejin_obj = WebSiteJueJin()
website_weread_obj = WebsiteWeRead()
5
2023-10-25 14:31:11+00:00
8k
ZhangLin-PKU/FedFTG
train.py
[ { "identifier": "util_dataset", "path": "utils/util_dataset.py", "snippet": "COLOR_MAP = ['red', 'green', 'blue', 'black', 'brown', 'purple', 'yellow', 'pink', 'cyan', 'gray']\r\nclass DatasetObject:\r\nclass Dataset(torch.utils.data.Dataset):\r\nclass DatasetFromDir(data.Dataset):\r\n def __init__(self, dataset, n_client, seed, rule, unbalanced_sgm=0, rule_arg='', data_path=''):\r\n def _get_data_info(self):\r\n def _load_data(self):\r\n def _split_data(self, clnt_data_list, trn_x, trn_y, rule, rule_arg, sgm):\r\n def _load_split_data(self, seed, rule, rule_arg, sgm):\r\n def __init__(self, data_x, data_y=True, train=False, dataset_name=''):\r\n def __len__(self):\r\n def __getitem__(self, idx):\r\ndef split_datasets(dataname, num_clients, num_class, seed, sgm, rule, alpha, data_path='./data', showfig=False):\r\n def __init__(self, img_root, img_list, label_list, transformer):\r\n def __getitem__(self, index):\r\n def __len__(self):\r\ndef show_statis(data_obj, num_clients, num_class, dataname, save_path):\r" }, { "identifier": "util_parser", "path": "utils/util_parser.py", "snippet": "MODEL_ARCH = ['resnet18']\r\nDATASET_NAME = ['CIFAR10', 'CIFAR100']\r\nRULE = ['iid', 'Dirichlet']\r\nMETHODS = ['FedAvg', 'FedProx', 'FedDyn', 'SCAFFOLD', 'MOON',\r\n 'FedFTG', 'FedProxGAN', 'FedDynGAN', 'SCAFFOLDGAN', 'MOONGAN']\r\ndef prepare_parser():\r" }, { "identifier": "model_choose_fn", "path": "models/model_choose_fn.py", "snippet": "def choose_model(model_name, **kwargs):\r\ndef choose_g_model(model_name, **kwargs):\r" }, { "identifier": "FedAvg", "path": "methods/FedAvg.py", "snippet": "def train_FedAvg(data_obj, act_prob, learning_rate, batch_size, epoch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, sch_step, sch_gamma,\r\n save_period, suffix='', trial=True, data_path='',\r\n rand_seed=0, lr_decay_per_round=1):\r" }, { "identifier": "FedProx", "path": "methods/FedProx.py", "snippet": "def train_FedProx(data_obj, act_prob, learning_rate, batch_size, epoch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, sch_step, sch_gamma,\r\n save_period, mu, suffix='', trial=True, data_path='',\r\n rand_seed=0, lr_decay_per_round=1):\r" }, { "identifier": "SCAFFOLD", "path": "methods/SCAFFOLD.py", "snippet": "def train_SCAFFOLD(data_obj, act_prob, learning_rate, batch_size, n_minibatch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, sch_step, sch_gamma,\r\n save_period, suffix='', trial=True, data_path='', rand_seed=0, lr_decay_per_round=1,\r\n global_learning_rate=1):\r" }, { "identifier": "MOON", "path": "methods/MOON.py", "snippet": "def train_MOON(data_obj, act_prob, learning_rate, batch_size, epoch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, sch_step, sch_gamma,\r\n save_period, mu, tau, suffix='', trial=True, data_path='',\r\n rand_seed=0, lr_decay_per_round=1):\r" }, { "identifier": "FedDyn", "path": "methods/FedDyn.py", "snippet": "def train_FedDyn(data_obj, act_prob,\r\n learning_rate, batch_size, epoch, com_amount, print_per,\r\n weight_decay, model_func, init_model, alpha_coef,\r\n sch_step, sch_gamma, save_period,\r\n suffix='', trial=True, data_path='', rand_seed=0, lr_decay_per_round=1):\r" }, { "identifier": "FedFTG", "path": "methods/FedFTG.py", "snippet": "def train_FedFTG(data_obj, act_prob, learning_rate, batch_size, epoch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, init_g_model, sch_step, sch_gamma,\r\n save_period, suffix='', trial=True, data_path='',\r\n rand_seed=0, 
lr_decay_per_round=1):\r" }, { "identifier": "FedProxGAN", "path": "methods/FedProxGAN.py", "snippet": "def train_FedProxGAN(data_obj, act_prob, learning_rate, batch_size, epoch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, init_g_model, sch_step, sch_gamma,\r\n save_period, mu, suffix='', trial=True, data_path='',\r\n rand_seed=0, lr_decay_per_round=1):\r" }, { "identifier": "SCAFFOLDGAN", "path": "methods/SCAFFOLDGAN.py", "snippet": "def train_SCAFFOLDGAN(data_obj, act_prob, learning_rate, batch_size, n_minibatch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, init_g_model, sch_step, sch_gamma,\r\n save_period, suffix='', trial=True, data_path='', rand_seed=0, lr_decay_per_round=1,\r\n global_learning_rate=1):\r" }, { "identifier": "MOONGAN", "path": "methods/MOONGAN.py", "snippet": "def train_MOONGAN(data_obj, act_prob, learning_rate, batch_size, epoch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, init_g_model, sch_step, sch_gamma,\r\n save_period, mu, tau, suffix='', trial=True, data_path='',\r\n rand_seed=0, lr_decay_per_round=1):\r" }, { "identifier": "FedDynGAN", "path": "methods/FedDynGAN.py", "snippet": "def train_FedDynGAN(data_obj, act_prob,\r\n learning_rate, batch_size, epoch, com_amount, print_per,\r\n weight_decay, model_func, init_model, init_g_model, alpha_coef,\r\n sch_step, sch_gamma, save_period,\r\n suffix='', trial=True, data_path='', rand_seed=0, lr_decay_per_round=1):\r" } ]
from utils import util_dataset, util_parser from models import model_choose_fn from methods import FedAvg, FedProx, SCAFFOLD, MOON, FedDyn from methods import FedFTG, FedProxGAN, SCAFFOLDGAN, MOONGAN, FedDynGAN import torch import os import random import numpy as np import matplotlib.pyplot as plt
4,019
batch_size=conf['bs'], epoch=conf['localE'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = fedavg_res[-1] elif conf['method'] == 'FedProx': print('Train with FedProx+++++++++++++++++++++++++++++++') fedprox_res = FedProx.train_FedProx(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], epoch=conf['localE'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], mu=conf['mu'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = fedprox_res[-1] elif conf['method'] == 'FedDyn': print('Train with FedDyn+++++++++++++++++++++++++++++++') feddyn_res = FedDyn.train_FedDyn(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], epoch=conf['localE'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, alpha_coef=conf['coef_alpha'], sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = feddyn_res[-1] elif conf['method'] == 'SCAFFOLD': print('Train with SCAFFOLD+++++++++++++++++++++++++++++++') fedscaffold_res = SCAFFOLD.train_SCAFFOLD(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], n_minibatch=conf['n_minibatch'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = fedscaffold_res[-1] elif conf['method'] == 'MOON': print('Train with MOON+++++++++++++++++++++++++++++++') moon_res = MOON.train_MOON(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], epoch=conf['localE'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], mu=conf['mu'], tau=conf['tau'], lr_decay_per_round=conf['lr_decay']) res_all_performance = moon_res[-1] elif conf['method'] == 'FedFTG': print('Train with FedFTG+++++++++++++++++++++++++++++++') g_model_func = lambda: model_choose_fn.choose_g_model(g_model_arch, nz=nz, nc=data_obj.channels, img_size=data_obj.width, n_cls=out_channel) init_g_model = g_model_func() fedavg_res = FedFTG.train_FedFTG(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], epoch=conf['localE'], 
com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, init_g_model=init_g_model, sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = fedavg_res[-1] elif conf['method'] == 'FedProxGAN': print('Train with FedProxGAN+++++++++++++++++++++++++++++++') g_model_func = lambda: model_choose_fn.choose_g_model(g_model_arch, nz=nz, nc=data_obj.channels, img_size=data_obj.width, n_cls=out_channel) init_g_model = g_model_func() fedprox_res = FedProxGAN.train_FedProxGAN(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], epoch=conf['localE'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, init_g_model=init_g_model, sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], mu=conf['mu'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = fedprox_res[-1] elif conf['method'] == 'FedDynGAN': print('Train with FedDynGAN+++++++++++++++++++++++++++++++') g_model_func = lambda: model_choose_fn.choose_g_model(g_model_arch, nz=nz, nc=data_obj.channels, img_size=data_obj.width, n_cls=out_channel) init_g_model = g_model_func() feddyn_res = FedDynGAN.train_FedDynGAN(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], epoch=conf['localE'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_g_model=init_g_model, init_model=init_model, alpha_coef=conf['coef_alpha'], sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = feddyn_res[-1] elif conf['method'] == 'SCAFFOLDGAN': print('Train with SCAFFOLDGAN+++++++++++++++++++++++++++++++') g_model_func = lambda: model_choose_fn.choose_g_model(g_model_arch, nz=nz, nc=data_obj.channels, img_size=data_obj.width, n_cls=out_channel) init_g_model = g_model_func() fedscaffold_res = SCAFFOLDGAN.train_SCAFFOLDGAN(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], n_minibatch=conf['n_minibatch'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, init_g_model=init_g_model, sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = fedscaffold_res[-1] elif conf['method'] == 'MOONGAN': print('Train with MOONGAN+++++++++++++++++++++++++++++++') g_model_func = lambda: model_choose_fn.choose_g_model(g_model_arch, nz=nz, nc=data_obj.channels, img_size=data_obj.width, n_cls=out_channel) init_g_model = g_model_func()
def run(conf): print('Init-------------------------') root_path = os.getcwd() # print(root_path) if root_path.endswith('scripts'): root_path = os.path.dirname(root_path) conf['savepath'] = os.path.join(root_path, conf['savepath'].strip()) print('Data and results save path is: ', conf['savepath']) ###################################################### # Provide reproducibility torch.manual_seed(conf['seed']) random.seed(conf['seed']) np.random.seed(conf['seed']) in_channel = 3 out_channel = 10 ###################################################### # Split the dataset data_obj = util_dataset.DatasetObject(dataset=conf['dataset'], n_client=conf['n_client'], seed=conf['seed'], rule=conf['rule'], rule_arg=conf['alpha'], unbalanced_sgm=conf['sgm'], data_path=conf['savepath'].strip()) ###################################################### # Model selection if conf['dataset'] == 'CIFAR100': out_channel = 100 in_channel = 3 g_model_arch = 'CGeneratorA' nz = 256 elif conf['dataset'] == 'CIFAR10': out_channel = 10 in_channel = 3 g_model_arch = 'CGeneratorA' nz = 100 else: raise RuntimeError('Wrong dataset or model_arch parameter setting.') if (conf['model_arch'] == 'LeNet') or (conf['model_arch'] == 'FullDNN'): model_func = lambda: model_choose_fn.choose_model(config['model_arch'], in_channel=in_channel, out_channel=out_channel) else: model_func = lambda: model_choose_fn.choose_model(config['model_arch'], num_classes=out_channel) init_model = model_func() ###################################################### # build up the saving directory if not os.path.exists( '%sModel/%s/%s_%s_init_mdl.pt' % (conf['savepath'], data_obj.name, conf['dataset'], conf['model_arch'])): if not os.path.exists('%sModel/%s/' % (conf['savepath'], data_obj.name)): print("Create a new directory") os.makedirs('%sModel/%s/' % (conf['savepath'], data_obj.name)) torch.save(init_model.state_dict(), '%sModel/%s/%s_%s_init_mdl.pt' % ( conf['savepath'], data_obj.name, conf['dataset'], conf['model_arch'])) else: # Load model init_model.load_state_dict(torch.load( '%sModel/%s/%s_%s_init_mdl.pt' % (conf['savepath'], data_obj.name, conf['dataset'], conf['model_arch']))) ###################################################### # Begin to train with the specific method res_all_performance = [] if conf['method'] == 'FedAvg': print('Train with FedAvg+++++++++++++++++++++++++++++++') fedavg_res = FedAvg.train_FedAvg(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], epoch=conf['localE'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = fedavg_res[-1] elif conf['method'] == 'FedProx': print('Train with FedProx+++++++++++++++++++++++++++++++') fedprox_res = FedProx.train_FedProx(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], epoch=conf['localE'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], mu=conf['mu'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) 
res_all_performance = fedprox_res[-1] elif conf['method'] == 'FedDyn': print('Train with FedDyn+++++++++++++++++++++++++++++++') feddyn_res = FedDyn.train_FedDyn(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], epoch=conf['localE'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, alpha_coef=conf['coef_alpha'], sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = feddyn_res[-1] elif conf['method'] == 'SCAFFOLD': print('Train with SCAFFOLD+++++++++++++++++++++++++++++++') fedscaffold_res = SCAFFOLD.train_SCAFFOLD(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], n_minibatch=conf['n_minibatch'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = fedscaffold_res[-1] elif conf['method'] == 'MOON': print('Train with MOON+++++++++++++++++++++++++++++++') moon_res = MOON.train_MOON(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], epoch=conf['localE'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], mu=conf['mu'], tau=conf['tau'], lr_decay_per_round=conf['lr_decay']) res_all_performance = moon_res[-1] elif conf['method'] == 'FedFTG': print('Train with FedFTG+++++++++++++++++++++++++++++++') g_model_func = lambda: model_choose_fn.choose_g_model(g_model_arch, nz=nz, nc=data_obj.channels, img_size=data_obj.width, n_cls=out_channel) init_g_model = g_model_func() fedavg_res = FedFTG.train_FedFTG(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], epoch=conf['localE'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, init_g_model=init_g_model, sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = fedavg_res[-1] elif conf['method'] == 'FedProxGAN': print('Train with FedProxGAN+++++++++++++++++++++++++++++++') g_model_func = lambda: model_choose_fn.choose_g_model(g_model_arch, nz=nz, nc=data_obj.channels, img_size=data_obj.width, n_cls=out_channel) init_g_model = g_model_func() fedprox_res = FedProxGAN.train_FedProxGAN(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], epoch=conf['localE'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, init_g_model=init_g_model, sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], 
save_period=conf['save_period'], mu=conf['mu'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = fedprox_res[-1] elif conf['method'] == 'FedDynGAN': print('Train with FedDynGAN+++++++++++++++++++++++++++++++') g_model_func = lambda: model_choose_fn.choose_g_model(g_model_arch, nz=nz, nc=data_obj.channels, img_size=data_obj.width, n_cls=out_channel) init_g_model = g_model_func() feddyn_res = FedDynGAN.train_FedDynGAN(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], epoch=conf['localE'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_g_model=init_g_model, init_model=init_model, alpha_coef=conf['coef_alpha'], sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = feddyn_res[-1] elif conf['method'] == 'SCAFFOLDGAN': print('Train with SCAFFOLDGAN+++++++++++++++++++++++++++++++') g_model_func = lambda: model_choose_fn.choose_g_model(g_model_arch, nz=nz, nc=data_obj.channels, img_size=data_obj.width, n_cls=out_channel) init_g_model = g_model_func() fedscaffold_res = SCAFFOLDGAN.train_SCAFFOLDGAN(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'], batch_size=conf['bs'], n_minibatch=conf['n_minibatch'], com_amount=conf['comm_amount'], print_per=conf['print_freq'], weight_decay=conf['weight_decay'], model_func=model_func, init_model=init_model, init_g_model=init_g_model, sch_step=conf['sch_step'], sch_gamma=conf['sch_gamma'], save_period=conf['save_period'], suffix=config['model_arch'], trial=False, data_path=conf['savepath'], rand_seed=conf['seed'], lr_decay_per_round=conf['lr_decay']) res_all_performance = fedscaffold_res[-1] elif conf['method'] == 'MOONGAN': print('Train with MOONGAN+++++++++++++++++++++++++++++++') g_model_func = lambda: model_choose_fn.choose_g_model(g_model_arch, nz=nz, nc=data_obj.channels, img_size=data_obj.width, n_cls=out_channel) init_g_model = g_model_func()
moongan_res = MOONGAN.train_MOONGAN(data_obj=data_obj, act_prob=conf['active_frac'], learning_rate=conf['lr'],
11
2023-10-26 03:35:17+00:00
8k
Shou-Hsu/Report.ai
main.py
[ { "identifier": "credential_validation", "path": "utils.py", "snippet": "def credential_validation(vectorDB:str=False, temperature:float=0.1) -> None:\n from langchain.embeddings.openai import OpenAIEmbeddings\n from langchain.chat_models import AzureChatOpenAI\n from langchain.chat_models import ChatOpenAI\n\n load_dotenv()\n # validate llm\n global llm, pinecone, embeddings\n\n if os.getenv('AZURE_OPENAI_API_KEY') and os.getenv('AZURE_OPENAI_API_BASE') and os.getenv('AZURE_OPENAI_API_VERSION') and os.getenv('AZURE_DEPLOYMENT_NAME'):\n llm = AzureChatOpenAI(openai_api_key=os.getenv('AZURE_OPENAI_API_KEY'),\n openai_api_base=os.getenv('AZURE_OPENAI_API_BASE'),\n openai_api_version=os.getenv('AZURE_OPENAI_API_VERSION'), \n deployment_name=os.getenv('AZURE_DEPLOYMENT_NAME'), \n temperature=temperature, \n request_timeout=240,\n max_retries=10\n ) \n \n embeddings = OpenAIEmbeddings(openai_api_key=os.getenv('AZURE_OPENAI_API_KEY'),\n openai_api_base=os.getenv('AZURE_OPENAI_API_BASE'),\n openai_api_version=os.getenv('AZURE_OPENAI_API_VERSION'), \n deployment=os.getenv('EMBEDDING_DEPLOYMENT_NAME'))\n\n print('Initial AzureOpenAI')\n elif os.getenv('OPENAI_API_KEY'):\n llm = ChatOpenAI(\n openai_api_key=os.getenv('OPENAI_API_KEY'),\n model_name=os.getenv('MODEL_NAME'), \n temperature=temperature, \n request_timeout=240,\n )\n embeddings = OpenAIEmbeddings()\n print('Initial OpenAI')\n else:\n raise Exception('Please provide OPENAI_API_KEY')\n\n\n # validate pinecone\n if vectorDB == 'pinecone':\n import pinecone\n if os.getenv('PINECONE_API_KEY') and os.getenv('PINECONE_ENV'):\n pinecone.init(environment=os.getenv('PINECONE_ENV'))\n print('Initial Pinecone')\n else:\n raise Exception('Please provide PINECONE_API_KEY and PINECONE_ENV')" }, { "identifier": "get_file_list", "path": "utils.py", "snippet": "def get_file_list(file_path:str) -> list:\n with open(file_path) as f: lines = ''.join(f.readlines())\n \n return [line.strip() for line in lines.split(',') if line]" }, { "identifier": "validation_and_filetype_check", "path": "utils.py", "snippet": "def validation_and_filetype_check(file_path:str, output_dir:str='./docx') ->str:\n\n if not os.path.exists('./transcript'): os.mkdir('./transcript')\n if not os.path.exists('./summary'): os.mkdir('./summary')\n if not os.path.exists(output_dir): os.mkdir(output_dir)\n if not os.path.exists('./audio'): os.mkdir('./audio')\n\n # validate input is url or not\n if validators.url(str(file_path)): return 'url', file_path\n\n # validate file path is existed or not\n if os.path.exists(file_path): \n file_name = file_path.split('/')[-1].split('.')[0]\n # validate input is txt or not\n if file_path.endswith('.txt'): \n # copy file to default folder\n if not os.path.exists(f'./transcript/{file_name}.txt'):\n shutil.copyfile(file_path, f'transcript/{file_name}.txt')\n return 'transcript', file_name\n \n # validate input is wav or not\n elif file_path.endswith('.wav'): \n # copy file to default folder\n if not os.path.exists(f'./audio/{file_name}.wav'):\n shutil.copyfile(file_path, f'audio/{file_name}.wav')\n return 'audio', file_name\n \n elif file_path.endswith('.mp3'): \n # copy file to default folder\n if not os.path.exists(f'./audio/{file_name}.mp3'):\n shutil.copyfile(file_path, f'audio/{file_name}.mp3')\n return 'audio', file_name\n else:\n raise ValueError(f'Please check input type is url or txt or wav')\n \n else: raise ValueError(f'Please check {file_path} is existed or not')" }, { "identifier": "detect_language", "path": "utils.py", "snippet": "def 
detect_language(file_path:str) -> str:\n from langdetect import detect_langs\n file_name = file_path.split('/')[-1].split('.')[0]\n with open(file_path,'r') as f:\n text = ''.join(f.readlines())\n return file_name, str(detect_langs(text)[0]).split(':')[0]" }, { "identifier": "speech2text", "path": "s2t_whisper.py", "snippet": "def speech2text(file_path:str, model_name:str=\"tiny\", extraction:bool=False) -> dict:\n from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline\n from utils import translate_chinese, llm\n from langdetect import detect_langs\n from pydub import AudioSegment\n import json, torch\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n file_name = file_path.split('/')[-1].split('.')[0]\n file_type = file_path.split('/')[-1].split('.')[1]\n if file_type not in [\"wav\", \"mp3\"]: raise ValueError('Please make sure the audio is \"wav\" or \"mp3\"')\n\n # convert mp3 to wav\n if file_type == \"mp3\":\n audio = AudioSegment.from_mp3(f'./audio/{file_name}.mp3')\n audio.export(f'./audio/{file_name}.wav', format=\"wav\")\n os.remove(f'./audio/{file_name}.mp3')\n\n # extract voice from audio\n if extraction: extract_voice(file_name)\n\n # remove the silence of audio\n remove_silence(file_name)\n \n # convert audio to text\n print('Start convert audio to text with timestamp')\n torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32\n model = AutoModelForSpeechSeq2Seq.from_pretrained(\n f'openai/whisper-{model_name}', torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True).to(device)\n\n processor = AutoProcessor.from_pretrained(f'openai/whisper-{model_name}')\n\n pipe = pipeline(\n \"automatic-speech-recognition\",\n model=model,\n tokenizer=processor.tokenizer,\n feature_extractor=processor.feature_extractor,\n max_new_tokens=128,\n chunk_length_s=30,\n batch_size=16,\n return_timestamps=True,\n torch_dtype=torch_dtype,\n device=device,\n )\n\n result = pipe(f'./audio/{file_name}.wav')\n content = result.get('text')\n language = str(detect_langs(content)[0]).split(':')[0]\n\n # add punctuation in chinese\n if language.__contains__('zh-cn'): \n content = translate_chinese(llm, content)\n content = punctuation_zh(content)\n language = 'zh-tw'\n\n if language.__contains__('zh-tw'):\n content = punctuation_zh(content) \n\n # save the transcript\n with open(f'./transcript/{file_name}.json', 'w', encoding='utf-8') as f: json.dump(result, f, ensure_ascii=False)\n with open(f'./transcript/{file_name}.txt', 'w', encoding='utf-8') as f: f.write(content)\n\n print('Converting is complete')\n return file_name, language" }, { "identifier": "download_from_youtube", "path": "s2t_whisper.py", "snippet": "def download_from_youtube(url:str) -> str:\n import moviepy.editor as mp\n from pytube import YouTube\n\n # Download the video\n yt = YouTube(url, use_oauth=True, allow_oauth_cache=True)\n print('Start downloading')\n stream = yt.streams.filter(only_audio=True).first()\n stream.download()\n file_name = re.sub(r'[^\\w\\s]', '', yt.title).replace(' ', '_')\n os.rename(stream.default_filename, f\"./audio/{file_name}.mp4\")\n\n # Convert the video to WAV format\n clip = mp.AudioFileClip(f\"./audio/{file_name}.mp4\", fps=16000)\n clip.write_audiofile(f\"./audio/{file_name}.wav\")\n\n os.remove(f\"./audio/{file_name}.mp4\")\n print('Downloading is complete')\n\n return f\"./audio/{file_name}.wav\"" }, { "identifier": "download_from_vimeo", "path": "s2t_whisper.py", "snippet": "def download_from_vimeo(url:str) -> str:\n 
from vimeo_downloader import Vimeo\n import moviepy.editor as mp\n\n vimeo = Vimeo(url) \n file_name = re.sub(r'[^\\w\\s]', '', vimeo._get_meta_data()[0]['title'].replace(' ', '_'))\n vimeo.best_stream.download(download_directory='./audio', filename=file_name)\n\n # Convert the video to WAV format\n clip = mp.AudioFileClip(f\"./audio/{file_name}.mp4\", fps=16000)\n clip.write_audiofile(f\"./audio/{file_name}.wav\")\n os.remove(f\"./audio/{file_name}.mp4\")\n\n return f\"./audio/{file_name}.wav\"" }, { "identifier": "pinecone_storage", "path": "storage_vector.py", "snippet": "def pinecone_storage(file_name:str) -> None:\n from utils import pinecone, embeddings\n\n with open(f\"./summary/{file_name}.json\", \"r\") as f: summary = json.load(f)\n\n index_name = file_name.lower().replace('_','-')\n # First, check if our index already exists. If it doesn't, we create it\n if index_name not in pinecone.list_indexes():\n pinecone.create_index(\n name=index_name,\n metric='cosine',\n dimension=1536 \n )\n\n for subtopic in summary['Subtopics']:\n content = \"\"\n for key, value in subtopic.items():\n if key != 'timestamp':\n content += f'{key}: {\"\".join(value)}\\n'\n doc = Document(page_content=content , metadata={\"source\": file_name})\n Pinecone.from_documents([doc], embeddings, index_name=index_name)" }, { "identifier": "chroma_storage", "path": "storage_vector.py", "snippet": "def chroma_storage(file_name:str, collection_name:str='my-collection', persist_directory:str='chroma_db') -> None:\n from langchain.vectorstores import Chroma\n from dotenv import load_dotenv\n from utils import embeddings\n\n load_dotenv()\n if not os.path.exists(os.getenv('PERSIST_DIR')): os.mkdir(os.getenv('PERSIST_DIR'))\n\n with open(f\"./summary/{file_name}.json\", \"r\") as f: summary = json.load(f)\n\n docs = list()\n for subtopic in summary['Subtopics']:\n content = \"\"\n for key, value in subtopic.items():\n if key != 'timestamp':\n content += f'{key}: {\"\".join(value)}\\n'\n doc = Document(page_content=content , metadata={\"source\": file_name})\n docs.append(doc)\n\n Chroma.from_documents(docs, embeddings, persist_directory=persist_directory, collection_name=collection_name)" }, { "identifier": "generate_summary", "path": "summarize.py", "snippet": "class generate_summary():\n def __init__(self, file_name:str, original_language:str, translated_language:str, chunk_size:int, output_dir:str) -> None:\n from utils import llm\n self.file_name = file_name\n self.chunk_size = chunk_size \n self.original_language = original_language\n self.translated_language = translated_language\n self.output_dir = output_dir\n self.llm = llm\n\n def _get_general_summary(self, article_divided:dict) -> None:\n from langchain.text_splitter import RecursiveCharacterTextSplitter\n from langchain.chains.summarize import load_summarize_chain\n\n text_splitter = RecursiveCharacterTextSplitter(\n chunk_size = self.chunk_size//2,\n chunk_overlap = 0,\n length_function = len)\n \n # load transcript\n with open(f'./transcript/{self.file_name}.txt', 'r') as f:\n transcript = ''.join(f.readlines())\n split_text = text_splitter.split_text(transcript)\n\n item_list, items, item_format = get_items('general')\n prompt_template = f\"###Write cosine {items} of the following:###\" \"{text} \\n\"\n prompt = PromptTemplate.from_template(prompt_template)\n\n refine_template = (\n f\"Your job is to produce a final streamline {items}.\\\n We have provided an existing {items} up to a certain point:\"\"{existing_answer}\\n\"\\\n f\"We have the 
opportunity to refine {items}\"\n \"(only if needed) with some more context below.\\n\\\n ------------\\n\\\n {text}\\n\\\n ------------\\n\"\n f\"Given the new context, refine the original {items} in {self.original_language}\\\n If the context isn't useful, return the origin {items} in {self.original_language}\\\n Fulfill the format: {item_format}\"\n )\n refine_prompt = PromptTemplate.from_template(refine_template)\n\n chain = load_summarize_chain(\n llm=self.llm,\n chain_type=\"refine\",\n question_prompt=prompt,\n refine_prompt=refine_prompt,\n return_intermediate_steps=False,\n input_key=\"input_documents\",\n output_key=\"output_text\"\n )\n print('Analysing general items')\n split_docs = [Document(page_content=text, metadata={\"source\": self.file_name}) for text in split_text]\n out = chain({\"input_documents\": split_docs}, return_only_outputs=True)['output_text']\n\n # convert to json\n output = convert_json(out, item_list)\n\n self.article_full = {**output, **article_divided}\n\n def _get_subtopic_summary(self) -> None:\n item_list, items, item_format = get_items('individuel')\n\n prompt_template = f\"Your primary focus should be on accurately identifying or extracting specific information.\\\n Find out or extract the {items} based on the information given in the text. \\\n Consequently, adhere to the designated format below:\\\n Subtopic:\\\n {item_format}\"\\\n \"{text}\"\n\n prompt = PromptTemplate.from_template(prompt_template)\n\n # Define LLM chain\n llm_chain = LLMChain(llm=self.llm, prompt=prompt, return_final_only=True)\n\n # Define StuffDocumentsChain\n stuff_chain = StuffDocumentsChain( \n llm_chain=llm_chain, document_variable_name=\"text\")\n \n print('Analysing subtopics')\n result = list()\n with tqdm(total=len(self.article_full.get('Subtopics'))) as pbar:\n for subtopic in self.article_full.get('Subtopics'):\n content = f\"{subtopic.get('subtopic')}: {subtopic.get('transcript').strip()}\"\n doc = Document(page_content=content , metadata={\"source\": self.file_name})\n out = stuff_chain.run([doc])\n\n # convert to json\n output = convert_json(out, item_list)\n output['subtopic'] = subtopic.get('subtopic')\n output['original transcript'] = subtopic.get('transcript')\n if self.original_language != self.translated_language:\n output['translated transcript'] = self._translate_chinese(subtopic.get('transcript'))\n\n if subtopic.get('timestamp'):\n output['timestamp']= [{'start': subtopic.get('timestamp').get('start')}, {'end': subtopic.get('timestamp').get('end')}]\n result.append(output)\n pbar.update(1)\n self.article_full.update({\"Subtopics\":result})\n with open(f'./summary/{self.file_name}.json', 'w', encoding='utf-8') as f: json.dump(self.article_full, f, ensure_ascii=False)\n print(\"Analysis completed\")\n \n def _translate_chinese(self, content:str=\"N/A\") -> str:\n from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n from langchain.docstore.document import Document\n from langchain.prompts import PromptTemplate\n from langchain.chains.llm import LLMChain\n from langdetect import detect_langs\n \n if content == \"N/A\": return \"N/A\"\n if str(detect_langs(content)[0]).split(':')[0] != self.translated_language:\n doc = Document(page_content=content, metadata={\"source\": self.file_name})\n prompt_template = f\"You are an experienced translator who will translate the content into {self.translated_language} if the given text is not in {self.translated_language}. 
\\\n You will translate the given text in a way that stays faithful to the original without adding much expansion and explanation. You will only return the translated text\" \"{text}\"\n \n prompt = PromptTemplate.from_template(prompt_template)\n llm_chain = LLMChain(llm=self.llm, prompt=prompt, return_final_only=True)\n\n stuff_translate_chain = StuffDocumentsChain( \n llm_chain=llm_chain, document_variable_name=\"text\")\n\n return stuff_translate_chain.run([doc])\n else:\n return content\n \n def _translate_convert_docx(self) -> None:\n from utils import add_hyperlink, divide_audio\n import docx, datetime\n\n # initial a docx\n document = docx.Document()\n\n # translate general info and convert in docx\n print('Translating')\n items_list, _, _ = get_items('general')\n for item in items_list:\n content = self._translate_chinese(self.article_full.get(item))\n document.add_heading(item, level=1)\n document.add_paragraph(content)\n subtopics = self.article_full.get('Subtopics')\n with tqdm(total=len(subtopics)) as pbar:\n for subtopic in subtopics:\n content = self._translate_chinese(subtopic.get('subtopic'))\n insertion = document.add_heading(content, level=2)\n\n # add hyperlink\n if subtopic.get('timestamp') and isinstance(subtopic.get('timestamp')[0].get('start'), int) and isinstance(subtopic.get('timestamp')[1].get('end'), int):\n start = subtopic.get('timestamp')[0].get('start')\n end = subtopic.get('timestamp')[1].get('end')\n subtopic_name = subtopic.get('subtopic')\n # seperate audio by suntopics\n absolute_path = divide_audio(self.file_name, subtopic_name, start, end)\n add_hyperlink(insertion, f'{datetime.timedelta(seconds = int(start))}', f'file:///{absolute_path}/{subtopic.get(\"subtopic\")}.wav')\n\n # translate individual item and convert in docx\n items_list, _, _ = get_items('individuel')\n for item in items_list:\n content = self._translate_chinese(subtopic.get(item))\n document.add_heading(item, level=3)\n document.add_paragraph(content)\n\n # add chinese transcript\n if self.original_language != self.translated_language:\n document.add_heading('translated transcript', level=3)\n document.add_paragraph(subtopic.get(\"translated transcript\").strip())\n document.add_heading('original transcript', level=3)\n document.add_paragraph(subtopic.get('original transcript').strip())\n\n document.save(f'{self.output_dir}/{self.file_name}.docx')\n pbar.update(1)\n\n def run(self, article_divided:dict) -> None:\n # generate global and subtopic summary\n self._get_general_summary(article_divided)\n self._get_subtopic_summary()\n \n # Translate and convert json to docx\n self._translate_convert_docx()" }, { "identifier": "divide_article", "path": "divide.py", "snippet": "class divide_article():\n def __init__(self, file_name:str, original_language:str, chunk_size:int) -> None:\n from utils import llm\n self.file_name = file_name\n self.chunk_size = chunk_size \n self.original_language = original_language\n self.llm = llm\n self.llm.temperature=0.0\n\n def _get_timestamp_list(self, article_timestamp:dict) -> list:\n timestamp_list = list()\n for chunk in article_timestamp.get('chunks'):\n words = chunk.get('text')\n texts = ''.join(re.split(r\"\\W+\", words.strip()))\n\n for text in texts: \n start = chunk.get('timestamp')[0]\n end = chunk.get('timestamp')[1]\n timestamp_list.append((text, start, end))\n return timestamp_list\n\n def _add_timestamp(self, paragraphs:list) -> dict: \n with open(f'./transcript/{self.file_name}.json') as f:\n timestamp_list = 
self._get_timestamp_list(json.load(f))\n result, subtopics = dict(), list()\n for paragraph in paragraphs.get('Subtopics'):\n # seperate transcript to word list\n primer = ''.join(re.split(r\"\\W+\", paragraph.get('transcript').strip()))\n word_list = [word for word in primer if word]\n start, end = 'undifine', 'undifine'\n index_w = 0\n\n # fit the timestamp to the paragraph\n for timestamp in timestamp_list:\n if index_w == len(word_list): break\n if timestamp[0] == word_list[index_w]:\n if start == 'undifine': start = int(timestamp[1])\n end = int(timestamp[2])\n index_w += 1\n else: \n start, end = 'undifine', 'undifine'\n paragraph['timestamp'] = {\"start\":start, \"end\":end}\n subtopics.append(paragraph)\n result['Subtopics'] = subtopics\n return result\n\n def _add_transcript(self) -> dict: \n with open(f'./transcript/{self.file_name}.txt') as f:\n transcript = ''.join(f.readlines())\n\n result, subtopics = dict(), list()\n index_list = [['start', 0]]\n # divide the transcript by punctuation\n source = re.split(r'[:.?!,。]', transcript)\n for paragraph in self.draft:\n subtopic = paragraph.get('Subtopic')\n primer = re.split(r'[:.?!,。]', paragraph.get('Transcript').strip())[0]\n\n # fuzzy match the primer and transcript\n matched = fuzzy_match(source, primer, cutoff=0.1)\n if matched: \n if transcript.find(matched[0], index_list[-1][1]) == -1: index_list.pop()\n index_list.append((subtopic, transcript.find(matched[0], index_list[-1][1])))\n\n index_list.append(('end', len(transcript)))\n # fulfill transcript\n for i in range(1, len(index_list)-1):\n if index_list[i+1][1] - index_list[i][1] > 10:\n subtopic_dict = dict()\n subtopic_dict['subtopic'] = index_list[i][0]\n subtopic_dict['transcript'] = transcript[index_list[i][1]:index_list[i+1][1]]\n subtopics.append(subtopic_dict)\n\n result['Subtopics'] = subtopics\n return result\n \n def _divide_by_subtopics(self) -> dict:\n from langchain.text_splitter import RecursiveCharacterTextSplitter\n\n text_splitter = RecursiveCharacterTextSplitter(\n chunk_size = self.chunk_size,\n chunk_overlap = 0,\n length_function = len)\n \n with open(f'./transcript/{self.file_name}.txt') as f:\n transcript = ''.join(f.readlines())\n split_text = text_splitter.split_text(transcript)\n\n # Define prompt\n prompt_template = \"###You are a experimental researcher and \\\n your task is to find out the subtopics in detail and divide the article by subtopics.\\\n Please ensure that you do not overly fragment the content, and \\\n that each subtopic contains a sufficient amount of information.\\\n Begin by identifying the subtopics within the given text.\\\n Keep the context entirely unmodified and refrain from extending it in any way.\\\n Divide the given text into separate contexts based on the identified subtopics.\"\\\n f\"Extract the first sentence from each context as a transcript in {self.original_language}.\"\\\n f\"Discard the remainder of the transcript, retaining only the first sentence in {self.original_language}.\"\\\n \"Fulfill the format below: \\n\\\n Subtopic: \\n\\\n Transcript: \\n\\\n ###{text}\\n\"\n \n prompt = PromptTemplate.from_template(prompt_template)\n\n # Define LLM chain\n llm_chain = LLMChain(llm=self.llm, prompt=prompt)\n\n # Define StuffDocumentsChain\n stuff_chain = StuffDocumentsChain( \n llm_chain=llm_chain, document_variable_name=\"text\", input_key='language')\n\n # divide article\n output = list()\n print('Dividing the content')\n for chunk in split_text:\n doc = Document(page_content=chunk, metadata={\"source\": 
self.file_name})\n out = stuff_chain.run([doc]).strip()\n output.append(convert_json(out, ['Subtopic', 'Transcript']))\n\n self.draft = output\n\n def run(self): \n # divide article \n self._divide_by_subtopics()\n \n # fulfill transcript\n article_full = self._add_transcript()\n \n # add timestamp, base on whisper result \n if os.path.exists(f'./transcript/{self.file_name}.json'):\n article_full = self._add_timestamp(article_full)\n \n # save result\n with open(f'./summary/{self.file_name}.json', 'w', encoding='utf-8') as f:\n json.dump(article_full, f, ensure_ascii=False)\n\n return article_full" } ]
from utils import credential_validation, get_file_list, validation_and_filetype_check, detect_language from s2t_whisper import speech2text, download_from_youtube, download_from_vimeo from storage_vector import pinecone_storage, chroma_storage from summarize import generate_summary from divide import divide_article import argparse, os
6,414
def main(): parser = argparse.ArgumentParser(description='Build your own professional database') parser.add_argument('file_path', type=str, help='file path') parser.add_argument('-c', '--chunk', default=2000, type=int, help='chunk size') parser.add_argument('-t', '--temperature', default=0.1, type=float, help='temperature of LLM') parser.add_argument('-b', '--batch', default=False, action="store_true", help='batch process') parser.add_argument('-o', '--output_dir', default='./docx', type=str, help='file path of output report') parser.add_argument('-l', '--translated_language', default='zh-tw', help='the language that should be translated') parser.add_argument('-v', '--vectorDB', default=None, choices=['pinecone', 'chroma', None], help='select the vectorDB') parser.add_argument('-e', '--extract', default=False, action="store_true", help='Extract human voice from audio (not support in Apple silicon)') parser.add_argument('-m', '--model', type=str, default='medium', help='the using model for ASR', choices=['tiny', 'base', 'small', 'medium', 'large-v3']) args = parser.parse_args() # credential validation
def main(): parser = argparse.ArgumentParser(description='Build your own professional database') parser.add_argument('file_path', type=str, help='file path') parser.add_argument('-c', '--chunk', default=2000, type=int, help='chunk size') parser.add_argument('-t', '--temperature', default=0.1, type=float, help='temperature of LLM') parser.add_argument('-b', '--batch', default=False, action="store_true", help='batch process') parser.add_argument('-o', '--output_dir', default='./docx', type=str, help='file path of output report') parser.add_argument('-l', '--translated_language', default='zh-tw', help='the language that should be translated') parser.add_argument('-v', '--vectorDB', default=None, choices=['pinecone', 'chroma', None], help='select the vectorDB') parser.add_argument('-e', '--extract', default=False, action="store_true", help='Extract human voice from audio (not support in Apple silicon)') parser.add_argument('-m', '--model', type=str, default='medium', help='the using model for ASR', choices=['tiny', 'base', 'small', 'medium', 'large-v3']) args = parser.parse_args() # credential validation
credential_validation(vectorDB=args.vectorDB, temperature=args.temperature)
0
2023-10-30 12:29:20+00:00
8k
EnVision-Research/Defect_Spectrum
models/unet/ldmunet.py
[ { "identifier": "checkpoint", "path": "models/unet/util.py", "snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)" }, { "identifier": "conv_nd", "path": "models/unet/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "linear", "path": "models/unet/util.py", "snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)" }, { "identifier": "avg_pool_nd", "path": "models/unet/util.py", "snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "zero_module", "path": "models/unet/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "normalization", "path": "models/unet/util.py", "snippet": "def normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)" }, { "identifier": "timestep_embedding", "path": "models/unet/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "SpatialTransformer", "path": "models/modules/attention.py", "snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, 
dropout=0., context_dim=None):\n super().__init__()\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim)\n for d in range(depth)]\n )\n\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c')\n for block in self.transformer_blocks:\n x = block(x, context=context)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w)\n x = self.proj_out(x)\n return x + x_in" } ]
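For reference, the `checkpoint` helper listed first in the context above trades memory for compute: intermediate activations are not cached and the wrapped function is re-run during the backward pass. A minimal sketch of the same idea using PyTorch's built-in torch.utils.checkpoint follows; the CheckpointedBlock module and its layer sizes are illustrative assumptions, not part of the repository.

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint as pt_checkpoint

class CheckpointedBlock(nn.Module):
    """Illustrative block: activations inside `self.body` are not cached;
    they are recomputed when gradients are needed."""
    def __init__(self, dim, use_checkpoint=True):
        super().__init__()
        self.use_checkpoint = use_checkpoint
        self.body = nn.Sequential(nn.Linear(dim, dim), nn.SiLU(), nn.Linear(dim, dim))

    def forward(self, x):
        if self.use_checkpoint and self.training:
            # use_reentrant=False is the recommended mode in recent PyTorch releases
            return pt_checkpoint(self.body, x, use_reentrant=False)
        return self.body(x)

# Usage sketch: backward recomputes the block's intermediate activations.
block = CheckpointedBlock(64).train()
x = torch.randn(8, 64, requires_grad=True)
block(x).sum().backward()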
from abc import abstractmethod
from functools import partial
from typing import Iterable
from models.unet.util import (
    checkpoint,
    conv_nd,
    linear,
    avg_pool_nd,
    zero_module,
    normalization,
    timestep_embedding,
)
from models.modules.attention import SpatialTransformer
from omegaconf.listconfig import ListConfig
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
3693
use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim ), ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(num_res_blocks + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim ) ) if level and i == num_res_blocks: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch self.out = nn.Sequential( normalization(ch), nn.SiLU(), zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), ) if self.predict_codebook_ids: self.id_predictor = nn.Sequential( normalization(ch), conv_nd(dims, model_channels, n_embed, 1), #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits ) def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) self.output_blocks.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. """ self.input_blocks.apply(convert_module_to_f32) self.middle_block.apply(convert_module_to_f32) self.output_blocks.apply(convert_module_to_f32) def forward(self, x, timesteps=None, context=None, y=None,**kwargs): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param context: conditioning plugged in via crossattn :param y: an [N] Tensor of labels, if class-conditional. :return: an [N x C x ...] Tensor of outputs. """ assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" hs = []
# dummy replace def convert_module_to_f16(x): pass def convert_module_to_f32(x): pass ## go class AttentionPool2d(nn.Module): """ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py """ def __init__( self, spacial_dim: int, embed_dim: int, num_heads_channels: int, output_dim: int = None, ): super().__init__() self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) self.num_heads = embed_dim // num_heads_channels self.attention = QKVAttention(self.num_heads) def forward(self, x): b, c, *_spatial = x.shape x = x.reshape(b, c, -1) # NC(HW) x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) x = self.qkv_proj(x) x = self.attention(x) x = self.c_proj(x) return x[:, :, 0] class TimestepBlock(nn.Module): """ Any module where forward() takes timestep embeddings as a second argument. """ @abstractmethod def forward(self, x, emb): """ Apply the module to `x` given `emb` timestep embeddings. """ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ def forward(self, x, emb, context=None): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb) elif isinstance(layer, SpatialTransformer): x = layer(x, context) else: x = layer(x) return x class Upsample(nn.Module): """ An upsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims if use_conv: self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) def forward(self, x): assert x.shape[1] == self.channels if self.dims == 3: x = F.interpolate( x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" ) else: x = F.interpolate(x, scale_factor=2, mode="nearest") if self.use_conv: x = self.conv(x) return x class TransposedUpsample(nn.Module): 'Learned 2x upsampling without padding' def __init__(self, channels, out_channels=None, ks=5): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) def forward(self,x): return self.up(x) class Downsample(nn.Module): """ A downsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two dimensions. 
""" def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims stride = 2 if dims != 3 else (1, 2, 2) if use_conv: self.op = conv_nd( dims, self.channels, self.out_channels, 3, stride=stride, padding=padding ) else: assert self.channels == self.out_channels self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) def forward(self, x): assert x.shape[1] == self.channels return self.op(x) class ResBlock(TimestepBlock): """ A residual block that can optionally change the number of channels. :param channels: the number of input channels. :param emb_channels: the number of timestep embedding channels. :param dropout: the rate of dropout. :param out_channels: if specified, the number of out channels. :param use_conv: if True and out_channels is specified, use a spatial convolution instead of a smaller 1x1 convolution to change the channels in the skip connection. :param dims: determines if the signal is 1D, 2D, or 3D. :param use_checkpoint: if True, use gradient checkpointing on this module. :param up: if True, use this block for upsampling. :param down: if True, use this block for downsampling. """ def __init__( self, channels, emb_channels, dropout, out_channels=None, use_conv=False, use_scale_shift_norm=False, dims=2, use_checkpoint=False, up=False, down=False, ): super().__init__() self.channels = channels self.emb_channels = emb_channels self.dropout = dropout self.out_channels = out_channels or channels self.use_conv = use_conv self.use_checkpoint = use_checkpoint self.use_scale_shift_norm = use_scale_shift_norm self.in_layers = nn.Sequential( normalization(channels), nn.SiLU(), conv_nd(dims, channels, self.out_channels, 3, padding=1), ) self.updown = up or down if up: self.h_upd = Upsample(channels, False, dims) self.x_upd = Upsample(channels, False, dims) elif down: self.h_upd = Downsample(channels, False, dims) self.x_upd = Downsample(channels, False, dims) else: self.h_upd = self.x_upd = nn.Identity() self.emb_layers = nn.Sequential( nn.SiLU(), linear( emb_channels, 2 * self.out_channels if use_scale_shift_norm else self.out_channels, ), ) self.out_layers = nn.Sequential( normalization(self.out_channels), nn.SiLU(), nn.Dropout(p=dropout), zero_module( conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) ), ) if self.out_channels == channels: self.skip_connection = nn.Identity() elif use_conv: self.skip_connection = conv_nd( dims, channels, self.out_channels, 3, padding=1 ) else: self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) def forward(self, x, emb): """ Apply the block to a Tensor, conditioned on a timestep embedding. :param x: an [N x C x ...] Tensor of features. :param emb: an [N x emb_channels] Tensor of timestep embeddings. :return: an [N x C x ...] Tensor of outputs. 
""" return checkpoint( self._forward, (x, emb), self.parameters(), self.use_checkpoint ) def _forward(self, x, emb): if self.updown: in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] h = in_rest(x) h = self.h_upd(h) x = self.x_upd(x) h = in_conv(h) else: h = self.in_layers(x) emb_out = self.emb_layers(emb).type(h.dtype) while len(emb_out.shape) < len(h.shape): emb_out = emb_out[..., None] if self.use_scale_shift_norm: out_norm, out_rest = self.out_layers[0], self.out_layers[1:] scale, shift = th.chunk(emb_out, 2, dim=1) h = out_norm(h) * (1 + scale) + shift h = out_rest(h) else: h = h + emb_out h = self.out_layers(h) return self.skip_connection(x) + h class AttentionBlock(nn.Module): """ An attention block that allows spatial positions to attend to each other. Originally ported from here, but adapted to the N-d case. https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. """ def __init__( self, channels, num_heads=1, num_head_channels=-1, use_checkpoint=False, use_new_attention_order=False, ): super().__init__() self.channels = channels if num_head_channels == -1: self.num_heads = num_heads else: assert ( channels % num_head_channels == 0 ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" self.num_heads = channels // num_head_channels self.use_checkpoint = use_checkpoint self.norm = normalization(channels) self.qkv = conv_nd(1, channels, channels * 3, 1) if use_new_attention_order: # split qkv before split heads self.attention = QKVAttention(self.num_heads) else: # split heads before split qkv self.attention = QKVAttentionLegacy(self.num_heads) self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) def forward(self, x): return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! #return pt_checkpoint(self._forward, x) # pytorch def _forward(self, x): b, c, *spatial = x.shape x = x.reshape(b, c, -1) qkv = self.qkv(self.norm(x)) h = self.attention(qkv) h = self.proj_out(h) return (x + h).reshape(b, c, *spatial) def count_flops_attn(model, _x, y): """ A counter for the `thop` package to count the operations in an attention operation. Meant to be used like: macs, params = thop.profile( model, inputs=(inputs, timestamps), custom_ops={QKVAttention: QKVAttention.count_flops}, ) """ b, c, *spatial = y[0].shape num_spatial = int(np.prod(spatial)) # We perform two matmuls with the same number of ops. # The first computes the weight matrix, the second computes # the combination of the value vectors. matmul_ops = 2 * b * (num_spatial ** 2) * c model.total_ops += th.DoubleTensor([matmul_ops]) class QKVAttentionLegacy(nn.Module): """ A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping """ def __init__(self, n_heads): super().__init__() self.n_heads = n_heads def forward(self, qkv): """ Apply QKV attention. :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. :return: an [N x (H * C) x T] tensor after attention. 
""" bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum( "bct,bcs->bts", q * scale, k * scale ) # More stable with f16 than dividing afterwards weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) a = th.einsum("bts,bcs->bct", weight, v) return a.reshape(bs, -1, length) @staticmethod def count_flops(model, _x, y): return count_flops_attn(model, _x, y) class QKVAttention(nn.Module): """ A module which performs QKV attention and splits in a different order. """ def __init__(self, n_heads): super().__init__() self.n_heads = n_heads def forward(self, qkv): """ Apply QKV attention. :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. :return: an [N x (H * C) x T] tensor after attention. """ bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.chunk(3, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum( "bct,bcs->bts", (q * scale).view(bs * self.n_heads, ch, length), (k * scale).view(bs * self.n_heads, ch, length), ) # More stable with f16 than dividing afterwards weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) return a.reshape(bs, -1, length) @staticmethod def count_flops(model, _x, y): return count_flops_attn(model, _x, y) class UNetModel(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. 
""" def __init__( self, image_size, in_channels, model_channels, out_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels self.num_res_blocks = num_res_blocks self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) if self.num_classes is not None: self.label_emb = nn.Embedding(num_classes, time_embed_dim) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for _ in range(num_res_blocks): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, 
use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim ), ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(num_res_blocks + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim ) ) if level and i == num_res_blocks: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch self.out = nn.Sequential( normalization(ch), nn.SiLU(), zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), ) if self.predict_codebook_ids: self.id_predictor = nn.Sequential( normalization(ch), conv_nd(dims, model_channels, n_embed, 1), #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits ) def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) self.output_blocks.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. """ self.input_blocks.apply(convert_module_to_f32) self.middle_block.apply(convert_module_to_f32) self.output_blocks.apply(convert_module_to_f32) def forward(self, x, timesteps=None, context=None, y=None,**kwargs): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param context: conditioning plugged in via crossattn :param y: an [N] Tensor of labels, if class-conditional. :return: an [N x C x ...] Tensor of outputs. 
""" assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" hs = []
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
6
2023-10-26 10:28:26+00:00
8k
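For reference, the next_line target of the record above builds sinusoidal timestep embeddings before the UNet's time-embedding MLP. Below is a minimal, self-contained sketch of that pattern; the TimeEmbed wrapper name and the shapes in the usage lines are illustrative assumptions, not part of the original repository.

import math
import torch
import torch.nn as nn

def sinusoidal_timestep_embedding(timesteps, dim, max_period=10000):
    # Standard sinusoidal embedding: half the channels use cos, half use sin,
    # with log-spaced frequencies controlled by max_period.
    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period) * torch.arange(half, dtype=torch.float32) / half
    ).to(timesteps.device)
    args = timesteps[:, None].float() * freqs[None]
    emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:  # pad with a zero column when dim is odd
        emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)
    return emb

class TimeEmbed(nn.Module):
    """Illustrative wrapper: sinusoidal features fed through a 2-layer MLP,
    mirroring the time_embed module of the UNet shown above."""
    def __init__(self, model_channels):
        super().__init__()
        self.model_channels = model_channels
        time_embed_dim = model_channels * 4
        self.mlp = nn.Sequential(
            nn.Linear(model_channels, time_embed_dim),
            nn.SiLU(),
            nn.Linear(time_embed_dim, time_embed_dim),
        )

    def forward(self, timesteps):
        return self.mlp(sinusoidal_timestep_embedding(timesteps, self.model_channels))

# Usage sketch (assumed shapes): 4 integer timesteps -> a [4, 512] embedding.
emb = TimeEmbed(model_channels=128)(torch.tensor([0, 10, 100, 999]))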
ORI-Muchim/BEGANSing
AudioSR-Upsampling/audiosr/clap/open_clip/factory.py
[ { "identifier": "CLAP", "path": "AudioSR-Upsampling/audiosr/clap/open_clip/model.py", "snippet": "class CLAP(nn.Module):\n def __init__(\n self,\n embed_dim: int,\n audio_cfg: CLAPAudioCfp,\n text_cfg: CLAPTextCfg,\n quick_gelu: bool = False,\n enable_fusion: bool = False,\n fusion_type: str = \"None\",\n joint_embed_shape: int = 512,\n mlp_act: str = \"relu\",\n ):\n super().__init__()\n if isinstance(audio_cfg, dict):\n audio_cfg = CLAPAudioCfp(**audio_cfg)\n if isinstance(text_cfg, dict):\n text_cfg = CLAPTextCfg(**text_cfg)\n\n self.audio_cfg = audio_cfg\n self.text_cfg = text_cfg\n self.enable_fusion = enable_fusion\n self.fusion_type = fusion_type\n self.joint_embed_shape = joint_embed_shape\n self.mlp_act = mlp_act\n\n self.context_length = text_cfg.context_length\n\n # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more\n # memory efficient in recent PyTorch releases (>= 1.10).\n # NOTE: timm models always use native GELU regardless of quick_gelu flag.\n act_layer = QuickGELU if quick_gelu else nn.GELU\n\n if mlp_act == \"relu\":\n mlp_act_layer = nn.ReLU()\n elif mlp_act == \"gelu\":\n mlp_act_layer = nn.GELU()\n else:\n raise NotImplementedError\n\n # audio branch\n # audio branch parameters\n if audio_cfg.model_type == \"PANN\":\n self.audio_branch = create_pann_model(audio_cfg, enable_fusion, fusion_type)\n elif audio_cfg.model_type == \"HTSAT\":\n self.audio_branch = create_htsat_model(\n audio_cfg, enable_fusion, fusion_type\n )\n else:\n logging.error(f\"Model config for {audio_cfg.model_type} not found\")\n raise RuntimeError(f\"Model config for {audio_cfg.model_type} not found.\")\n\n # text branch\n # text branch parameters\n if text_cfg.model_type == \"transformer\":\n self.text_branch = Transformer(\n width=text_cfg.width,\n layers=text_cfg.layers,\n heads=text_cfg.heads,\n act_layer=act_layer,\n )\n self.vocab_size = text_cfg.vocab_size\n self.token_embedding = nn.Embedding(text_cfg.vocab_size, text_cfg.width)\n self.positional_embedding = nn.Parameter(\n torch.empty(self.context_length, text_cfg.width)\n )\n self.ln_final = LayerNorm(text_cfg.width)\n self.text_transform = MLPLayers(\n units=[\n self.joint_embed_shape,\n self.joint_embed_shape,\n self.joint_embed_shape,\n ],\n dropout=0.1,\n )\n self.text_projection = nn.Sequential(\n nn.Linear(text_cfg.width, self.joint_embed_shape),\n mlp_act_layer,\n nn.Linear(self.joint_embed_shape, self.joint_embed_shape),\n )\n elif text_cfg.model_type == \"bert\":\n self.text_branch = BertModel.from_pretrained(\"bert-base-uncased\")\n self.text_transform = MLPLayers(\n units=[\n self.joint_embed_shape,\n self.joint_embed_shape,\n self.joint_embed_shape,\n ],\n dropout=0.1,\n )\n self.text_projection = nn.Sequential(\n nn.Linear(768, self.joint_embed_shape),\n mlp_act_layer,\n nn.Linear(self.joint_embed_shape, self.joint_embed_shape),\n )\n elif text_cfg.model_type == \"roberta\":\n self.text_branch = RobertaModel(\n RobertaConfig.from_pretrained(\"roberta-base\")\n )\n self.text_transform = MLPLayers(\n units=[\n self.joint_embed_shape,\n self.joint_embed_shape,\n self.joint_embed_shape,\n ],\n dropout=0.1,\n )\n self.text_projection = nn.Sequential(\n nn.Linear(768, self.joint_embed_shape),\n mlp_act_layer,\n nn.Linear(self.joint_embed_shape, self.joint_embed_shape),\n )\n elif text_cfg.model_type == \"bart\":\n self.text_branch = BartModel.from_pretrained(\"facebook/bart-base\")\n self.text_transform = MLPLayers(\n units=[\n self.joint_embed_shape,\n self.joint_embed_shape,\n 
self.joint_embed_shape,\n ],\n dropout=0.1,\n )\n self.text_projection = nn.Sequential(\n nn.Linear(768, self.joint_embed_shape),\n mlp_act_layer,\n nn.Linear(self.joint_embed_shape, self.joint_embed_shape),\n )\n else:\n logging.error(f\"Model config for {text_cfg.model_type} not found\")\n raise RuntimeError(f\"Model config for {text_cfg.model_type} not found.\")\n self.text_branch_type = text_cfg.model_type\n # text branch parameters\n\n # audio branch parameters\n self.audio_transform = MLPLayers(\n units=[\n self.joint_embed_shape,\n self.joint_embed_shape,\n self.joint_embed_shape,\n ],\n dropout=0.1,\n )\n\n # below here is text branch parameters\n\n # ============================================================================================================\n self.audio_projection = nn.Sequential(\n nn.Linear(embed_dim, self.joint_embed_shape),\n mlp_act_layer,\n nn.Linear(self.joint_embed_shape, self.joint_embed_shape),\n )\n\n self.logit_scale_a = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n self.logit_scale_t = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n self.register_buffer(\"attn_mask\", self.build_attention_mask(), persistent=False)\n\n self.init_text_branch_parameters()\n\n def init_text_branch_parameters(self):\n if self.text_branch_type == \"transformer\":\n nn.init.normal_(self.token_embedding.weight, std=0.02)\n nn.init.normal_(self.positional_embedding, std=0.01)\n proj_std = (self.text_branch.width**-0.5) * (\n (2 * self.text_branch.layers) ** -0.5\n )\n attn_std = self.text_branch.width**-0.5\n fc_std = (2 * self.text_branch.width) ** -0.5\n for block in self.text_branch.resblocks:\n nn.init.normal_(block.attn.in_proj_weight, std=attn_std)\n nn.init.normal_(block.attn.out_proj.weight, std=proj_std)\n nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)\n nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)\n if self.text_branch_type == \"bert\" or self.text_branch_type == \"roberta\":\n self.text_branch.embeddings.word_embeddings.weight.shape[-1]\n elif self.text_branch_type == \"bart\":\n self.text_branch.shared.weight.shape[-1]\n else:\n self.text_branch.width\n nn.init.constant_(self.logit_scale_a, np.log(1 / 0.07))\n nn.init.constant_(self.logit_scale_t, np.log(1 / 0.07))\n\n # deprecated\n # if hasattr(self.visual, 'init_parameters'):\n # self.visual.init_parameters()\n\n # if self.text_projection is not None:\n # nn.init.normal_(self.text_projection, std=width**-0.5)\n\n def build_attention_mask(self):\n # lazily create causal attention mask, with full attention between the vision tokens\n # pytorch uses additive attention mask; fill with -inf\n mask = torch.empty(self.context_length, self.context_length)\n mask.fill_(float(\"-inf\"))\n mask.triu_(1) # zero out the lower diagonal\n return mask\n\n def encode_audio(self, audio, device):\n return self.audio_branch(\n audio, mixup_lambda=None, device=device\n ) # mix lambda needs to add\n\n # def list_of_dict_of_tensor2dict_of_tensor(self, x, device):\n # tmp = {}\n # for k in x[0].keys():\n # tmp[k] = []\n # for i in range(len(x)):\n # tmp[k].append(x[i][k][:77])\n # for k in x[0].keys():\n # tmp[k] = torch.tensor(tmp[k]).to(device=device, non_blocking=True)\n # return tmp\n\n def encode_text(self, text, device):\n if self.text_branch_type == \"transformer\":\n text = text.to(device=device, non_blocking=True)\n x = self.token_embedding(text) # [batch_size, n_ctx, d_model]\n\n x = x + self.positional_embedding\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.text_branch(x, attn_mask=self.attn_mask)\n x 
= x.permute(1, 0, 2) # LND -> NLD\n x = self.ln_final(x)\n\n # x.shape = [batch_size, n_ctx, transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence)\n x = self.text_projection(x[torch.arange(x.shape[0]), text.argmax(dim=-1)])\n elif self.text_branch_type == \"bert\":\n # text = self.list_of_dict_of_tensor2dict_of_tensor(text, device)\n # text = BatchEncoding(text)\n x = self.text_branch(\n input_ids=text[\"input_ids\"].to(device=device, non_blocking=True),\n attention_mask=text[\"attention_mask\"].to(\n device=device, non_blocking=True\n ),\n token_type_ids=text[\"token_type_ids\"].to(\n device=device, non_blocking=True\n ),\n )[\"pooler_output\"]\n x = self.text_projection(x)\n elif self.text_branch_type == \"roberta\":\n x = self.text_branch(\n input_ids=text[\"input_ids\"].to(device=device, non_blocking=True),\n attention_mask=text[\"attention_mask\"].to(\n device=device, non_blocking=True\n ),\n )[\"pooler_output\"]\n x = self.text_projection(x)\n elif self.text_branch_type == \"bart\":\n x = torch.mean(\n self.text_branch(\n input_ids=text[\"input_ids\"].to(device=device, non_blocking=True),\n attention_mask=text[\"attention_mask\"].to(\n device=device, non_blocking=True\n ),\n )[\"encoder_last_hidden_state\"],\n axis=1,\n )\n x = self.text_projection(x)\n else:\n logging.error(f\"Model type {self.text_branch_type} not found\")\n raise RuntimeError(f\"Model type {self.text_branch_type} not found.\")\n return x\n\n def forward(self, audio, text, device=None):\n \"\"\"Forward audio and text into the CLAP\n\n Parameters\n ----------\n audio: torch.Tensor (batch_size, audio_length)\n the time-domain audio input / the batch of mel_spec and longer list.\n text: torch.Tensor () // need to add\n the text token input\n \"\"\"\n if device is None:\n if audio is not None:\n device = audio.device\n elif text is not None:\n device = text.device\n if audio is None and text is None:\n # a hack to get the logit scale\n return self.logit_scale_a.exp(), self.logit_scale_t.exp()\n elif audio is None:\n return self.encode_text(text, device=device)\n elif text is None:\n return self.audio_projection(\n self.encode_audio(audio, device=device)[\"embedding\"]\n )\n audio_features = self.audio_projection(\n self.encode_audio(audio, device=device)[\"embedding\"]\n )\n audio_features = F.normalize(audio_features, dim=-1)\n\n text_features = self.encode_text(text, device=device)\n # print(\"text_features\", text_features)\n # print(\"text_features.shape\", text_features.shape)\n # print(\"text_features.type\", type(text_features))\n text_features = F.normalize(text_features, dim=-1)\n\n audio_features_mlp = self.audio_transform(audio_features)\n text_features_mlp = self.text_transform(text_features)\n # Four outputs: audio features (basic & MLP), text features (basic & MLP)\n return (\n audio_features,\n text_features,\n audio_features_mlp,\n text_features_mlp,\n self.logit_scale_a.exp(),\n self.logit_scale_t.exp(),\n )\n\n def get_logit_scale(self):\n return self.logit_scale_a.exp(), self.logit_scale_t.exp()\n\n def get_text_embedding(self, data):\n \"\"\"Get the text embedding from the model\n\n Parameters\n ----------\n data: torch.Tensor\n a tensor of text embedding\n\n Returns\n ----------\n text_embed: torch.Tensor\n a tensor of text_embeds (N, D)\n\n \"\"\"\n device = next(self.parameters()).device\n for k in data:\n data[k] = data[k].to(device)\n text_embeds = self.encode_text(data, device=device)\n text_embeds = F.normalize(text_embeds, dim=-1)\n\n 
return text_embeds\n\n def get_audio_embedding(self, data):\n \"\"\"Get the audio embedding from the model\n\n Parameters\n ----------\n data: a list of dict\n the audio input dict list from 'get_audio_feature' method\n\n Returns\n ----------\n audio_embed: torch.Tensor\n a tensor of audio_embeds (N, D)\n\n \"\"\"\n device = next(self.parameters()).device\n # input_dict = {}\n # keys = data[0].keys()\n # for k in keys:\n # input_dict[k] = torch.cat([d[k].unsqueeze(0) for d in data], dim=0).to(\n # device\n # )\n audio_embeds = self.audio_projection(\n self.encode_audio(data, device=device)[\"embedding\"]\n )\n audio_embeds = F.normalize(audio_embeds, dim=-1)\n\n return audio_embeds\n\n def audio_infer(self, audio, hopsize=None, device=None):\n \"\"\"Forward one audio and produce the audio embedding\n\n Parameters\n ----------\n audio: (audio_length)\n the time-domain audio input, notice that it must be only one input\n hopsize: int\n the overlap hopsize as the sliding window\n\n Returns\n ----------\n output_dict: {\n key: [n, (embedding_shape)] if \"HTS-AT\"\n or\n key: [(embedding_shape)] if \"PANN\"\n }\n the list of key values of the audio branch\n\n \"\"\"\n\n assert not self.training, \"the inference mode must be run at eval stage\"\n output_dict = {}\n # PANN\n if self.audio_cfg.model_type == \"PANN\":\n audio_input = audio.unsqueeze(dim=0)\n output_dict[key] = self.encode_audio(audio_input, device=device)[\n key\n ].squeeze(dim=0)\n elif self.audio_cfg.model_type == \"HTSAT\":\n # repeat\n audio_len = len(audio)\n k = self.audio_cfg.clip_samples // audio_len\n if k > 1:\n audio = audio.repeat(k)\n audio_len = len(audio)\n\n if hopsize is None:\n hopsize = min(hopsize, audio_len)\n\n if audio_len > self.audio_cfg.clip_samples:\n audio_input = [\n audio[pos : pos + self.audio_cfg.clip_samples].clone()\n for pos in range(\n 0, audio_len - self.audio_cfg.clip_samples, hopsize\n )\n ]\n audio_input.append(audio[-self.audio_cfg.clip_samples :].clone())\n audio_input = torch.stack(audio_input)\n output_dict[key] = self.encode_audio(audio_input, device=device)[key]\n else:\n audio_input = audio.unsqueeze(dim=0)\n output_dict[key] = self.encode_audio(audio_input, device=device)[\n key\n ].squeeze(dim=0)\n\n return output_dict" }, { "identifier": "convert_weights_to_fp16", "path": "AudioSR-Upsampling/audiosr/clap/open_clip/model.py", "snippet": "def convert_weights_to_fp16(model: nn.Module):\n \"\"\"Convert applicable model parameters to fp16\"\"\"\n\n def _convert_weights_to_fp16(l):\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):\n l.weight.data = l.weight.data.half()\n if l.bias is not None:\n l.bias.data = l.bias.data.half()\n\n if isinstance(l, nn.MultiheadAttention):\n for attr in [\n *[f\"{s}_proj_weight\" for s in [\"in\", \"q\", \"k\", \"v\"]],\n \"in_proj_bias\",\n \"bias_k\",\n \"bias_v\",\n ]:\n tensor = getattr(l, attr)\n if tensor is not None:\n tensor.data = tensor.data.half()\n\n for name in [\"text_projection\", \"proj\"]:\n if hasattr(l, name):\n attr = getattr(l, name)\n if attr is not None:\n attr.data = attr.data.half()\n\n model.apply(_convert_weights_to_fp16)" }, { "identifier": "load_openai_model", "path": "AudioSR-Upsampling/audiosr/clap/open_clip/openai.py", "snippet": "def load_openai_model(\n name: str,\n model_cfg,\n device: Union[str, torch.device] = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n jit=True,\n cache_dir=os.path.expanduser(\"~/.cache/clip\"),\n enable_fusion: bool = False,\n fusion_type: str = \"None\",\n):\n \"\"\"Load a CLIP model, 
preserve its text pretrained part, and set in the CLAP model\n\n Parameters\n ----------\n name : str\n A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict\n device : Union[str, torch.device]\n The device to put the loaded model\n jit : bool\n Whether to load the optimized JIT model (default) or more hackable non-JIT model.\n\n Returns\n -------\n model : torch.nn.Module\n The CLAP model\n preprocess : Callable[[PIL.Image], torch.Tensor]\n A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input\n \"\"\"\n if get_pretrained_url(name, \"openai\"):\n model_path = download_pretrained(\n get_pretrained_url(name, \"openai\"), root=cache_dir\n )\n elif os.path.isfile(name):\n model_path = name\n else:\n raise RuntimeError(\n f\"Model {name} not found; available models = {list_openai_models()}\"\n )\n\n try:\n # loading JIT archive\n model = torch.jit.load(model_path, map_location=device if jit else \"cpu\").eval()\n state_dict = None\n except RuntimeError:\n # loading saved state dict\n if jit:\n warnings.warn(\n f\"File {model_path} is not a JIT archive. Loading as a state dict instead\"\n )\n jit = False\n state_dict = torch.load(model_path, map_location=\"cpu\")\n\n if not jit:\n try:\n model = build_model_from_openai_state_dict(\n state_dict or model.state_dict(), model_cfg, enable_fusion, fusion_type\n ).to(device)\n except KeyError:\n sd = {k[7:]: v for k, v in state_dict[\"state_dict\"].items()}\n model = build_model_from_openai_state_dict(\n sd, model_cfg, enable_fusion, fusion_type\n ).to(device)\n\n if str(device) == \"cpu\":\n model.float()\n return model\n\n # patch the device names\n device_holder = torch.jit.trace(\n lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]\n )\n device_node = [\n n\n for n in device_holder.graph.findAllNodes(\"prim::Constant\")\n if \"Device\" in repr(n)\n ][-1]\n\n def patch_device(module):\n try:\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n except RuntimeError:\n graphs = []\n\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"prim::Constant\"):\n if \"value\" in node.attributeNames() and str(node[\"value\"]).startswith(\n \"cuda\"\n ):\n node.copyAttributes(device_node)\n\n model.apply(patch_device)\n patch_device(model.encode_audio)\n patch_device(model.encode_text)\n\n # patch dtype to float32 on CPU\n if str(device) == \"cpu\":\n float_holder = torch.jit.trace(\n lambda: torch.ones([]).float(), example_inputs=[]\n )\n float_input = list(float_holder.graph.findNode(\"aten::to\").inputs())[1]\n float_node = float_input.node()\n\n def patch_float(module):\n try:\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n except RuntimeError:\n graphs = []\n\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"aten::to\"):\n inputs = list(node.inputs())\n for i in [\n 1,\n 2,\n ]: # dtype can be the second or third argument to aten::to()\n if inputs[i].node()[\"value\"] == 5:\n inputs[i].node().copyAttributes(float_node)\n\n model.apply(patch_float)\n patch_float(model.encode_audio)\n patch_float(model.encode_text)\n model.float()\n\n model.audio_branch.audio_length = model.audio_cfg.audio_length\n return model" }, { "identifier": "get_pretrained_url", "path": "AudioSR-Upsampling/audiosr/clap/open_clip/pretrained.py", "snippet": "def 
get_pretrained_url(model: str, tag: str):\n if model not in _PRETRAINED:\n return \"\"\n model_pretrained = _PRETRAINED[model]\n if tag not in model_pretrained:\n return \"\"\n return model_pretrained[tag]" }, { "identifier": "download_pretrained", "path": "AudioSR-Upsampling/audiosr/clap/open_clip/pretrained.py", "snippet": "def download_pretrained(url: str, root: str = os.path.expanduser(\"~/.cache/clip\")):\n os.makedirs(root, exist_ok=True)\n filename = os.path.basename(url)\n\n if \"openaipublic\" in url:\n expected_sha256 = url.split(\"/\")[-2]\n else:\n expected_sha256 = \"\"\n\n download_target = os.path.join(root, filename)\n\n if os.path.exists(download_target) and not os.path.isfile(download_target):\n raise RuntimeError(f\"{download_target} exists and is not a regular file\")\n\n if os.path.isfile(download_target):\n if expected_sha256:\n if (\n hashlib.sha256(open(download_target, \"rb\").read()).hexdigest()\n == expected_sha256\n ):\n return download_target\n else:\n warnings.warn(\n f\"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file\"\n )\n else:\n return download_target\n\n with urllib.request.urlopen(url) as source, open(download_target, \"wb\") as output:\n with tqdm(\n total=int(source.info().get(\"Content-Length\")),\n ncols=80,\n unit=\"iB\",\n unit_scale=True,\n ) as loop:\n while True:\n buffer = source.read(8192)\n if not buffer:\n break\n\n output.write(buffer)\n loop.update(len(buffer))\n\n if (\n expected_sha256\n and hashlib.sha256(open(download_target, \"rb\").read()).hexdigest()\n != expected_sha256\n ):\n raise RuntimeError(\n f\"Model has been downloaded but the SHA256 checksum does not not match\"\n )\n\n return download_target" }, { "identifier": "image_transform", "path": "AudioSR-Upsampling/audiosr/clap/open_clip/transform.py", "snippet": "def image_transform(\n image_size: int,\n is_train: bool,\n mean=(0.48145466, 0.4578275, 0.40821073),\n std=(0.26862954, 0.26130258, 0.27577711),\n):\n normalize = Normalize(mean=mean, std=std)\n if is_train:\n return Compose(\n [\n RandomResizedCrop(\n image_size,\n scale=(0.9, 1.0),\n interpolation=InterpolationMode.BICUBIC,\n ),\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ]\n )\n else:\n return Compose(\n [\n Resize(image_size, interpolation=InterpolationMode.BICUBIC),\n CenterCrop(image_size),\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ]\n )" } ]
import json
import logging
import os
import re
import torch
from copy import deepcopy
from pathlib import Path
from .model import CLAP, convert_weights_to_fp16
from .openai import load_openai_model
from .pretrained import get_pretrained_url, download_pretrained
from .transform import image_transform
6857
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"] _MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs def _natural_key(string_): return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())] def _rescan_model_configs(): global _MODEL_CONFIGS config_ext = (".json",) config_files = [] for config_path in _MODEL_CONFIG_PATHS: if config_path.is_file() and config_path.suffix in config_ext: config_files.append(config_path) elif config_path.is_dir(): for ext in config_ext: config_files.extend(config_path.glob(f"*{ext}")) for cf in config_files: if os.path.basename(cf)[0] == ".": continue # Ignore hidden files with open(cf, "r") as f: model_cfg = json.load(f) if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")): _MODEL_CONFIGS[cf.stem] = model_cfg _MODEL_CONFIGS = { k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0])) } _rescan_model_configs() # initial populate of model config registry def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True): checkpoint = torch.load(checkpoint_path, map_location=map_location) if isinstance(checkpoint, dict) and "state_dict" in checkpoint: state_dict = checkpoint["state_dict"] else: state_dict = checkpoint if skip_params: if next(iter(state_dict.items()))[0].startswith("module"): state_dict = {k[7:]: v for k, v in state_dict.items()} # for k in state_dict: # if k.startswith('transformer'): # v = state_dict.pop(k) # state_dict['text_branch.' + k[12:]] = v return state_dict def create_model( amodel_name: str, tmodel_name: str, pretrained: str = "", precision: str = "fp32", device: torch.device = torch.device("cpu"), jit: bool = False, force_quick_gelu: bool = False, openai_model_cache_dir: str = os.path.expanduser("~/.cache/clip"), skip_params=True, pretrained_audio: str = "", pretrained_text: str = "", enable_fusion: bool = False, fusion_type: str = "None" # pretrained_image: bool = False, ): amodel_name = amodel_name.replace( "/", "-" ) # for callers using old naming with / in ViT names pretrained_orig = pretrained pretrained = pretrained.lower() if pretrained == "openai": if amodel_name in _MODEL_CONFIGS: logging.info(f"Loading {amodel_name} model config.") model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name]) else: logging.error( f"Model config for {amodel_name} not found; available models {list_models()}." ) raise RuntimeError(f"Model config for {amodel_name} not found.") logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.") # Hard Code in model name model_cfg["text_cfg"]["model_type"] = tmodel_name
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"] _MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs def _natural_key(string_): return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())] def _rescan_model_configs(): global _MODEL_CONFIGS config_ext = (".json",) config_files = [] for config_path in _MODEL_CONFIG_PATHS: if config_path.is_file() and config_path.suffix in config_ext: config_files.append(config_path) elif config_path.is_dir(): for ext in config_ext: config_files.extend(config_path.glob(f"*{ext}")) for cf in config_files: if os.path.basename(cf)[0] == ".": continue # Ignore hidden files with open(cf, "r") as f: model_cfg = json.load(f) if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")): _MODEL_CONFIGS[cf.stem] = model_cfg _MODEL_CONFIGS = { k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0])) } _rescan_model_configs() # initial populate of model config registry def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True): checkpoint = torch.load(checkpoint_path, map_location=map_location) if isinstance(checkpoint, dict) and "state_dict" in checkpoint: state_dict = checkpoint["state_dict"] else: state_dict = checkpoint if skip_params: if next(iter(state_dict.items()))[0].startswith("module"): state_dict = {k[7:]: v for k, v in state_dict.items()} # for k in state_dict: # if k.startswith('transformer'): # v = state_dict.pop(k) # state_dict['text_branch.' + k[12:]] = v return state_dict def create_model( amodel_name: str, tmodel_name: str, pretrained: str = "", precision: str = "fp32", device: torch.device = torch.device("cpu"), jit: bool = False, force_quick_gelu: bool = False, openai_model_cache_dir: str = os.path.expanduser("~/.cache/clip"), skip_params=True, pretrained_audio: str = "", pretrained_text: str = "", enable_fusion: bool = False, fusion_type: str = "None" # pretrained_image: bool = False, ): amodel_name = amodel_name.replace( "/", "-" ) # for callers using old naming with / in ViT names pretrained_orig = pretrained pretrained = pretrained.lower() if pretrained == "openai": if amodel_name in _MODEL_CONFIGS: logging.info(f"Loading {amodel_name} model config.") model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name]) else: logging.error( f"Model config for {amodel_name} not found; available models {list_models()}." ) raise RuntimeError(f"Model config for {amodel_name} not found.") logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.") # Hard Code in model name model_cfg["text_cfg"]["model_type"] = tmodel_name
model = load_openai_model(
2
2023-10-29 09:32:19+00:00
8k
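For reference, the factory code in the record above builds its model registry by scanning JSON config files into a dict keyed by file stem and sorting the keys with a natural sort, so that e.g. "model-2" orders before "model-10". A stripped-down sketch of that pattern follows; the directory name in the commented usage is hypothetical.

import json
import re
from pathlib import Path

def _natural_key(s: str):
    # Split digit runs out so numeric parts compare as integers.
    return [int(t) if t.isdigit() else t for t in re.split(r"(\d+)", s.lower())]

def scan_model_configs(config_dir: Path) -> dict:
    configs = {}
    for cf in config_dir.glob("*.json"):
        if cf.name.startswith("."):
            continue  # ignore hidden files, as the factory does
        with open(cf) as f:
            cfg = json.load(f)
        # Only register configs that carry the expected top-level keys.
        if all(k in cfg for k in ("embed_dim", "audio_cfg", "text_cfg")):
            configs[cf.stem] = cfg
    return dict(sorted(configs.items(), key=lambda kv: _natural_key(kv[0])))

# Usage sketch with a hypothetical directory:
# registry = scan_model_configs(Path("model_configs"))
# print(list(registry))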
KenyonY/flaxkv
flaxkv/core.py
[ { "identifier": "class_measure_time", "path": "flaxkv/decorators.py", "snippet": "def class_measure_time(logger=None, level=logging.INFO, prec=3):\r\n def decorate(func):\r\n \"\"\"Log the runtime of the decorated function.\"\"\"\r\n\r\n @wraps(func)\r\n def wrapper(self, *args, **kwargs):\r\n if not ENABLED_MEASURE_TIME_DECORATOR:\r\n return func(self, *args, **kwargs)\r\n start = time.perf_counter()\r\n value = func(self, *args, **kwargs)\r\n end = time.perf_counter()\r\n cost_time = end - start\r\n time_str = f\"{cost_time:.{int(prec)}E}\"\r\n msg = f\"{func.__name__}:{self._db_manager.db_type}\"\r\n if logger:\r\n show_string = f\"Finished {msg} in {time_str} secs.\"\r\n logger.log(level, show_string)\r\n else:\r\n rgb_cost_time = Text(time_str, style='green')\r\n rgb_msg = Text(f\"{msg}\", style=\"cyan\")\r\n str_tuple = (f\"Finished\", rgb_msg, \"in\", rgb_cost_time, \"secs.\")\r\n print(*str_tuple, sep=' ')\r\n return value\r\n\r\n return wrapper\r\n\r\n return decorate\r" }, { "identifier": "SimpleQueue", "path": "flaxkv/helper.py", "snippet": "class SimpleQueue:\r\n def __init__(self, maxsize: int):\r\n self.q = queue.Queue(maxsize=maxsize)\r\n\r\n def put(self, item):\r\n if not self.q.full():\r\n self.q.put(item)\r\n else:\r\n self.q.get()\r\n self.q.put(item)\r\n\r\n def get(self, block=True, timeout=None):\r\n return self.q.get(block=block, timeout=timeout)\r\n\r\n def empty(self):\r\n return self.q.empty()\r\n\r\n def clear(self):\r\n while not self.empty():\r\n self.get()\r" }, { "identifier": "setting_log", "path": "flaxkv/log.py", "snippet": "def setting_log(level=None, multi_process=True, save_file=True, stdout=True):\r\n \"\"\"\r\n Configures the logging settings for the application.\r\n \"\"\"\r\n if level is None:\r\n # https://loguru.readthedocs.io/en/stable/api/logger.html\r\n # level = \"CRITICAL\"\r\n level = \"INFO\"\r\n save_file = False\r\n\r\n tz = os.environ.get(\"TZ\", \"\").strip()\r\n if tz and hasattr(time, \"tzset\"):\r\n\r\n def get_utc_offset(timezone_str):\r\n timezone = pytz.timezone(timezone_str)\r\n offset_seconds = timezone.utcoffset(datetime.now()).total_seconds()\r\n offset_hours = offset_seconds // 3600\r\n return f\"UTC{-int(offset_hours):+d}\"\r\n\r\n try:\r\n os.environ[\"TZ\"] = get_utc_offset(tz)\r\n except:\r\n pass\r\n time.tzset()\r\n config_handlers = []\r\n if stdout:\r\n config_handlers += [\r\n {\r\n \"sink\": sys.stdout,\r\n \"level\": level,\r\n \"filter\": lambda record: \"flaxkv\" in record[\"extra\"],\r\n },\r\n ]\r\n if save_file:\r\n config_handlers += [\r\n {\r\n \"sink\": f\"./Log/flaxkv.log\",\r\n \"enqueue\": multi_process,\r\n \"rotation\": \"100 MB\",\r\n \"level\": level,\r\n \"filter\": lambda record: \"flaxkv\" in record[\"extra\"],\r\n }\r\n ]\r\n return config_handlers\r" }, { "identifier": "DBManager", "path": "flaxkv/manager.py", "snippet": "class DBManager:\r\n def __init__(\r\n self, db_type: str, root_path_or_url: str, db_name: str, rebuild=False, **kwargs\r\n ):\r\n \"\"\"\r\n Initializes the database manager.\r\n\r\n Args:\r\n db_type (str): Type of the database (\"lmdb\", \"leveldb\", \"remote\").\r\n root_path_or_url (str): Root path or URL of the database.\r\n db_name (str): Name of the database.\r\n rebuild (bool, optional): Whether to create a new database. 
Defaults to False.\r\n \"\"\"\r\n self.db_type = db_type.lower()\r\n self.db_name = db_name\r\n self._rebuild = rebuild\r\n\r\n url_pattern = re.compile(r'^(http://|https://|ftp://)')\r\n if url_pattern.match(root_path_or_url):\r\n self.db_address = root_path_or_url\r\n self.db_name = f\"{db_name}-{kwargs.get('backend', 'leveldb')}\"\r\n else:\r\n self.db_address = os.path.join(\r\n root_path_or_url, f\"{db_name}-{self.db_type}\"\r\n )\r\n\r\n root_path = Path(root_path_or_url)\r\n if not root_path.exists():\r\n root_path.mkdir(parents=True, exist_ok=True)\r\n\r\n if self._rebuild:\r\n self.destroy()\r\n\r\n self.env = self.connect(**kwargs)\r\n\r\n def connect(self, **kwargs):\r\n \"\"\"\r\n Connects to the specified database.\r\n\r\n Returns:\r\n env: A database environment object based on the specified DB type.\r\n \"\"\"\r\n if self.db_type == \"lmdb\":\r\n import lmdb\r\n\r\n env = lmdb.open(\r\n self.db_address,\r\n max_dbs=kwargs.get('max_dbs', 1),\r\n map_size=kwargs.get('map_size', 2 * 1024**3),\r\n )\r\n\r\n elif self.db_type == \"leveldb\":\r\n import plyvel\r\n\r\n env = plyvel.DB(self.db_address, create_if_missing=True)\r\n\r\n elif self.db_type == \"remote\":\r\n env = RemoteTransaction(\r\n base_url=self.db_address,\r\n db_name=self.db_name,\r\n backend=kwargs.pop(\"backend\", \"leveldb\"),\r\n rebuild=self._rebuild,\r\n timeout=kwargs.pop(\r\n \"timeout\", 10\r\n ), # `timeout` refers to connection timeout\r\n **kwargs,\r\n )\r\n else:\r\n raise ValueError(f\"Unsupported DB type {self.db_type}.\")\r\n return env\r\n\r\n def rmtree(self):\r\n \"\"\"\r\n Deletes the database at the specified path.\r\n \"\"\"\r\n shutil.rmtree(self.db_address, ignore_errors=True)\r\n\r\n def destroy(self):\r\n \"\"\"\r\n Destroys the database by closing and deleting it.\r\n \"\"\"\r\n try:\r\n self.close()\r\n except:\r\n pass\r\n self.rmtree()\r\n logger.info(f\"Destroyed database at {self.db_address}.\")\r\n\r\n def rebuild_db(self):\r\n \"\"\"\r\n Clears the database by closing and recreating it.\r\n \"\"\"\r\n try:\r\n self.close()\r\n except:\r\n pass\r\n self.rmtree()\r\n self.env = self.connect()\r\n\r\n def get_env(self):\r\n \"\"\"\r\n Retrieves the database environment.\r\n\r\n Returns:\r\n env: The database environment object.\r\n \"\"\"\r\n return self.env\r\n\r\n def new_static_view(self):\r\n \"\"\"\r\n Creates a new static view of the database.\r\n\r\n Returns:\r\n static_view: A static view of the database based on the specified DB type.\r\n \"\"\"\r\n if self.db_type == \"lmdb\":\r\n return self.env.begin()\r\n elif self.db_type == \"leveldb\":\r\n return self.env.snapshot()\r\n elif self.db_type == \"remote\":\r\n return self.env\r\n else:\r\n raise ValueError(f\"Unsupported DB type {self.db_type}.\")\r\n\r\n def close_static_view(self, static_view):\r\n \"\"\"\r\n Closes the provided static view of the database.\r\n\r\n Args:\r\n static_view: The static view to be closed.\r\n \"\"\"\r\n if self.db_type == \"lmdb\":\r\n return static_view.abort()\r\n elif self.db_type == \"leveldb\":\r\n return static_view.close()\r\n elif self.db_type == \"remote\":\r\n return static_view.close()\r\n else:\r\n raise ValueError(f\"Unsupported DB type {self.db_type}.\")\r\n\r\n def write(self):\r\n \"\"\"\r\n Initiates a write transaction on the database.\r\n\r\n Returns:\r\n wb: A write transaction object based on the specified DB type.\r\n \"\"\"\r\n if self.db_type == \"lmdb\":\r\n return self.env.begin(write=True)\r\n elif self.db_type == \"leveldb\":\r\n return self.env.write_batch()\r\n 
elif self.db_type == \"remote\":\r\n return self.env\r\n else:\r\n traceback.print_exc()\r\n raise ValueError(f\"Unsupported DB type {self.db_type}.\")\r\n\r\n # def pull(self):\r\n # if self.db_type == \"lmdb\":\r\n # ...\r\n # elif self.db_type == \"leveldb\":\r\n # ...\r\n # elif self.db_type == \"remote\":\r\n # ...\r\n # else:\r\n # traceback.print_exc()\r\n # raise ValueError(f\"Unsupported DB type {self.db_type}.\")\r\n\r\n def close(self):\r\n \"\"\"\r\n Closes the database connection.\r\n \"\"\"\r\n if self.db_type == \"lmdb\":\r\n return self.env.close()\r\n elif self.db_type == \"leveldb\":\r\n return self.env.close()\r\n elif self.db_type == \"remote\":\r\n return self.env.close()\r\n else:\r\n raise ValueError(f\"Unsupported DB type to {self.db_type}.\")\r" }, { "identifier": "check_pandas_type", "path": "flaxkv/pack.py", "snippet": "class NPArray(msgspec.Struct, gc=False, array_like=True):\r\n def check_pandas_type(obj):\r\n def check_pandas_type(obj):\r\ndef encode_hook(obj):\r\ndef ext_hook(type, data: memoryview):\r\ndef decode_key(value):\r" } ]
import atexit
import threading
import time
import traceback

import numpy as np
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar

from loguru import logger

from .decorators import class_measure_time
from .helper import SimpleQueue
from .log import setting_log
from .manager import DBManager
from .pack import check_pandas_type, decode, decode_key, encode

from httpx import Response
from litestar.exceptions import HTTPException
4,321
""" Removes the key-value pair and returns the value. Args: key: The key to pop. default: The default value to return if the key does not exist. Returns: value: The value associated with the key, or the default value. """ if key in self: with self._buffer_lock: self.delete_buffer_set.add(key) self._buffered_count += 1 self._last_set_time = time.time() if key in self.buffer_dict: value = self.buffer_dict.pop(key) if self._raw: return decode(value) else: return value else: if self._cache_all_db: value = self._cache_dict.pop(key) else: key = self._encode_key(key) value = decode(self._static_view.get(key)) return value else: return default def __contains__(self, key): """ Checks if a key exists in the buffer or database. Args: key: The key to check. Returns: bool: True if the key exists, False otherwise. """ with self._buffer_lock: if key in self.buffer_dict: return True if key in self.delete_buffer_set: return False if self._cache_all_db: return key in self._cache_dict key = self._encode_key(key) return ( self._static_view.get(key) is not None ) # self._static_view.get() return a binary value or None def clear(self, wait=True): """ Clears the database and resets the buffer. """ self.close(write=False, wait=wait) self._db_manager.rebuild_db() self._init() def destroy(self): """ Destroys the database by closing and deleting it. """ self.close(write=False) self._unregister_auto_close() self._db_manager.destroy() self._logger.info(f"Destroyed database successfully.") def __del__(self): """ Destructor for the BaseDBDict class. Closes the database before object deletion. """ self.close(write=True) def __repr__(self): return str(self.db_dict()) def __len__(self): return self.stat()['count'] def close(self, write=True, wait=False): """ Closes the database and stops the background worker. Args: write (bool, optional): Whether to write the buffer to the database before closing. Defaults to True. wait (bool, optional): Whether to wait for the background worker to finish. Defaults to False. """ self._close_background_worker(write=write, block=wait) self._db_manager.close_static_view(self._static_view) self._db_manager.close() self._logger.info(f"Closed ({self._db_manager.db_type.upper()}) successfully") def _get_status_info( self, return_key=False, return_value=False, return_buffer_dict=False, return_view=True, decode_raw=True, ): static_view = None buffer_keys_set, buffer_values_list = None, None # shallow copy buffer data with self._buffer_lock: if return_view: static_view = self._db_manager.new_static_view() buffer_dict = self.buffer_dict.copy() delete_buffer_set = self.delete_buffer_set.copy() if self._raw and decode_raw:
# Copyright (c) 2023 K.Y. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations if TYPE_CHECKING: class BaseDBDict(ABC): MAX_BUFFER_SIZE = 100 # unit: number of keys COMMIT_TIME_INTERVAL = 10 * 60 # unit: second _logger = logger # Unused @dataclass class _enc_prefix: str = b's' int = b'i' float = b'f' bool = b'b' list = b'l' tuple = b't' dict = b'd' array = b'a' def __init__( self, db_type, root_path_or_url: str, db_name: str, rebuild=False, raw=False, cache=False, **kwargs, ): """ Initializes the BaseDBDict class which provides a dictionary-like interface to a database. Args: db_type (str): Type of the database ("lmdb" or "leveldb" or "remote"). root_path_or_url (str): Root path or URL of the database. rebuild (bool, optional): Whether to recreate the database. Defaults to False. raw (bool): Only used by the server. """ log_level = kwargs.pop('log', None) if log_level: log_configs = setting_log( level="DEBUG" if log_level is True else log_level, stdout=kwargs.pop("stdout", False), save_file=kwargs.pop('save_log', False), ) log_ids = [logger.add(**log_conf) for log_conf in log_configs] self._logger = logger.bind(flaxkv=True) else: logger.disable('flaxkv') self._db_manager = DBManager( db_type=db_type, root_path_or_url=root_path_or_url, db_name=db_name, rebuild=rebuild, **kwargs, ) self._db_name = self._db_manager.db_name self._raw = raw self._cache_all_db = cache self._register_auto_close() self._init() def _init(self): self._static_view = self._db_manager.new_static_view() self.buffer_dict = {} self.delete_buffer_set = set() self._buffered_count = 0 self._buffer_lock = threading.Lock() self._stop_event = threading.Event() self._last_set_time = None self._write_complete = SimpleQueue(maxsize=1) self._write_event = threading.Event() self._latest_write_num = 0 self._write_queue = SimpleQueue(maxsize=1) self._thread_running = True self._thread = threading.Thread(target=self._background_worker) self._thread.daemon = True self._thread_write_monitor = threading.Thread(target=self._write_monitor) self._thread_write_monitor.daemon = True # Start the background worker self._start() self._cache_dict = {} # DB data that marked_delete has been deleted if self._cache_all_db: # load from db self._pull_db_data_to_cache() def _register_auto_close(self, func=None): if func is None: atexit.register(self.close) else: atexit.register(func) def _unregister_auto_close(self, func=None): if func is None: atexit.unregister(self.close) else: atexit.unregister(func) def _start(self): """ Starts the background worker thread. """ self._thread_running = True self._thread.start() self._thread_write_monitor.start() @staticmethod def _diff_buffer(a: dict, b: dict): """ Computes the difference between two buffers. Find a dictionary containing key-value pairs that exist in `a` but not in `b`. Args: a (dict): The latest buffer. b (dict): The older buffer. Q: Why don't you need to worry about the key-value pair existing in 'b' but not in 'a'? 
A: Because the absence of the key in 'a' indicates that the value has been deleted by the user, and this information will be stored in the 'deleted_set'. Returns: dict: A dictionary containing key-value pairs that exist in `a` but not in `b`. """ result = {} for key, value in a.items(): if key not in b: result[key] = value else: if type(value) is not type(b[key]): continue if isinstance(value, np.ndarray): if not np.array_equal(value, b[key]): result[key] = value elif check_pandas_type(value): if not value.equals(b[key]): result[key] = value else: if value != b[key]: result[key] = value return result def _write_monitor(self): self._logger.info("Write monitor started") while not self._stop_event.is_set(): time.sleep(0.2) if self._last_set_time is not None: if (time.time() - self._last_set_time) >= 0.6: self._logger.debug("Write monitor triggered") self.write_immediately() def _background_worker(self): """ Background worker function to periodically write buffer to the database. """ while self._thread_running or not self._write_queue.empty(): self._write_event.wait(timeout=self.COMMIT_TIME_INTERVAL) self._write_event.clear() if not self._write_queue.empty(): is_write = self._write_queue.get() if is_write is False: self._write_complete.put(True) break self._write_complete.clear() try: self._write_buffer_to_db(current_write_num=self._latest_write_num) except: # todo: self._logger.warning(f"Write buffer to db failed. error") traceback.print_exc() self._write_complete.put(True) def write_immediately(self, write=True, block=False): """ Triggers an immediate write of the buffer to the database. """ self._last_set_time = None self._latest_write_num += 1 self._write_queue.put(write) self._write_event.set() if block: self._write_complete.clear() self._write_complete.get(block=True) def wait_until_write_complete(self, timeout=None): """ Waits until the background worker thread has finished writing the buffer to the database. """ self._write_complete.get(block=True, timeout=timeout) def _close_background_worker(self, write=True, block=False): """ Stops the background worker thread. """ self._stop_event.set() self._latest_write_num += 1 self._thread_running = False self.write_immediately(write=write, block=block) self._thread.join(timeout=15) self._thread_write_monitor.join(timeout=3) if self._thread.is_alive(): self._logger.warning( "Warning: Background thread did not finish in time. Some data might not be saved." ) def _encode_key(self, key): if self._raw: return key else: return encode(key) def _encode_value(self, value): if self._raw: return value else: return encode(value) def get(self, key: Any, default=None): """ Retrieves the value associated with the given key. Args: key (Any): The key to retrieve. default: The default value to set if the key does not exist. Returns: value: The value associated with the key, or None if the key is not found. """ with self._buffer_lock: if key in self.delete_buffer_set: return default if key in self.buffer_dict: return self.buffer_dict[key] if self._cache_all_db: return self._cache_dict.get(key, default) key = self._encode_key(key) value = self._static_view.get(key) if value is None: return default return value if self._raw else decode(value) def get_db_value(self, key: str): """ Directly retrieves the encoded value associated with the given key from the database. Args: key (str): The key to retrieve. Returns: value: The encoded value associated with the key. 
""" key = self._encode_key(key) return self._static_view.get(key) def get_batch(self, keys): """ Retrieves values for a batch of keys. Args: keys (list): A list of keys to retrieve. Returns: list: A list of values corresponding to the given keys. """ values = [] for key in keys: if self.delete_buffer_set and key in self.delete_buffer_set: values.append(None) continue if key in self.buffer_dict: values.append(self.buffer_dict[key]) continue if self._cache_all_db: value = self._cache_dict.get(key) else: key = self._encode_key(key) value = self._static_view.get(key) if value is not None: value = decode(value) values.append(value) return values def _set(self, key, value): """ Sets the value for a given key in the buffer. Args: key: The key to set. value: The value to associate with the key. """ with self._buffer_lock: self.buffer_dict[key] = value self.delete_buffer_set.discard(key) self._buffered_count += 1 self._last_set_time = time.time() # Trigger immediate write if buffer size exceeds MAX_BUFFER_SIZE if self._buffered_count >= self.MAX_BUFFER_SIZE: self._logger.debug("Trigger immediate write") self._buffered_count = 0 self.write_immediately() def setdefault(self, key, default=None): """ Retrieves the value for a given key. If the key does not exist, sets it to the default value. Args: key (Any): The key to retrieve. default: The default value to set if the key does not exist. Returns: value: The value associated with the key. """ value = self.get(key) if value is None: self._set(key, default) return default return value def update(self, d: dict): """ Updates the buffer with the given dictionary. Args: d (dict): A dictionary of key-value pairs to update. """ if not isinstance(d, dict): raise ValueError("Input must be a dictionary.") with self._buffer_lock: for key, value in d.items(): if self._raw: key, value = encode(key), encode(value) self.buffer_dict[key] = value self.delete_buffer_set.discard(key) self._buffered_count += 1 self._last_set_time = time.time() # Trigger immediate write if buffer size exceeds MAX_BUFFER_SIZE if self._buffered_count >= self.MAX_BUFFER_SIZE: self._logger.debug("Trigger immediate write") self._buffered_count = 0 self.write_immediately() # @class_measure_time() def _write_buffer_to_db( self, current_write_num: int, ): """ Writes the current buffer to the database. Args: current_write_num (int): The current write operation number. 
""" with self._buffer_lock: self._logger.debug(f"Trigger write") self._logger.debug(f"{current_write_num=}") if not (self.buffer_dict or self.delete_buffer_set): self._logger.debug( f"buffer is empty and delete_buffer_set is empty: {self._latest_write_num=} {current_write_num=}" ) return else: # ensure atomicity (shallow copy) buffer_dict_snapshot = self.buffer_dict.copy() delete_buffer_set_snapshot = self.delete_buffer_set.copy() cache_dict = self._cache_dict.copy() # ensure atomicity with self._db_manager.write() as wb: try: for key in delete_buffer_set_snapshot: # delete from db key = self._encode_key(key) wb.delete(key) for key, value in buffer_dict_snapshot.items(): # set key, value to cache if self._cache_all_db: cache_dict[key] = value # set key, value to db key, value = self._encode_key(key), self._encode_value(value) wb.put(key, value) except Exception as e: traceback.print_exc() self._logger.error( f"Error writing to {self._db_manager.db_type}: {e}\n" f"data will rollback" ) raise with self._buffer_lock: self.delete_buffer_set = self.delete_buffer_set - delete_buffer_set_snapshot self.buffer_dict = self._diff_buffer(self.buffer_dict, buffer_dict_snapshot) self._cache_dict = cache_dict self._db_manager.close_static_view(self._static_view) self._static_view = self._db_manager.new_static_view() self._logger.info( f"write {self._db_manager.db_type.upper()} buffer to db successfully! " f"current_num={current_write_num} latest_num={self._latest_write_num}" ) def __iter__(self): """ Returns an iterator over the keys. """ return self.keys() def __getitem__(self, key): """ Retrieves the value for a given key using the dictionary access syntax. Args: key: The key to retrieve. Returns: value: The value associated with the key. """ value = self.get(key, b'iamnone') if isinstance(value, bytes) and value == b'iamnone': raise KeyError(f"Key `{key}` not found in the database.") return value def __setitem__(self, key, value): """ Sets the value for a given key using the dictionary access syntax. Args: key: The key to set. value: The value to associate with the key. """ self._set(key, value) def __delitem__(self, key): """ Deletes a key-value pair using the dictionary access syntax. Args: key: The key to delete. """ if key in self: with self._buffer_lock: self.delete_buffer_set.add(key) self._buffered_count += 1 self._last_set_time = time.time() if key in self.buffer_dict: del self.buffer_dict[key] return else: if self._cache_all_db: self._cache_dict.pop(key) else: raise KeyError(f"Key `{key}` not found in the database.") def pop(self, key, default=None): """ Removes the key-value pair and returns the value. Args: key: The key to pop. default: The default value to return if the key does not exist. Returns: value: The value associated with the key, or the default value. """ if key in self: with self._buffer_lock: self.delete_buffer_set.add(key) self._buffered_count += 1 self._last_set_time = time.time() if key in self.buffer_dict: value = self.buffer_dict.pop(key) if self._raw: return decode(value) else: return value else: if self._cache_all_db: value = self._cache_dict.pop(key) else: key = self._encode_key(key) value = decode(self._static_view.get(key)) return value else: return default def __contains__(self, key): """ Checks if a key exists in the buffer or database. Args: key: The key to check. Returns: bool: True if the key exists, False otherwise. 
""" with self._buffer_lock: if key in self.buffer_dict: return True if key in self.delete_buffer_set: return False if self._cache_all_db: return key in self._cache_dict key = self._encode_key(key) return ( self._static_view.get(key) is not None ) # self._static_view.get() return a binary value or None def clear(self, wait=True): """ Clears the database and resets the buffer. """ self.close(write=False, wait=wait) self._db_manager.rebuild_db() self._init() def destroy(self): """ Destroys the database by closing and deleting it. """ self.close(write=False) self._unregister_auto_close() self._db_manager.destroy() self._logger.info(f"Destroyed database successfully.") def __del__(self): """ Destructor for the BaseDBDict class. Closes the database before object deletion. """ self.close(write=True) def __repr__(self): return str(self.db_dict()) def __len__(self): return self.stat()['count'] def close(self, write=True, wait=False): """ Closes the database and stops the background worker. Args: write (bool, optional): Whether to write the buffer to the database before closing. Defaults to True. wait (bool, optional): Whether to wait for the background worker to finish. Defaults to False. """ self._close_background_worker(write=write, block=wait) self._db_manager.close_static_view(self._static_view) self._db_manager.close() self._logger.info(f"Closed ({self._db_manager.db_type.upper()}) successfully") def _get_status_info( self, return_key=False, return_value=False, return_buffer_dict=False, return_view=True, decode_raw=True, ): static_view = None buffer_keys_set, buffer_values_list = None, None # shallow copy buffer data with self._buffer_lock: if return_view: static_view = self._db_manager.new_static_view() buffer_dict = self.buffer_dict.copy() delete_buffer_set = self.delete_buffer_set.copy() if self._raw and decode_raw:
delete_buffer_set = {decode_key(i) for i in delete_buffer_set}
4
2023-10-27 15:53:02+00:00
8k
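The flaxkv snippet in this record routes every write through an in-memory buffer (buffer_dict) plus a staged delete set (delete_buffer_set) that a background worker later flushes to the backing LMDB/LevelDB store. The following is a minimal, self-contained sketch of that buffering pattern, assuming a plain dict as a stand-in backend; the class name BufferedKV and its methods are illustrative only and are not part of the flaxkv API.

import threading

class BufferedKV:
    """Minimal sketch of a buffered key-value store: reads consult the
    in-memory buffer first, deletes are staged in a set, and flush()
    applies both to the backing store (a plain dict here)."""

    def __init__(self, max_buffer_size=100):
        self._db = {}            # stand-in for an on-disk store such as LMDB
        self._buffer = {}        # staged puts
        self._deleted = set()    # staged deletes
        self._lock = threading.Lock()
        self._max_buffer_size = max_buffer_size

    def __setitem__(self, key, value):
        with self._lock:
            self._buffer[key] = value
            self._deleted.discard(key)
            should_flush = len(self._buffer) >= self._max_buffer_size
        if should_flush:
            self.flush()

    def __delitem__(self, key):
        with self._lock:
            self._buffer.pop(key, None)
            self._deleted.add(key)

    def get(self, key, default=None):
        with self._lock:
            if key in self._deleted:
                return default
            if key in self._buffer:
                return self._buffer[key]
            return self._db.get(key, default)

    def flush(self):
        with self._lock:
            for key in self._deleted:
                self._db.pop(key, None)    # apply staged deletes first
            self._db.update(self._buffer)  # then staged writes
            self._buffer.clear()
            self._deleted.clear()

kv = BufferedKV()
kv["a"] = 1
assert kv.get("a") == 1
del kv["a"]
kv.flush()
assert kv.get("a") is None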
hugoycj/light-hloc
lighthloc/matchers/lightglue.py
[ { "identifier": "BaseModel", "path": "lighthloc/utils/base_model.py", "snippet": "class BaseModel(nn.Module, metaclass=ABCMeta):\n default_conf = {}\n required_inputs = []\n\n def __init__(self, conf):\n \"\"\"Perform some logic and call the _init method of the child model.\"\"\"\n super().__init__()\n self.conf = conf = {**self.default_conf, **conf}\n self.required_inputs = copy(self.required_inputs)\n self._init(conf)\n sys.stdout.flush()\n\n def forward(self, data):\n \"\"\"Check the data and call the _forward method of the child model.\"\"\"\n for key in self.required_inputs:\n assert key in data, 'Missing key {} in data'.format(key)\n return self._forward(data)\n\n @abstractmethod\n def _init(self, conf):\n \"\"\"To be implemented by the child class.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def _forward(self, data):\n \"\"\"To be implemented by the child class.\"\"\"\n raise NotImplementedError" }, { "identifier": "LightGlue", "path": "lighthloc/matchers/modules/lightglue/lightglue.py", "snippet": "class LightGlue(nn.Module):\n default_conf = {\n \"name\": \"lightglue\", # just for interfacing\n \"input_dim\": 256, # input descriptor dimension (autoselected from weights)\n \"descriptor_dim\": 256,\n \"n_layers\": 9,\n \"num_heads\": 4,\n \"flash\": True, # enable FlashAttention if available.\n \"mp\": False, # enable mixed precision\n \"depth_confidence\": 0.95, # early stopping, disable with -1\n \"width_confidence\": 0.99, # point pruning, disable with -1\n \"filter_threshold\": 0.1, # match threshold\n \"weights\": None,\n }\n\n # Point pruning involves an overhead (gather).\n # Therefore, we only activate it if there are enough keypoints.\n pruning_keypoint_thresholds = {\n \"cpu\": -1,\n \"mps\": -1,\n \"cuda\": 1024,\n \"flash\": 1536,\n }\n\n required_data_keys = [\"image0\", \"image1\"]\n\n version = \"v0.1_arxiv\"\n url = \"https://github.com/cvg/LightGlue/releases/download/{}/{}_lightglue.pth\"\n\n features = {\n \"superpoint\": (\"superpoint_lightglue\", 256),\n \"disk\": (\"disk_lightglue\", 128),\n }\n\n def __init__(self, features=\"superpoint\", **conf) -> None:\n super().__init__()\n self.conf = {**self.default_conf, **conf}\n if features is not None:\n assert features in list(self.features.keys())\n self.conf[\"weights\"], self.conf[\"input_dim\"] = self.features[features]\n self.conf = conf = SimpleNamespace(**self.conf)\n\n if conf.input_dim != conf.descriptor_dim:\n self.input_proj = nn.Linear(conf.input_dim, conf.descriptor_dim, bias=True)\n else:\n self.input_proj = nn.Identity()\n\n head_dim = conf.descriptor_dim // conf.num_heads\n self.posenc = LearnableFourierPositionalEncoding(2, head_dim, head_dim)\n\n h, n, d = conf.num_heads, conf.n_layers, conf.descriptor_dim\n\n self.transformers = nn.ModuleList(\n [TransformerLayer(d, h, conf.flash) for _ in range(n)]\n )\n\n self.log_assignment = nn.ModuleList([MatchAssignment(d) for _ in range(n)])\n self.token_confidence = nn.ModuleList(\n [TokenConfidence(d) for _ in range(n - 1)]\n )\n self.register_buffer(\n \"confidence_thresholds\",\n torch.Tensor(\n [self.confidence_threshold(i) for i in range(self.conf.n_layers)]\n ),\n )\n\n state_dict = None\n if features is not None:\n fname = f\"{conf.weights}_{self.version}.pth\".replace(\".\", \"-\")\n state_dict = torch.hub.load_state_dict_from_url(\n self.url.format(self.version, features), file_name=fname\n )\n self.load_state_dict(state_dict, strict=False)\n elif conf.weights is not None:\n path = Path(__file__).parent\n path = path / 
\"weights/{}.pth\".format(self.conf.weights)\n state_dict = torch.load(str(path), map_location=\"cpu\")\n\n if state_dict:\n # rename old state dict entries\n for i in range(self.conf.n_layers):\n pattern = f\"self_attn.{i}\", f\"transformers.{i}.self_attn\"\n state_dict = {k.replace(*pattern): v for k, v in state_dict.items()}\n pattern = f\"cross_attn.{i}\", f\"transformers.{i}.cross_attn\"\n state_dict = {k.replace(*pattern): v for k, v in state_dict.items()}\n self.load_state_dict(state_dict, strict=False)\n\n # static lengths LightGlue is compiled for (only used with torch.compile)\n self.static_lengths = None\n\n def compile(\n self, mode=\"reduce-overhead\", static_lengths=[256, 512, 768, 1024, 1280, 1536]\n ):\n if self.conf.width_confidence != -1:\n warnings.warn(\n \"Point pruning is partially disabled for compiled forward.\",\n stacklevel=2,\n )\n\n for i in range(self.conf.n_layers):\n self.transformers[i].masked_forward = torch.compile(\n self.transformers[i].masked_forward, mode=mode, fullgraph=True\n )\n\n self.static_lengths = static_lengths\n\n def forward(self, data: dict) -> dict:\n \"\"\"\n Match keypoints and descriptors between two images\n\n Input (dict):\n image0: dict\n keypoints: [B x M x 2]\n descriptors: [B x M x D]\n image: [B x C x H x W] or image_size: [B x 2]\n image1: dict\n keypoints: [B x N x 2]\n descriptors: [B x N x D]\n image: [B x C x H x W] or image_size: [B x 2]\n Output (dict):\n log_assignment: [B x M+1 x N+1]\n matches0: [B x M]\n matching_scores0: [B x M]\n matches1: [B x N]\n matching_scores1: [B x N]\n matches: List[[Si x 2]], scores: List[[Si]]\n \"\"\"\n if self.conf.mp:\n with torch.cuda.amp.autocast():\n return self._forward(data)\n else:\n return self._forward(data)\n\n def _forward(self, data: dict) -> dict:\n for key in self.required_data_keys:\n assert key in data, f\"Missing key {key} in data\"\n data0, data1 = data[\"image0\"], data[\"image1\"]\n kpts0, kpts1 = data0[\"keypoints\"], data1[\"keypoints\"]\n b, m, _ = kpts0.shape\n b, n, _ = kpts1.shape\n device = kpts0.device\n size0, size1 = data0.get(\"image_size\"), data1.get(\"image_size\")\n kpts0 = normalize_keypoints(kpts0, size0).clone()\n kpts1 = normalize_keypoints(kpts1, size1).clone()\n\n desc0 = data0[\"descriptors\"].detach().contiguous()\n desc1 = data1[\"descriptors\"].detach().contiguous()\n\n assert desc0.shape[-1] == self.conf.input_dim\n assert desc1.shape[-1] == self.conf.input_dim\n\n if torch.is_autocast_enabled():\n desc0 = desc0.half()\n desc1 = desc1.half()\n\n mask0, mask1 = None, None\n c = max(m, n)\n do_compile = self.static_lengths and c <= max(self.static_lengths)\n if do_compile:\n kn = min([k for k in self.static_lengths if k >= c])\n desc0, mask0 = pad_to_length(desc0, kn)\n desc1, mask1 = pad_to_length(desc1, kn)\n kpts0, _ = pad_to_length(kpts0, kn)\n kpts1, _ = pad_to_length(kpts1, kn)\n desc0 = self.input_proj(desc0)\n desc1 = self.input_proj(desc1)\n # cache positional embeddings\n encoding0 = self.posenc(kpts0)\n encoding1 = self.posenc(kpts1)\n\n # GNN + final_proj + assignment\n do_early_stop = self.conf.depth_confidence > 0\n do_point_pruning = self.conf.width_confidence > 0 and not do_compile\n pruning_th = self.pruning_min_kpts(device)\n if do_point_pruning:\n ind0 = torch.arange(0, m, device=device)[None]\n ind1 = torch.arange(0, n, device=device)[None]\n # We store the index of the layer at which pruning is detected.\n prune0 = torch.ones_like(ind0)\n prune1 = torch.ones_like(ind1)\n token0, token1 = None, None\n for i in 
range(self.conf.n_layers):\n desc0, desc1 = self.transformers[i](\n desc0, desc1, encoding0, encoding1, mask0=mask0, mask1=mask1\n )\n if i == self.conf.n_layers - 1:\n continue # no early stopping or adaptive width at last layer\n\n if do_early_stop:\n token0, token1 = self.token_confidence[i](desc0, desc1)\n if self.check_if_stop(token0[..., :m, :], token1[..., :n, :], i, m + n):\n break\n if do_point_pruning and desc0.shape[-2] > pruning_th:\n scores0 = self.log_assignment[i].get_matchability(desc0)\n prunemask0 = self.get_pruning_mask(token0, scores0, i)\n keep0 = torch.where(prunemask0)[1]\n ind0 = ind0.index_select(1, keep0)\n desc0 = desc0.index_select(1, keep0)\n encoding0 = encoding0.index_select(-2, keep0)\n prune0[:, ind0] += 1\n if do_point_pruning and desc1.shape[-2] > pruning_th:\n scores1 = self.log_assignment[i].get_matchability(desc1)\n prunemask1 = self.get_pruning_mask(token1, scores1, i)\n keep1 = torch.where(prunemask1)[1]\n ind1 = ind1.index_select(1, keep1)\n desc1 = desc1.index_select(1, keep1)\n encoding1 = encoding1.index_select(-2, keep1)\n prune1[:, ind1] += 1\n\n desc0, desc1 = desc0[..., :m, :], desc1[..., :n, :]\n scores, _ = self.log_assignment[i](desc0, desc1)\n m0, m1, mscores0, mscores1 = filter_matches(scores, self.conf.filter_threshold)\n matches, mscores = [], []\n for k in range(b):\n valid = m0[k] > -1\n m_indices_0 = torch.where(valid)[0]\n m_indices_1 = m0[k][valid]\n if do_point_pruning:\n m_indices_0 = ind0[k, m_indices_0]\n m_indices_1 = ind1[k, m_indices_1]\n matches.append(torch.stack([m_indices_0, m_indices_1], -1))\n mscores.append(mscores0[k][valid])\n\n # TODO: Remove when hloc switches to the compact format.\n if do_point_pruning:\n m0_ = torch.full((b, m), -1, device=m0.device, dtype=m0.dtype)\n m1_ = torch.full((b, n), -1, device=m1.device, dtype=m1.dtype)\n m0_[:, ind0] = torch.where(m0 == -1, -1, ind1.gather(1, m0.clamp(min=0)))\n m1_[:, ind1] = torch.where(m1 == -1, -1, ind0.gather(1, m1.clamp(min=0)))\n mscores0_ = torch.zeros((b, m), device=mscores0.device)\n mscores1_ = torch.zeros((b, n), device=mscores1.device)\n mscores0_[:, ind0] = mscores0\n mscores1_[:, ind1] = mscores1\n m0, m1, mscores0, mscores1 = m0_, m1_, mscores0_, mscores1_\n else:\n prune0 = torch.ones_like(mscores0) * self.conf.n_layers\n prune1 = torch.ones_like(mscores1) * self.conf.n_layers\n\n pred = {\n \"matches0\": m0,\n \"matches1\": m1,\n \"matching_scores0\": mscores0,\n \"matching_scores1\": mscores1,\n \"stop\": i + 1,\n \"matches\": matches,\n \"scores\": mscores,\n \"prune0\": prune0,\n \"prune1\": prune1,\n }\n\n return pred\n\n def confidence_threshold(self, layer_index: int) -> float:\n \"\"\"scaled confidence threshold\"\"\"\n threshold = 0.8 + 0.1 * np.exp(-4.0 * layer_index / self.conf.n_layers)\n return np.clip(threshold, 0, 1)\n\n def get_pruning_mask(\n self, confidences: torch.Tensor, scores: torch.Tensor, layer_index: int\n ) -> torch.Tensor:\n \"\"\"mask points which should be removed\"\"\"\n keep = scores > (1 - self.conf.width_confidence)\n if confidences is not None: # Low-confidence points are never pruned.\n keep |= confidences <= self.confidence_thresholds[layer_index]\n return keep\n\n def check_if_stop(\n self,\n confidences0: torch.Tensor,\n confidences1: torch.Tensor,\n layer_index: int,\n num_points: int,\n ) -> torch.Tensor:\n \"\"\"evaluate stopping condition\"\"\"\n confidences = torch.cat([confidences0, confidences1], -1)\n threshold = self.confidence_thresholds[layer_index]\n ratio_confident = 1.0 - (confidences < 
threshold).float().sum() / num_points\n return ratio_confident > self.conf.depth_confidence\n\n def pruning_min_kpts(self, device: torch.device):\n if self.conf.flash and FLASH_AVAILABLE and device.type == \"cuda\":\n return self.pruning_keypoint_thresholds[\"flash\"]\n else:\n return self.pruning_keypoint_thresholds[device.type]" } ]
from ..utils.base_model import BaseModel
from .modules.lightglue import LightGlue as LightGlue_
3,692
class LightGlue(BaseModel):
    default_conf = {
        'features': 'superpoint',
        'depth_confidence': 0.95,
        'width_confidence': 0.99,
    }
    required_inputs = [
        'image0', 'keypoints0', 'descriptors0',
        'image1', 'keypoints1', 'descriptors1',
    ]

    def _init(self, conf):
class LightGlue(BaseModel):
    default_conf = {
        'features': 'superpoint',
        'depth_confidence': 0.95,
        'width_confidence': 0.99,
    }
    required_inputs = [
        'image0', 'keypoints0', 'descriptors0',
        'image1', 'keypoints1', 'descriptors1',
    ]

    def _init(self, conf):
self.net = LightGlue_(conf.pop('features'), **conf)
0
2023-10-27 01:20:50+00:00
8k
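The hloc-style BaseModel shown in this record merges a default_conf with the user conf in __init__, verifies required_inputs at call time, and delegates to _init/_forward implemented by each matcher subclass. Below is a small, runnable sketch of that wrapper pattern, assuming only PyTorch is available; DotProductMatcher is a toy nearest-neighbour stand-in used for illustration, not the actual LightGlue wrapper.

from abc import ABCMeta, abstractmethod
import torch
from torch import nn

class WrapperBase(nn.Module, metaclass=ABCMeta):
    """Sketch of the wrapper pattern: merge default_conf with the user conf,
    check required inputs in forward(), and delegate to _init/_forward."""
    default_conf = {}
    required_inputs = []

    def __init__(self, conf):
        super().__init__()
        self.conf = {**self.default_conf, **conf}
        self._init(self.conf)

    def forward(self, data):
        for key in self.required_inputs:
            assert key in data, f"Missing key {key} in data"
        return self._forward(data)

    @abstractmethod
    def _init(self, conf): ...

    @abstractmethod
    def _forward(self, data): ...

class DotProductMatcher(WrapperBase):
    """Toy matcher: pairs each descriptor in image0 with its highest-similarity
    descriptor in image1 (illustrative only)."""
    required_inputs = ['descriptors0', 'descriptors1']

    def _init(self, conf):
        pass  # a real wrapper would construct the underlying network here

    def _forward(self, data):
        d0, d1 = data['descriptors0'], data['descriptors1']  # (M, D), (N, D)
        sim = d0 @ d1.T
        return {'matches0': sim.argmax(dim=1)}

matcher = DotProductMatcher({})
out = matcher({'descriptors0': torch.randn(5, 8), 'descriptors1': torch.randn(7, 8)})
print(out['matches0'].shape)  # torch.Size([5])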
KUNLP/XAI_EvidenceExtraction
src/model/main_function_rnn.py
[ { "identifier": "load_examples", "path": "src/functions/utils.py", "snippet": "def load_examples(args, tokenizer, evaluate=False, output_examples=False, do_predict=False, input_dict=None):\r\n '''\r\n\r\n :param args: 하이퍼 파라미터\r\n :param tokenizer: tokenization에 사용되는 tokenizer\r\n :param evaluate: 평가나 open test시, True\r\n :param output_examples: 평가나 open test 시, True / True 일 경우, examples와 features를 같이 return\r\n :param do_predict: open test시, True\r\n :param input_dict: open test시 입력되는 문서와 질문으로 이루어진 dictionary\r\n :return:\r\n examples : max_length 상관 없이, 원문으로 각 데이터를 저장한 리스트\r\n features : max_length에 따라 분할 및 tokenize된 원문 리스트\r\n dataset : max_length에 따라 분할 및 학습에 직접적으로 사용되는 tensor 형태로 변환된 입력 ids\r\n '''\r\n input_dir = args.data_dir\r\n print(\"Creating features from dataset file at {}\".format(input_dir))\r\n\r\n # processor 선언\r\n processor = SquadV1Processor()\r\n\r\n # open test 시\r\n if do_predict:\r\n examples = processor.get_example_from_input(input_dict)\r\n # 평가 시\r\n elif evaluate:\r\n examples = processor.get_dev_examples(os.path.join(args.data_dir),\r\n filename=args.predict_file, tokenizer=tokenizer)\r\n # 학습 시\r\n else:\r\n examples = processor.get_train_examples(os.path.join(args.data_dir),\r\n filename=args.train_file, tokenizer=tokenizer)\r\n examples, features = squad_convert_examples_to_features(\r\n examples=examples,\r\n tokenizer=tokenizer,\r\n max_seq_length=args.max_seq_length,\r\n doc_stride=args.doc_stride,\r\n max_query_length=args.max_query_length,\r\n is_training=not evaluate,\r\n return_dataset=\"pt\",\r\n threads=args.threads,\r\n )\r\n\r\n if output_examples:\r\n return examples, features\r\n return features\r" }, { "identifier": "set_seed", "path": "src/functions/utils.py", "snippet": "def set_seed(args):\r\n random.seed(args.seed)\r\n np.random.seed(args.seed)\r\n torch.manual_seed(args.seed)\r\n if not args.no_cuda and torch.cuda.is_available():\r\n torch.cuda.manual_seed_all(args.seed)\r" }, { "identifier": "to_list", "path": "src/functions/utils.py", "snippet": "def to_list(tensor):\r\n return tensor.detach().cpu().tolist()\r" }, { "identifier": "load_input_data", "path": "src/functions/utils.py", "snippet": "def load_input_data(args, tokenizer, question, context):\r\n processor = SquadV1Processor()\r\n example = [processor.example_from_input(question, context)]\r\n features, dataset = squad_convert_examples_to_features(\r\n examples=example,\r\n tokenizer=tokenizer,\r\n max_seq_length=args.max_seq_length,\r\n doc_stride=args.doc_stride,\r\n max_query_length=args.max_query_length,\r\n is_training=False,\r\n return_dataset=\"pt\",\r\n threads=args.threads,\r\n )\r\n return dataset, example, features" }, { "identifier": "SquadResult", "path": "src/functions/processor_sent.py", "snippet": "class SquadResult(object):\r\n \"\"\"\r\n Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.\r\n\r\n Args:\r\n unique_id: The unique identifier corresponding to that example.\r\n start_logits: The logits corresponding to the start of the answer\r\n end_logits: The logits corresponding to the end of the answer\r\n \"\"\"\r\n\r\n def __init__(self, unique_id, start_logits, end_logits, evidence=None, start_top_index=None, end_top_index=None, cls_logits=None):\r\n self.start_logits = start_logits\r\n self.end_logits = end_logits\r\n self.unique_id = unique_id\r\n self.evidence = evidence\r\n if start_top_index:\r\n self.start_top_index = start_top_index\r\n self.end_top_index = end_top_index\r\n self.cls_logits = cls_logits\r" }, 
{ "identifier": "eval_during_train", "path": "src/functions/evaluate_v1_0.py", "snippet": "def eval_during_train(args, global_step):\r\n expected_version = 'KorQuAD_v1.0'\r\n\r\n dataset_file = os.path.join(args.data_dir, args.predict_file)\r\n prediction_file = os.path.join(args.output_dir, 'predictions_{}.json'.format(global_step))\r\n\r\n with open(dataset_file) as dataset_f:\r\n dataset_json = json.load(dataset_f)\r\n\r\n dataset = dataset_json\r\n with open(prediction_file) as prediction_f:\r\n predictions = json.load(prediction_f)\r\n\r\n return evaluate(dataset, predictions)\r" }, { "identifier": "f1_score", "path": "src/functions/evaluate_v1_0.py", "snippet": "def f1_score(prediction, ground_truth):\r\n prediction_tokens = normalize_answer(prediction).split()\r\n ground_truth_tokens = normalize_answer(ground_truth).split()\r\n\r\n # F1 by character\r\n prediction_Char = []\r\n for tok in prediction_tokens:\r\n now = [a for a in tok]\r\n prediction_Char.extend(now)\r\n ground_truth_Char = []\r\n for tok in ground_truth_tokens:\r\n now = [a for a in tok]\r\n ground_truth_Char.extend(now)\r\n common = Counter(prediction_Char) & Counter(ground_truth_Char)\r\n num_same = sum(common.values())\r\n if num_same == 0:\r\n return 0\r\n\r\n precision = 1.0 * num_same / len(prediction_Char)\r\n recall = 1.0 * num_same / len(ground_truth_Char)\r\n f1 = (2 * precision * recall) / (precision + recall)\r\n\r\n return f1\r" }, { "identifier": "eval", "path": "src/functions/hotpotqa_metric.py", "snippet": "def eval(prediction_file, gold_file):\r\n with open(prediction_file) as f:\r\n prediction = json.load(f)\r\n prediction = {\"answer\": prediction, \"sp\": {}}\r\n with open(gold_file) as f:\r\n gold = json.load(f)\r\n\r\n metrics = {'em': 0, 'f1': 0, 'prec': 0, 'recall': 0,\r\n 'sp_em': 0, 'sp_f1': 0, 'sp_prec': 0, 'sp_recall': 0,\r\n 'joint_em': 0, 'joint_f1': 0, 'joint_prec': 0, 'joint_recall': 0}\r\n for dp in gold:\r\n cur_id = dp['_id']\r\n can_eval_joint = True\r\n\r\n if cur_id not in prediction['answer']:\r\n print('missing answer {}'.format(cur_id))\r\n can_eval_joint = False\r\n else:\r\n em, prec, recall = update_answer(\r\n metrics, prediction['answer'][cur_id], dp['answer'])\r\n if cur_id not in prediction['sp']:\r\n #print('missing sp fact {}'.format(cur_id))\r\n can_eval_joint = False\r\n else:\r\n sp_em, sp_prec, sp_recall = update_sp(\r\n metrics, prediction['sp'][cur_id], dp['supporting_facts'])\r\n\r\n if can_eval_joint:\r\n joint_prec = prec * sp_prec\r\n joint_recall = recall * sp_recall\r\n if joint_prec + joint_recall > 0:\r\n joint_f1 = 2 * joint_prec * joint_recall / (joint_prec + joint_recall)\r\n else:\r\n joint_f1 = 0.\r\n joint_em = em * sp_em\r\n\r\n metrics['joint_em'] += joint_em\r\n metrics['joint_f1'] += joint_f1\r\n metrics['joint_prec'] += joint_prec\r\n metrics['joint_recall'] += joint_recall\r\n\r\n N = len(gold)\r\n for k in metrics.keys():\r\n metrics[k] /= N\r\n\r\n print(metrics)\r" }, { "identifier": "compute_predictions_logits", "path": "src/functions/squad_metric.py", "snippet": "def compute_predictions_logits(\r\n all_examples,\r\n all_features,\r\n all_results,\r\n n_best_size,\r\n max_answer_length,\r\n do_lower_case,\r\n output_prediction_file,\r\n output_nbest_file,\r\n output_null_log_odds_file,\r\n verbose_logging,\r\n version_2_with_negative,\r\n null_score_diff_threshold,\r\n tokenizer,\r\n):\r\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\r\n if output_prediction_file:\r\n logger.info(f\"Writing 
predictions to: {output_prediction_file}\")\r\n if output_nbest_file:\r\n logger.info(f\"Writing nbest to: {output_nbest_file}\")\r\n if output_null_log_odds_file and version_2_with_negative:\r\n logger.info(f\"Writing null_log_odds to: {output_null_log_odds_file}\")\r\n\r\n example_index_to_features = collections.defaultdict(list)\r\n for features in all_features:\r\n for feature in features:\r\n example_index_to_features[feature.example_index].append(feature)\r\n\r\n unique_id_to_result = {}\r\n for result in all_results:\r\n unique_id_to_result[result.unique_id] = result\r\n\r\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\r\n \"PrelimPrediction\", [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\", \"evidence\"]\r\n )\r\n\r\n all_predictions = collections.OrderedDict()\r\n all_nbest_json = collections.OrderedDict()\r\n scores_diff_json = collections.OrderedDict()\r\n\r\n for (example_index, examples) in enumerate(all_examples):\r\n # examples : 10개의 문서\r\n\r\n features = example_index_to_features[example_index]\r\n prelim_predictions = []\r\n # keep track of the minimum score of null start+end of position 0\r\n score_null = 1000000 # large and positive\r\n min_null_feature_index = 0 # the paragraph slice with min null score\r\n null_start_logit = 0 # the start logit at the slice with min null score\r\n null_end_logit = 0 # the end logit at the slice with min null score\r\n for (feature_index, feature) in enumerate(features):\r\n # 10개 문서에 종속되는 다수의 feature\r\n\r\n result = unique_id_to_result[feature.unique_id]\r\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\r\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\r\n # if we could have irrelevant answers, get the min score of irrelevant\r\n if version_2_with_negative:\r\n feature_null_score = result.start_logits[0] + result.end_logits[0]\r\n if feature_null_score < score_null:\r\n score_null = feature_null_score\r\n min_null_feature_index = feature_index\r\n null_start_logit = result.start_logits[0]\r\n null_end_logit = result.end_logits[0]\r\n for start_index in start_indexes:\r\n for end_index in end_indexes:\r\n # We could hypothetically create invalid predictions, e.g., predict\r\n # that the start of the span is in the question. 
We throw out all\r\n # invalid predictions.\r\n if start_index >= len(feature.tokens):\r\n continue\r\n if end_index >= len(feature.tokens):\r\n continue\r\n if start_index not in feature.token_to_orig_map:\r\n continue\r\n if end_index not in feature.token_to_orig_map:\r\n continue\r\n if not feature.token_is_max_context.get(start_index, False):\r\n continue\r\n length = end_index-start_index\r\n if length > max_answer_length:\r\n continue\r\n if end_index < start_index:\r\n continue\r\n prelim_predictions.append(\r\n _PrelimPrediction(\r\n feature_index=feature_index,\r\n start_index=start_index,\r\n end_index=end_index,\r\n start_logit=result.start_logits[start_index],\r\n end_logit=result.end_logits[end_index],\r\n evidence=result.evidence,\r\n\r\n )\r\n )\r\n\r\n prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)\r\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\r\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\", \"evidence\"]\r\n )\r\n\r\n seen_predictions = {}\r\n nbest = []\r\n for pred in prelim_predictions:\r\n if len(nbest) >= n_best_size:\r\n break\r\n feature = features[pred.feature_index]\r\n example = examples[feature.example_id]\r\n if pred.start_index > 0: # this is a non-null prediction\r\n tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]\r\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\r\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\r\n orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]\r\n\r\n tok_text = tokenizer.convert_tokens_to_string(tok_tokens)\r\n\r\n # tok_text = \" \".join(tok_tokens)\r\n #\r\n # # De-tokenize WordPieces that have been split off.\r\n # tok_text = tok_text.replace(\" ##\", \"\")\r\n # tok_text = tok_text.replace(\"##\", \"\")\r\n\r\n # Clean whitespace\r\n tok_text = tok_text.strip()\r\n tok_text = \" \".join(tok_text.split())\r\n orig_text = \" \".join(orig_tokens)\r\n\r\n final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)\r\n if final_text in seen_predictions:\r\n continue\r\n\r\n seen_predictions[final_text] = True\r\n else:\r\n final_text = \"\"\r\n seen_predictions[final_text] = True\r\n #[example.doc_sentences[feature.cur_sent_to_orig_sent[e]] if e in feature.cur_sent_to_orig_sent.keys() else None for e in pred.evidence]\r\n evidences = []\r\n for idx, sent_num in enumerate(pred.evidence):\r\n\r\n ex_idx = sent_num // max_answer_length\r\n sent_ids = sent_num % max_answer_length\r\n\r\n cur_feature = features[ex_idx]\r\n cur_example = examples[cur_feature.example_id]\r\n if sent_ids in cur_feature.cur_sent_to_orig_sent.keys():\r\n evidences.append(cur_example.doc_sentences[cur_feature.cur_sent_to_orig_sent[sent_ids]])\r\n\r\n # if pred.qt == 0:\r\n # final_text = 'yes'\r\n # elif pred.qt == 1:\r\n # final_text = 'no'\r\n nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit, evidence=evidences))\r\n # if we didn't include the empty option in the n-best, include it\r\n if version_2_with_negative:\r\n if \"\" not in seen_predictions:\r\n nbest.append(_NbestPrediction(text=\"\", start_logit=null_start_logit, end_logit=null_end_logit))\r\n\r\n # In very rare edge cases we could only have single null prediction.\r\n # So we just create a nonce prediction in this case to avoid failure.\r\n if len(nbest) == 1:\r\n nbest.insert(0, _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\r\n\r\n # 
In very rare edge cases we could have no valid predictions. So we\r\n # just create a nonce prediction in this case to avoid failure.\r\n if not nbest:\r\n nbest.append(_NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0, evidence=[None, None, None]))\r\n\r\n assert len(nbest) >= 1\r\n\r\n total_scores = []\r\n best_non_null_entry = None\r\n for entry in nbest:\r\n total_scores.append(entry.start_logit + entry.end_logit)\r\n if not best_non_null_entry:\r\n if entry.text:\r\n best_non_null_entry = entry\r\n\r\n probs = _compute_softmax(total_scores)\r\n\r\n nbest_json = []\r\n for (i, entry) in enumerate(nbest):\r\n output = collections.OrderedDict()\r\n output[\"text\"] = entry.text\r\n output[\"probability\"] = probs[i]\r\n output[\"start_logit\"] = entry.start_logit\r\n output[\"end_logit\"] = entry.end_logit\r\n output[\"evidence\"] = entry.evidence\r\n nbest_json.append(output)\r\n\r\n assert len(nbest_json) >= 1\r\n\r\n if not version_2_with_negative:\r\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\r\n\r\n if example.qas_id not in all_nbest_json.keys():\r\n all_nbest_json[example.qas_id] = []\r\n all_nbest_json[example.qas_id] += nbest_json[:2]\r\n\r\n for qas_id in all_predictions.keys():\r\n all_predictions[qas_id] = sorted(all_nbest_json[qas_id], key=lambda x: x[\"start_logit\"] + x[\"end_logit\"], reverse=True)[0][\"text\"]\r\n\r\n if output_prediction_file:\r\n with open(output_prediction_file, \"w\", encoding='utf8') as writer:\r\n json.dump(all_predictions, writer, indent='\\t', ensure_ascii=False)\r\n\r\n if output_nbest_file:\r\n with open(output_nbest_file, \"w\") as writer:\r\n json.dump(all_nbest_json, writer, indent='\\t', ensure_ascii=False)\r\n\r\n if output_null_log_odds_file and version_2_with_negative:\r\n with open(output_null_log_odds_file, \"w\") as writer:\r\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")\r\n\r\n return all_predictions\r" }, { "identifier": "restore_prediction", "path": "src/functions/squad_metric.py", "snippet": "def restore_prediction(example, features, results, n_best_size, do_lower_case, verbose_logging, tokenizer):\r\n prelim_predictions = []\r\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\r\n \"PrelimPrediction\", [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"]\r\n )\r\n # keep track of the minimum score of null start+end of position 0\r\n score_null = 1000000 # large and positive\r\n min_null_feature_index = 0 # the paragraph slice with min null score\r\n null_start_logit = 0 # the start logit at the slice with min null score\r\n null_end_logit = 0 # the end logit at the slice with min null score\r\n for (feature_index, feature) in enumerate(features):\r\n # 10개 문서에 종속되는 다수의 feature\r\n\r\n result = results[feature_index]\r\n\r\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\r\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\r\n\r\n # if we could have irrelevant answers, get the min score of irrelevant\r\n feature_null_score = result.start_logits[0] + result.end_logits[0]\r\n if feature_null_score < score_null:\r\n score_null = feature_null_score\r\n min_null_feature_index = feature_index\r\n null_start_logit = result.start_logits[0]\r\n null_end_logit = result.end_logits[0]\r\n\r\n for start_index in start_indexes:\r\n for end_index in end_indexes:\r\n # We could hypothetically create invalid predictions, e.g., predict\r\n # that the start of the span is in the question. 
We throw out all\r\n # invalid predictions.\r\n if start_index >= len(feature.tokens):\r\n continue\r\n if end_index >= len(feature.tokens):\r\n continue\r\n if start_index not in feature.token_to_orig_map:\r\n continue\r\n if end_index not in feature.token_to_orig_map:\r\n continue\r\n if not feature.token_is_max_context.get(start_index, False):\r\n continue\r\n\r\n if end_index < start_index:\r\n continue\r\n prelim_predictions.append(\r\n _PrelimPrediction(\r\n feature_index=feature_index,\r\n start_index=start_index,\r\n end_index=end_index,\r\n start_logit=result.start_logits[start_index],\r\n end_logit=result.end_logits[end_index],\r\n )\r\n )\r\n\r\n prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)\r\n\r\n\r\n if prelim_predictions:\r\n pred = prelim_predictions[0]\r\n else:\r\n return ''\r\n feature = features[pred.feature_index]\r\n if pred.start_index > 0: # this is a non-null prediction\r\n tok_tokens = feature.tokens[pred.start_index: (pred.end_index + 1)]\r\n tok_text = tokenizer.convert_tokens_to_string(tok_tokens)\r\n tok_text = tok_text.strip()\r\n tok_text = \" \".join(tok_text.split())\r\n\r\n return tok_text\r\n else:\r\n return ''\r" }, { "identifier": "restore_prediction2", "path": "src/functions/squad_metric.py", "snippet": "def restore_prediction2(tokens, results, n_best_size, tokenizer):\r\n prelim_predictions = []\r\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\r\n \"PrelimPrediction\", [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"]\r\n )\r\n\r\n for result in results:\r\n # 10개 문서에 종속되는 다수의 feature\r\n\r\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\r\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\r\n\r\n for start_index in start_indexes:\r\n for end_index in end_indexes:\r\n # We could hypothetically create invalid predictions, e.g., predict\r\n # that the start of the span is in the question. We throw out all\r\n # invalid predictions.\r\n if start_index >= len(tokens):\r\n continue\r\n if end_index >= len(tokens):\r\n continue\r\n if '[SEP]' in tokens[start_index:end_index+1] or '[CLS]' in tokens[start_index:end_index+1]:\r\n continue\r\n if end_index < start_index:\r\n continue\r\n if end_index - start_index > 30:\r\n continue\r\n prelim_predictions.append(\r\n _PrelimPrediction(\r\n feature_index=0,\r\n start_index=start_index,\r\n end_index=end_index,\r\n start_logit=result.start_logits[start_index],\r\n end_logit=result.end_logits[end_index],\r\n )\r\n )\r\n\r\n prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)\r\n\r\n\r\n if prelim_predictions:\r\n pred = prelim_predictions[0]\r\n else:\r\n return ''\r\n\r\n if pred.start_index > 0: # this is a non-null prediction\r\n tok_tokens = tokens[pred.start_index: (pred.end_index + 1)]\r\n tok_text = tokenizer.convert_tokens_to_string(tok_tokens)\r\n tok_text = tok_text.strip()\r\n tok_text = \" \".join(tok_text.split())\r\n\r\n return tok_text\r\n else:\r\n return ''\r" } ]
from torch.nn import functional as F
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from tqdm import tqdm
from nltk.translate.bleu_score import sentence_bleu
from transformers import (
    AdamW,
    get_linear_schedule_with_warmup
)
from src.functions.utils import load_examples, set_seed, to_list, load_input_data
from src.functions.processor_sent import SquadResult
from src.functions.evaluate_v1_0 import eval_during_train, f1_score
from src.functions.hotpotqa_metric import eval
from src.functions.squad_metric import (
    compute_predictions_logits,
    restore_prediction,
    restore_prediction2
)
import os
import torch
import timeit
5,881
def train(args, model, tokenizer, logger):
    # Load the dataset used for training
    examples, features = load_examples(args, tokenizer, evaluate=False, output_examples=True)

    # Compute the total number of training steps for the optimization schedule
    t_total = len(features) // args.gradient_accumulation_steps * args.num_train_epochs

    # Apply weight decay per parameter group
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]

    # Declare the optimizer and scheduler
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )

    # Training Step
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(features))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Train batch size per GPU = %d", args.train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size * args.gradient_accumulation_steps)
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)

    global_step = 1
    tr_loss, logging_loss = 0.0, 0.0  # initialize loss buffers
    model.zero_grad()
def train(args, model, tokenizer, logger):
    # Load the dataset used for training
    examples, features = load_examples(args, tokenizer, evaluate=False, output_examples=True)

    # Compute the total number of training steps for the optimization schedule
    t_total = len(features) // args.gradient_accumulation_steps * args.num_train_epochs

    # Apply weight decay per parameter group
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]

    # Declare the optimizer and scheduler
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )

    # Training Step
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(features))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Train batch size per GPU = %d", args.train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size * args.gradient_accumulation_steps)
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)

    global_step = 1
    tr_loss, logging_loss = 0.0, 0.0  # initialize loss buffers
    model.zero_grad()
set_seed(args)
1
2023-10-25 07:03:47+00:00
8k
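The train() function in this record builds two parameter groups so that biases and LayerNorm weights are excluded from weight decay, then pairs AdamW with a linear warmup/decay schedule. The sketch below reproduces that setup in a self-contained form, assuming plain PyTorch; the hand-rolled lr_lambda stands in for transformers' get_linear_schedule_with_warmup, and the function name build_optimizer plus the hyperparameter defaults are illustrative, not taken from the repository.

import torch
from torch import nn
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR

def build_optimizer(model, lr=5e-5, weight_decay=0.01, eps=1e-8,
                    warmup_steps=100, total_steps=1000):
    """Grouped weight decay (no decay for biases / LayerNorm weights)
    plus a linear warmup-then-decay learning-rate schedule."""
    no_decay = ["bias", "LayerNorm.weight"]
    grouped = [
        {"params": [p for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)],
         "weight_decay": weight_decay},
        {"params": [p for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)],
         "weight_decay": 0.0},
    ]
    optimizer = AdamW(grouped, lr=lr, eps=eps)

    def lr_lambda(step):
        if step < warmup_steps:
            return step / max(1, warmup_steps)  # linear warmup
        # linear decay from 1.0 down to 0.0 at total_steps
        return max(0.0, (total_steps - step) / max(1, total_steps - warmup_steps))

    scheduler = LambdaLR(optimizer, lr_lambda)
    return optimizer, scheduler

model = nn.Linear(4, 2)
optimizer, scheduler = build_optimizer(model, warmup_steps=2, total_steps=10)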
vlc-robot/polarnet
polarnet/dataloaders/pcd_keystep_dataset.py
[ { "identifier": "get_assets_dir", "path": "polarnet/utils/utils.py", "snippet": "def get_assets_dir():\n return str(Path(polarnet.__file__).parent / \"assets\")" }, { "identifier": "pad_tensors", "path": "polarnet/utils/ops.py", "snippet": "def pad_tensors(tensors, lens=None, pad=0):\n \"\"\"B x [T, ...] torch tensors\"\"\"\n if lens is None:\n lens = [t.size(0) for t in tensors]\n max_len = max(lens)\n bs = len(tensors)\n hid = list(tensors[0].size()[1:])\n size = [bs, max_len] + hid\n\n dtype = tensors[0].dtype\n output = torch.zeros(*size, dtype=dtype)\n if pad:\n output.data.fill_(pad)\n for i, (t, l) in enumerate(zip(tensors, lens)):\n output.data[i, :l, ...] = t.data\n return output" }, { "identifier": "gen_seq_masks", "path": "polarnet/utils/ops.py", "snippet": "def gen_seq_masks(seq_lens, max_len=None):\n \"\"\"\n Args:\n seq_lens: list or nparray int, shape=(N, )\n Returns:\n masks: nparray, shape=(N, L), padded=0\n \"\"\"\n seq_lens = np.array(seq_lens)\n if max_len is None:\n max_len = max(seq_lens)\n if max_len == 0:\n return np.zeros((len(seq_lens), 0), dtype=np.bool)\n batch_size = len(seq_lens)\n masks = np.arange(max_len).reshape(-1, max_len).repeat(batch_size, 0)\n masks = masks < seq_lens.reshape(-1, 1)\n return masks" }, { "identifier": "KeystepDataset", "path": "polarnet/dataloaders/keystep_dataset.py", "snippet": "class KeystepDataset(Dataset):\n def __init__(\n self,\n data_dir,\n taskvars,\n instr_embed_file=None,\n gripper_channel=False,\n camera_ids=None,\n cameras=(\"left_shoulder\", \"right_shoulder\", \"wrist\"),\n use_instr_embed=\"none\",\n is_training=False,\n in_memory=False,\n only_success=False,\n **kwargs,\n ):\n \"\"\"\n - use_instr_embed:\n 'none': use task_id;\n 'avg': use the average instruction embedding;\n 'last': use the last instruction embedding;\n 'all': use the embedding of all instruction tokens.\n \"\"\"\n self.data_dir = data_dir\n\n if len(taskvars) == 1 and os.path.exists(taskvars[0]):\n with open(taskvars[0]) as file:\n self.taskvars = [taskvar.rstrip() for taskvar in file.readlines()]\n self.taskvars.sort()\n else:\n self.taskvars = taskvars\n\n self.instr_embed_file = instr_embed_file\n self.taskvar_to_id = {x: i for i, x in enumerate(self.taskvars)}\n self.use_instr_embed = use_instr_embed\n self.gripper_channel = gripper_channel\n self.cameras = cameras\n if camera_ids is None:\n self.camera_ids = np.arange(len(self.cameras))\n else:\n self.camera_ids = np.array(camera_ids)\n self.in_memory = in_memory\n self.is_training = is_training\n self.multi_instruction = kwargs.get(\"multi_instruction\", True)\n self.max_demos_per_taskvar = kwargs.get(\"max_demos_per_taskvar\", None)\n self.exclude_overlength_episodes = kwargs.get(\n \"exclude_overlength_episodes\", None\n )\n\n self.memory = {}\n\n self._transform = DataTransform((0.75, 1.25))\n\n self.lmdb_envs, self.lmdb_txns = [], []\n self.episode_ids = []\n for i, taskvar in enumerate(self.taskvars):\n demo_res_file = os.path.join(data_dir, taskvar, \"results.json\")\n if only_success and os.path.exists(demo_res_file):\n demo_results = json.load(open(demo_res_file, \"r\"))\n if not os.path.exists(os.path.join(data_dir, taskvar)):\n self.lmdb_envs.append(None)\n self.lmdb_txns.append(None)\n continue\n lmdb_env = lmdb.open(\n os.path.join(data_dir, taskvar), readonly=True, lock=False\n )\n self.lmdb_envs.append(lmdb_env)\n lmdb_txn = lmdb_env.begin()\n self.lmdb_txns.append(lmdb_txn)\n keys = [\n key.decode(\"ascii\")\n for key in list(lmdb_txn.cursor().iternext(values=False))\n ]\n 
self.episode_ids.extend(\n [\n (i, key.encode(\"ascii\"))\n for key in keys\n if key.startswith(\"episode\")\n and ((not only_success) or demo_results[key])\n ][: self.max_demos_per_taskvar]\n )\n if self.in_memory:\n self.memory[f\"taskvar{i}\"] = {}\n\n if self.use_instr_embed != \"none\":\n assert self.instr_embed_file is not None\n self.lmdb_instr_env = lmdb.open(\n self.instr_embed_file, readonly=True, lock=False\n )\n self.lmdb_instr_txn = self.lmdb_instr_env.begin()\n if True: # self.in_memory:\n self.memory[\"instr_embeds\"] = {}\n else:\n self.lmdb_instr_env = None\n\n def __exit__(self):\n for lmdb_env in self.lmdb_envs:\n if lmdb_env is not None:\n lmdb_env.close()\n if self.lmdb_instr_env is not None:\n self.lmdb_instr_env.close()\n\n def __len__(self):\n return len(self.episode_ids)\n\n def get_taskvar_episode(self, taskvar_idx, episode_key):\n if self.in_memory:\n mem_key = f\"taskvar{taskvar_idx}\"\n if episode_key in self.memory[mem_key]:\n return self.memory[mem_key][episode_key]\n\n value = self.lmdb_txns[taskvar_idx].get(episode_key)\n value = msgpack.unpackb(value)\n # rgb, pc: (num_steps, num_cameras, height, width, 3)\n value[\"rgb\"] = value[\"rgb\"][:, self.camera_ids]\n value[\"pc\"] = value[\"pc\"][:, self.camera_ids]\n if self.in_memory:\n self.memory[mem_key][episode_key] = value\n return value\n\n def get_taskvar_instr_embeds(self, taskvar):\n instr_embeds = None\n if True: # self.in_memory:\n if taskvar in self.memory[\"instr_embeds\"]:\n instr_embeds = self.memory[\"instr_embeds\"][taskvar]\n\n if instr_embeds is None:\n instr_embeds = self.lmdb_instr_txn.get(taskvar.encode(\"ascii\"))\n instr_embeds = msgpack.unpackb(instr_embeds)\n instr_embeds = [torch.from_numpy(x).float() for x in instr_embeds]\n if self.in_memory:\n self.memory[\"instr_embeds\"][taskvar] = instr_embeds\n\n # randomly select one instruction for the taskvar\n if self.multi_instruction:\n ridx = np.random.randint(len(instr_embeds))\n else:\n ridx = 0\n instr_embeds = instr_embeds[ridx]\n\n if self.use_instr_embed == \"avg\":\n instr_embeds = torch.mean(instr_embeds, 0, keepdim=True)\n elif self.use_instr_embed == \"last\":\n instr_embeds = instr_embeds[-1:]\n\n return instr_embeds # (num_ttokens, dim)\n\n def __getitem__(self, idx):\n taskvar_idx, episode_key = self.episode_ids[idx]\n\n value = self.get_taskvar_episode(taskvar_idx, episode_key)\n\n # The last one is the stop observation\n rgbs = (\n torch.from_numpy(value[\"rgb\"][:-1]).float().permute(0, 1, 4, 2, 3)\n ) # (T, N, C, H, W)\n pcs = torch.from_numpy(value[\"pc\"][:-1]).float().permute(0, 1, 4, 2, 3)\n # normalise to [-1, 1]\n rgbs = 2 * (rgbs / 255.0 - 0.5)\n\n num_steps, num_cameras, _, im_height, im_width = rgbs.size()\n\n if self.gripper_channel:\n gripper_imgs = torch.zeros(\n num_steps, num_cameras, 1, im_height, im_width, dtype=torch.float32\n )\n for t in range(num_steps):\n for c, cam in enumerate(self.cameras):\n u, v = value[\"gripper_pose\"][t][cam]\n if u >= 0 and u < 128 and v >= 0 and v < 128:\n gripper_imgs[t, c, 0, v, u] = 1\n rgbs = torch.cat([rgbs, gripper_imgs], dim=2)\n\n # rgb, pcd: (T, N, C, H, W)\n outs = {\"rgbs\": rgbs, \"pcds\": pcs}\n if self.is_training:\n outs = self._transform(outs)\n\n outs[\"step_ids\"] = torch.arange(0, num_steps).long()\n outs[\"actions\"] = torch.from_numpy(value[\"action\"][1:])\n outs[\"episode_ids\"] = episode_key.decode(\"ascii\")\n outs[\"taskvars\"] = self.taskvars[taskvar_idx]\n outs[\"taskvar_ids\"] = taskvar_idx\n\n if self.exclude_overlength_episodes is not None:\n 
for key in [\"rgbs\", \"pcds\", \"step_ids\", \"actions\"]:\n outs[key] = outs[key][: self.exclude_overlength_episodes]\n\n if self.use_instr_embed != \"none\":\n outs[\"instr_embeds\"] = self.get_taskvar_instr_embeds(outs[\"taskvars\"])\n\n return outs" }, { "identifier": "get_workspace", "path": "polarnet/config/constants.py", "snippet": "def get_workspace(real_robot=False):\n if real_robot: \n # ur5 robotics room\n TABLE_HEIGHT = 0.01 # meters\n\n X_BBOX = (-1, 0) # 0 is the robot base\n Y_BBOX = (-0.175, 0.4) # 0 is the robot base\n Z_BBOX = (0, 0.75) # 0 is the table\n else:\n # rlbench workspace\n TABLE_HEIGHT = 0.76 # meters\n\n X_BBOX = (-0.5, 1.5) # 0 is the robot base\n Y_BBOX = (-1, 1) # 0 is the robot base \n Z_BBOX = (0.2, 2) # 0 is the floor\n\n return {\n 'TABLE_HEIGHT': TABLE_HEIGHT, \n 'X_BBOX': X_BBOX, \n 'Y_BBOX': Y_BBOX, \n 'Z_BBOX': Z_BBOX\n }" }, { "identifier": "quaternion_to_discrete_euler", "path": "polarnet/utils/coord_transforms.py", "snippet": "def quaternion_to_discrete_euler(quaternion, resolution: int):\n euler = R.from_quat(quaternion).as_euler('xyz', degrees=True) + 180\n assert np.min(euler) >= 0 and np.max(euler) <= 360\n disc = np.around((euler / resolution)).astype(int)\n disc[disc == int(360 / resolution)] = 0\n return disc" } ]
from typing import List, Dict, Optional from PIL import Image from scipy.spatial.transform import Rotation as R from torch.utils.data import Dataset from polarnet.utils.utils import get_assets_dir from polarnet.utils.ops import pad_tensors, gen_seq_masks from polarnet.dataloaders.keystep_dataset import KeystepDataset from polarnet.config.constants import get_workspace from polarnet.utils.coord_transforms import quaternion_to_discrete_euler from torch.utils.data import DataLoader import os import numpy as np import copy import json import open3d as o3d import einops import torch import torch.nn.functional as F import torchvision.transforms as transforms import torchvision.transforms.functional as transforms_f import lmdb import msgpack import msgpack_numpy import time
3,854
msgpack_numpy.patch() def action_rot_quat_to_euler(action, resolution): pos = action[:3] quat = action[3:7] open = action[7] rot_disc = quaternion_to_discrete_euler(quat, resolution) return np.concatenate([pos, rot_disc, [open]]).astype(np.float32) def random_shift_pcd_and_action(pcd, action, shift_range, shift=None): ''' pcd: (npoints, 3) or (T, 3, npoints) action: (8) or (T, 8) shift_range: float ''' if shift is None: shift = np.random.uniform(-shift_range, shift_range, size=(3, )) if len(pcd.shape) == 2: pcd = pcd + shift action[:3] += shift elif len(pcd.shape) == 3: pcd = pcd + shift[None, :, None] action[..., :3] += shift[None, :] return pcd, action def random_rotate_pcd_and_action(pcd, action, rot_range, rot=None): ''' pcd: (npoints, 3) or (T, 3, npoints) action: (8) or (T, 8) shift_range: float ''' if rot is None: rot = np.random.uniform(-rot_range, rot_range) r = R.from_euler('z', rot, degrees=True) if len(pcd.shape) == 2: pcd = r.apply(pcd) action[:3] = r.apply(action[:3]) a_ori = R.from_quat(action[3:7]) a_new = r * a_ori action[3:7] = a_new.as_quat() elif len(pcd.shape) == 3: pos_ori = einops.rearrange(pcd, 't c n -> (t n) c') pos_new = r.apply(pos_ori) pcd = einops.rearrange(pos_new, '(t n) c -> t c n', t=pcd.shape[0], n=pcd.shape[2]) action[..., :3] = r.apply(action[..., :3]) a_ori = R.from_quat(action[..., 3:7]) a_new = r * a_ori action[..., 3:7] = a_new.as_quat() return pcd, action class PCDKeystepDataset(KeystepDataset): def __init__( self, data_dir, taskvars, instr_embed_file=None, gripper_channel=False, camera_ids=None, cameras=..., use_instr_embed='none', is_training=False, in_memory=False, voxel_size=0.01, npoints=2048, use_color=True, use_normal=True, use_height=True, pc_space='none', color_drop=0, pc_center='point', pc_radius_norm=True, **kwargs ): ''' - pc_space: - none: no filter points - workspace: filter points inside x_bbox, y_bbox, and z_bbox - workspace_on_table: filter points inside 3 bboxes and above the table height ''' super().__init__( data_dir, taskvars, instr_embed_file, gripper_channel, camera_ids, cameras, use_instr_embed, is_training, in_memory, **kwargs ) self.voxel_size = voxel_size self.npoints = npoints self.use_normal = use_normal self.use_height = use_height self.use_color = use_color self.color_drop = color_drop self.pc_space = pc_space self.pc_center = pc_center self.pc_radius_norm = pc_radius_norm self.rgb_augment = kwargs.get('rgb_augment', False) self.max_steps_per_episode = kwargs.get('max_steps_per_episode', None) self.add_pcd_noises = kwargs.get('add_pcd_noises', False) self.pcd_noises_std = kwargs.get('pcd_noises_std', 0.01) self.remove_pcd_outliers = kwargs.get('remove_pcd_outliers', False)
msgpack_numpy.patch() def action_rot_quat_to_euler(action, resolution): pos = action[:3] quat = action[3:7] open = action[7] rot_disc = quaternion_to_discrete_euler(quat, resolution) return np.concatenate([pos, rot_disc, [open]]).astype(np.float32) def random_shift_pcd_and_action(pcd, action, shift_range, shift=None): ''' pcd: (npoints, 3) or (T, 3, npoints) action: (8) or (T, 8) shift_range: float ''' if shift is None: shift = np.random.uniform(-shift_range, shift_range, size=(3, )) if len(pcd.shape) == 2: pcd = pcd + shift action[:3] += shift elif len(pcd.shape) == 3: pcd = pcd + shift[None, :, None] action[..., :3] += shift[None, :] return pcd, action def random_rotate_pcd_and_action(pcd, action, rot_range, rot=None): ''' pcd: (npoints, 3) or (T, 3, npoints) action: (8) or (T, 8) shift_range: float ''' if rot is None: rot = np.random.uniform(-rot_range, rot_range) r = R.from_euler('z', rot, degrees=True) if len(pcd.shape) == 2: pcd = r.apply(pcd) action[:3] = r.apply(action[:3]) a_ori = R.from_quat(action[3:7]) a_new = r * a_ori action[3:7] = a_new.as_quat() elif len(pcd.shape) == 3: pos_ori = einops.rearrange(pcd, 't c n -> (t n) c') pos_new = r.apply(pos_ori) pcd = einops.rearrange(pos_new, '(t n) c -> t c n', t=pcd.shape[0], n=pcd.shape[2]) action[..., :3] = r.apply(action[..., :3]) a_ori = R.from_quat(action[..., 3:7]) a_new = r * a_ori action[..., 3:7] = a_new.as_quat() return pcd, action class PCDKeystepDataset(KeystepDataset): def __init__( self, data_dir, taskvars, instr_embed_file=None, gripper_channel=False, camera_ids=None, cameras=..., use_instr_embed='none', is_training=False, in_memory=False, voxel_size=0.01, npoints=2048, use_color=True, use_normal=True, use_height=True, pc_space='none', color_drop=0, pc_center='point', pc_radius_norm=True, **kwargs ): ''' - pc_space: - none: no filter points - workspace: filter points inside x_bbox, y_bbox, and z_bbox - workspace_on_table: filter points inside 3 bboxes and above the table height ''' super().__init__( data_dir, taskvars, instr_embed_file, gripper_channel, camera_ids, cameras, use_instr_embed, is_training, in_memory, **kwargs ) self.voxel_size = voxel_size self.npoints = npoints self.use_normal = use_normal self.use_height = use_height self.use_color = use_color self.color_drop = color_drop self.pc_space = pc_space self.pc_center = pc_center self.pc_radius_norm = pc_radius_norm self.rgb_augment = kwargs.get('rgb_augment', False) self.max_steps_per_episode = kwargs.get('max_steps_per_episode', None) self.add_pcd_noises = kwargs.get('add_pcd_noises', False) self.pcd_noises_std = kwargs.get('pcd_noises_std', 0.01) self.remove_pcd_outliers = kwargs.get('remove_pcd_outliers', False)
self.WORKSPACE = get_workspace(real_robot=kwargs.get('real_robot', False))
4
2023-10-29 21:41:09+00:00
8k
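The PCDKeystepDataset record above pairs point-cloud augmentation with the matching action update: random_rotate_pcd_and_action rotates the cloud about the z-axis and composes the same rotation into the gripper quaternion so the label stays consistent with the observation. Below is a minimal single-step sketch of that idea, assuming the action stores position in the first three entries followed by an (x, y, z, w) quaternion and a gripper-open flag; the function name, shapes, and angle range are illustrative rather than taken from the repository.

```python
import numpy as np
from scipy.spatial.transform import Rotation as R

def rotate_pcd_and_pose(pcd, action, angle_deg):
    """Apply the same z-rotation to a point cloud and a gripper action.

    pcd:    (N, 3) points
    action: (8,) = [x, y, z, qx, qy, qz, qw, gripper_open]
    """
    r = R.from_euler("z", angle_deg, degrees=True)
    pcd_rot = r.apply(pcd)                                   # rotate every point
    action = action.copy()
    action[:3] = r.apply(action[:3])                         # rotate the position
    action[3:7] = (r * R.from_quat(action[3:7])).as_quat()   # compose orientations
    return pcd_rot, action

pcd = np.random.rand(2048, 3)                                # hypothetical point cloud
action = np.array([0.3, 0.1, 0.8, 0.0, 0.0, 0.0, 1.0, 1.0])
pcd_aug, action_aug = rotate_pcd_and_pose(pcd, action, np.random.uniform(-45, 45))
```

Sharing one rotation between the observation and the target action is what makes the transform label-preserving.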
stanleylsx/text_embedding
main.py
[ { "identifier": "DataPrecess", "path": "engines/data.py", "snippet": "class DataPrecess:\n \"\"\"\n 文本处理\n \"\"\"\n\n def __init__(self, logger):\n super(DataPrecess, self).__init__()\n self.logger = logger\n self.max_sequence_length = configure['max_sequence_length']\n self.decision_threshold = configure['decision_threshold']\n self.train_type = configure['train_type']\n match configure['model_type']:\n case 'XLMRoberta':\n self.tokenizer = XLMRobertaTokenizer.from_pretrained(configure['hf_tag'])\n case 'RoFormer':\n self.tokenizer = RoFormerTokenizer.from_pretrained(configure['hf_tag'])\n case 'Bert':\n self.tokenizer = BertTokenizer.from_pretrained(configure['hf_tag'])\n\n def prepare_pair_data(self, df_values):\n inputs_a, inputs_b, labels = [], [], []\n for sentence1, sentence2, label in df_values:\n inputs_a.append(sentence1)\n inputs_b.append(sentence2)\n labels.append(label)\n inputs_a = self.tokenizer.batch_encode_plus(inputs_a,\n padding='longest',\n truncation=True,\n max_length=self.max_sequence_length,\n return_tensors='pt')\n inputs_b = self.tokenizer.batch_encode_plus(inputs_b,\n padding='longest',\n truncation=True,\n max_length=self.max_sequence_length,\n return_tensors='pt')\n token_ids_a, token_ids_b = inputs_a['input_ids'], inputs_b['input_ids']\n return token_ids_a, token_ids_b, torch.tensor(labels)\n\n def prepare_simcse_sup_data(self, df_values):\n triple_sentences = []\n for sentence, entailment, contradiction in df_values:\n triple_sentences.extend([sentence, entailment, contradiction])\n inputs = self.tokenizer.batch_encode_plus(triple_sentences,\n max_length=self.max_sequence_length,\n truncation=True,\n padding='longest',\n return_tensors='pt')\n token_ids = inputs['input_ids']\n return token_ids\n\n def prepare_simcse_unsup_data(self, df_values):\n sentences = []\n for sentence in df_values:\n sentence = sentence[0]\n sentences.extend([sentence, sentence])\n inputs = self.tokenizer.batch_encode_plus(sentences,\n max_length=self.max_sequence_length,\n truncation=True,\n padding='longest',\n return_tensors='pt')\n token_ids = inputs['input_ids']\n return token_ids\n\n def get_dataset(self, df_values):\n \"\"\"\n 构建Dataset\n \"\"\"\n if self.train_type == 'cosent':\n inputs_a, inputs_b, labels = self.prepare_pair_data(df_values)\n dataset = (inputs_a, inputs_b, labels)\n elif self.train_type == 'simcse_sup':\n dataset = self.prepare_simcse_sup_data(df_values)\n elif self.train_type == 'simcse_unsup':\n dataset = self.prepare_simcse_unsup_data(df_values)\n return dataset\n\n def get_eval_dataset(self, df_values):\n \"\"\"\n 构建验证集Dataset\n \"\"\"\n inputs_a, inputs_b, labels = self.prepare_pair_data(df_values)\n dataset = (inputs_a, inputs_b, labels)\n return dataset\n\n def batch_tokenize(self, sentences):\n token_ids = self.tokenizer.batch_encode_plus(sentences,\n max_length=self.max_sequence_length,\n truncation=True,\n padding='longest',\n return_tensors='pt').input_ids\n return token_ids" }, { "identifier": "use_cuda", "path": "config.py", "snippet": "" }, { "identifier": "Train", "path": "engines/train.py", "snippet": "class Train:\n def __init__(self, data_manage, device, logger):\n self.logger = logger\n self.device = device\n self.data_manage = data_manage\n self.decision_threshold = data_manage.decision_threshold\n self.train_type = data_manage.train_type\n self.use_fp16 = configure['use_fp16']\n\n @torch.inference_mode()\n def evaluate(self, model, val_loader):\n \"\"\"\n 验证集评估函数,分别计算f1、precision、recall和spearmanr相关系数\n \"\"\"\n model.eval()\n start_time 
= time.time()\n loss_sum = 0.0\n all_predicts = []\n all_labels = []\n preds_sims = []\n for _, batch in enumerate(tqdm(val_loader)):\n input_a, input_b, labels = batch\n input_a, input_b, labels = input_a.to(self.device), input_b.to(self.device), labels.to(self.device)\n vectors_a, vectors_b = model(input_a), model(input_b)\n pred_sims = torch.cosine_similarity(vectors_a, vectors_b, dim=1)\n loss = cosent_loss(pred_sims, labels, self.device)\n loss_sum += loss.item()\n predicts = torch.where(pred_sims >= self.decision_threshold, 1, 0)\n preds_sims.extend(pred_sims.cpu().numpy())\n all_predicts.extend(predicts.cpu().numpy())\n all_labels.extend(labels.cpu().numpy())\n\n val_time = time.time() - start_time\n val_loss = loss_sum / len(val_loader)\n val_measures = cal_metrics(all_predicts, all_labels)\n val_measures |= compute_corrcoef(all_labels, preds_sims)\n # 打印验证集上的指标\n res_str = ''\n for k, v in val_measures.items():\n res_str += (k + ': %.3f ' % v)\n self.logger.info('loss: %.5f, %s' % (val_loss, res_str))\n self.logger.info('time consumption of evaluating:%.2f(min)' % val_time)\n return val_measures\n\n def train(self):\n batch_size = 256\n epoch = configure['epochs']\n learning_rate = configure['learning_rate']\n batch_size = configure['batch_size']\n gradient_accumulation_steps = configure['gradient_accumulation_steps']\n print_per_batch = configure['print_per_batch']\n train_file = configure['train_file']\n val_file = configure['val_file']\n train_data = pd.read_csv(train_file, encoding='utf-8')\n\n patience = configure['patience']\n is_early_stop = configure['is_early_stop']\n checkpoints_dir = configure['checkpoints_dir']\n model_name = configure['model_name']\n best_f1 = 0.0\n best_at_epoch = 0\n patience_counter = 0\n\n very_start_time = time.time()\n self.logger.info('train_data_length:{}'.format(len(train_data)))\n train_loader = DataLoader(dataset=train_data.values,\n collate_fn=self.data_manage.get_dataset,\n shuffle=True,\n batch_size=batch_size)\n\n if val_file != '':\n val_data = pd.read_csv(val_file, encoding='utf-8')\n if val_data.columns.tolist() != ['sentence1', 'sentence2', 'label']:\n raise ValueError('val_file format error')\n self.logger.info('val_data_length:{}'.format(len(val_data)))\n val_loader = DataLoader(dataset=val_data.values,\n collate_fn=self.data_manage.get_eval_dataset,\n shuffle=False,\n batch_size=batch_size)\n\n total_steps = len(train_loader) * epoch\n num_train_optimization_steps = int(len(train_data) / batch_size / gradient_accumulation_steps) * epoch\n self.logger.info(f'Num steps:{num_train_optimization_steps}')\n model = Model().to(self.device)\n params = list(model.parameters())\n optimizer = AdamW(params, lr=learning_rate)\n if self.use_fp16:\n scaler = GradScaler()\n\n if os.path.exists(os.path.join(checkpoints_dir, model_name)):\n self.logger.info('Resuming from checkpoint...')\n model.load_state_dict(torch.load(os.path.join(checkpoints_dir, model_name)))\n optimizer_checkpoint = torch.load(os.path.join(checkpoints_dir, model_name + '.optimizer'))\n optimizer.load_state_dict(optimizer_checkpoint['optimizer'])\n else:\n self.logger.info('Initializing from scratch.')\n\n if configure['use_ewc']:\n original_weight = get_mean_params(model)\n\n # 定义梯度策略\n warmup_steps = math.ceil(total_steps * configure['warmup_ratio'])\n scheduler = get_linear_schedule_with_warmup(optimizer=optimizer,\n num_warmup_steps=warmup_steps,\n num_training_steps=total_steps)\n\n self.logger.info(('+' * 20) + 'training starting' + ('+' * 20))\n\n for i in 
range(epoch):\n train_start = time.time()\n self.logger.info('epoch:{}/{}'.format(i + 1, epoch))\n loss, loss_sum = 0.0, 0.0\n model.train()\n\n for step, batch in enumerate(tqdm(train_loader)):\n if self.train_type == 'cosent':\n input_a, input_b, labels = batch\n input_a, input_b, labels = input_a.to(self.device), input_b.to(self.device), labels.to(self.device)\n if self.use_fp16:\n with autocast():\n vectors_a, vectors_b = model(input_a), model(input_b)\n pred_sims = torch.cosine_similarity(vectors_a, vectors_b, dim=1)\n loss = cosent_loss(pred_sims, labels, self.device)\n else:\n vectors_a, vectors_b = model(input_a), model(input_b)\n pred_sims = torch.cosine_similarity(vectors_a, vectors_b, dim=1)\n loss = cosent_loss(pred_sims, labels, self.device)\n else:\n batch = batch.to(self.device)\n if self.use_fp16:\n with autocast():\n out = model(batch)\n if self.train_type == 'simcse_sup':\n loss = simcse_sup_loss(out, self.device)\n elif self.train_type == 'simcse_unsup':\n loss = simcse_unsup_loss(out, self.device)\n else:\n out = model(batch)\n if self.train_type == 'simcse_sup':\n loss = simcse_sup_loss(out, self.device)\n elif self.train_type == 'simcse_unsup':\n loss = simcse_unsup_loss(out, self.device)\n\n if configure['use_ewc']:\n loss = loss + ewc_loss(model, original_weight)\n\n loss_sum += loss.item()\n if self.use_fp16:\n scaler.scale(loss).backward()\n else:\n loss.backward()\n if (step + 1) % gradient_accumulation_steps == 0:\n if self.use_fp16:\n scaler.step(optimizer)\n scaler.update()\n else:\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n\n # 打印训练过程中的指标\n if step % print_per_batch == 0 and step != 0:\n if self.train_type == 'cosent':\n out_classes = torch.where(pred_sims >= self.decision_threshold, 1, 0)\n measures = cal_metrics(out_classes.cpu(), labels.cpu())\n measures |= compute_corrcoef(labels.cpu().numpy(), pred_sims.cpu().detach().numpy())\n res_str = ''\n for k, v in measures.items():\n res_str += (k + ': %.3f ' % v)\n self.logger.info('training step: %5d, loss: %.5f, %s' % (step, loss, res_str))\n else:\n self.logger.info('training step: %5d, loss: %.5f' % (step, loss))\n\n train_time = (time.time() - train_start) / 60\n self.logger.info('time consumption of training:%.2f(min)' % train_time)\n if val_file != '':\n self.logger.info('start evaluate model...')\n val_measures = self.evaluate(model, val_loader)\n\n if val_measures['f1'] > best_f1:\n patience_counter = 0\n best_f1 = val_measures['f1']\n best_at_epoch = i + 1\n optimizer_checkpoint = {'optimizer': optimizer.state_dict()}\n torch.save(optimizer_checkpoint, os.path.join(checkpoints_dir, model_name + '.optimizer'))\n torch.save(model.state_dict(), os.path.join(checkpoints_dir, model_name))\n self.logger.info('saved the new best model with f1: %.3f' % best_f1)\n else:\n patience_counter += 1\n\n if is_early_stop:\n if patience_counter >= patience:\n self.logger.info('early stopped, no progress obtained within {} epochs'.format(patience))\n self.logger.info('overall best f1 is {} at {} epoch'.format(best_f1, best_at_epoch))\n self.logger.info('total training time consumption: %.3f(min)' % ((time.time() - very_start_time) / 60))\n return\n else:\n optimizer_checkpoint = {'optimizer': optimizer.state_dict()}\n torch.save(optimizer_checkpoint, os.path.join(checkpoints_dir, model_name + '.optimizer'))\n torch.save(model.state_dict(), os.path.join(checkpoints_dir, model_name))\n self.logger.info('saved the current model')\n if val_file != '':\n self.logger.info('overall best f1 is {} at {} 
epoch'.format(best_f1, best_at_epoch))\n self.logger.info('total training time consumption: %.3f(min)' % ((time.time() - very_start_time) / 60))" }, { "identifier": "Predictor", "path": "engines/predict.py", "snippet": "class Predictor:\n def __init__(self, data_manage, device, logger):\n self.logger = logger\n self.data_manage = data_manage\n self.device = device\n self.checkpoints_dir = configure['checkpoints_dir']\n self.model_name = configure['model_name']\n self.model = Model().to(device)\n if not os.path.exists(os.path.join(self.checkpoints_dir, self.model_name)):\n logger.info('Local checkpoint not found, load raw HF model.')\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.checkpoints_dir, self.model_name)))\n self.model.eval()\n\n @torch.inference_mode()\n def predict_one(self, sentence_a, sentence_b):\n token_ids_a = self.data_manage.tokenizer(sentence_a).input_ids\n token_ids_b = self.data_manage.tokenizer(sentence_b).input_ids\n token_ids_a = torch.tensor([token_ids_a]).to(self.device)\n token_ids_b = torch.tensor([token_ids_b]).to(self.device)\n vector_a = self.model(token_ids_a)\n vector_b = self.model(token_ids_b)\n similarity = float(torch.cosine_similarity(vector_a, vector_b, dim=1).detach().cpu().squeeze(0))\n if_similar = 'similar' if similarity >= self.data_manage.decision_threshold else 'dissimilar'\n return similarity, if_similar\n\n @torch.inference_mode()\n def get_embedding(self, sentence):\n \"\"\"\n 获取句向量\n \"\"\"\n token_ids = self.data_manage.batch_tokenize([sentence]).to(self.device)\n vector = self.model(token_ids)\n vector = vector.detach().cpu().squeeze(0).numpy()\n return vector\n\n def convert_onnx(self):\n max_sequence_length = self.data_manage.max_sequence_length\n dummy_input = torch.ones([1, max_sequence_length]).to('cpu').int()\n onnx_path = self.checkpoints_dir + '/model.onnx'\n torch.onnx.export(self.model.to('cpu'), dummy_input,\n f=onnx_path,\n input_names=['input'],\n output_names=['vector'],\n dynamic_axes={'input': {0: 'batch_size', 1: 'max_sequence_length'},\n 'vector': {0: 'batch_size'}})\n\n def mteb(self):\n model = MyModel(self.data_manage, self.model, self.device)\n task_class = configure['task_class']\n match task_class:\n case 'reranking':\n task_names = ['T2Reranking', 'MMarcoRetrieval', 'CMedQAv1', 'CMedQAv2']\n case 'pairclassification':\n task_names = ['Cmnli', 'Ocnli']\n case 'clustering':\n task_names = ['CLSClusteringS2S', 'CLSClusteringP2P', 'ThuNewsClusteringS2S', 'ThuNewsClusteringP2P']\n case 'sts':\n task_names = ['ATEC', 'BQ', 'LCQMC', 'PAWSX', 'STSB', 'AFQMC', 'QBQTC']\n case 'retrieval':\n task_names = ['T2Retrieval', 'MMarcoRetrieval', 'DuRetrieval', 'CovidRetrieval',\n 'CmedqaRetrieval', 'EcomRetrieval', 'MedicalRetrieval', 'VideoRetrieval']\n output_dir = os.path.join(self.checkpoints_dir, 'generic_test/' + task_class)\n self.logger.info(f'Total tasks: {task_names}')\n for task in task_names:\n MTEB(tasks=[task], task_langs=['zh', 'zh-CN']).run(model, output_folder=output_dir)\n\n def test(self, trainer):\n test_file = configure['test_file']\n batch_size = configure['batch_size']\n if test_file != '':\n test_data = pd.read_csv(test_file, encoding='utf-8')\n if test_data.columns.tolist() != ['sentence1', 'sentence2', 'label']:\n raise ValueError('test_file format error')\n self.logger.info('test_data_length:{}'.format(len(test_data)))\n test_loader = DataLoader(dataset=test_data.values,\n collate_fn=self.data_manage.get_eval_dataset,\n shuffle=False,\n batch_size=batch_size)\n 
trainer.evaluate(self.model, test_loader)\n\n def batch_embedding(self):\n test_file = configure['test_file']\n if test_file != '':\n indices = []\n vectors = []\n sentences = []\n test_data = pd.read_csv(test_file, encoding='utf-8')\n for _, row in test_data.iterrows():\n index = row['index']\n indices.append(index)\n sentence = row['sentence']\n sentences.append(sentence)\n vector = self.get_embedding(sentence)\n vectors.append(vector.tolist())\n test_result = pd.DataFrame({'index': indices, 'sentence': sentences, 'vector': vectors})\n test_result.to_csv('batch_test_result.csv', index=False)" } ]
from loguru import logger from engines.data import DataPrecess from config import use_cuda, cuda_device, mode, configure from engines.train import Train from engines.predict import Predictor import random import numpy as np import os import torch import json
4,693
# -*- coding: utf-8 -*- # @Time : 2023/10/27 22:05 # @Author : lishouxian # @Email : [email protected] # @File : main.py # @Software: VSCode def set_env(configure): random.seed(configure.seed) np.random.seed(configure.seed) def fold_check(configure): if configure['checkpoints_dir'] == '': raise Exception('checkpoints_dir did not set...') if not os.path.exists(configure['checkpoints_dir']): print('checkpoints fold not found, creating...') os.makedirs(configure['checkpoints_dir']) if __name__ == '__main__': log_name = './logs/' + mode + '.log' logger.add(log_name, encoding='utf-8') fold_check(configure) if use_cuda: if torch.cuda.is_available(): if cuda_device == -1: device = torch.device('cuda') else: device = torch.device(f'cuda:{cuda_device}') else: raise ValueError( "'use_cuda' set to True when cuda is unavailable." " Make sure CUDA is available or set use_cuda=False." ) else: device = 'cpu' logger.info(f'device: {device}') logger.info(json.dumps(configure, indent=2, ensure_ascii=False)) data_manage = DataPrecess(logger) if mode == 'train': logger.info('stage: train')
# -*- coding: utf-8 -*- # @Time : 2023/10/27 22:05 # @Author : lishouxian # @Email : [email protected] # @File : main.py # @Software: VSCode def set_env(configure): random.seed(configure.seed) np.random.seed(configure.seed) def fold_check(configure): if configure['checkpoints_dir'] == '': raise Exception('checkpoints_dir did not set...') if not os.path.exists(configure['checkpoints_dir']): print('checkpoints fold not found, creating...') os.makedirs(configure['checkpoints_dir']) if __name__ == '__main__': log_name = './logs/' + mode + '.log' logger.add(log_name, encoding='utf-8') fold_check(configure) if use_cuda: if torch.cuda.is_available(): if cuda_device == -1: device = torch.device('cuda') else: device = torch.device(f'cuda:{cuda_device}') else: raise ValueError( "'use_cuda' set to True when cuda is unavailable." " Make sure CUDA is available or set use_cuda=False." ) else: device = 'cpu' logger.info(f'device: {device}') logger.info(json.dumps(configure, indent=2, ensure_ascii=False)) data_manage = DataPrecess(logger) if mode == 'train': logger.info('stage: train')
trainer = Train(data_manage, device, logger)
2
2023-10-27 07:47:02+00:00
8k
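The text_embedding record above trains sentence vectors with a cosent_loss over cosine similarities, but that loss is imported from elsewhere and not shown. The sketch below is one common CoSENT-style formulation, assuming binary labels (1 = similar pair, 0 = dissimilar) and a similarity scale of 20; both are assumptions, not the repository's exact settings.

```python
import torch

def cosent_loss(pred_sims, labels, scale=20.0):
    """Pairwise ranking loss: every (dissimilar i, similar j) pair is pushed
    towards cos_j > cos_i via log(1 + sum exp(scale * (cos_i - cos_j)))."""
    sims = pred_sims * scale
    diff = sims[:, None] - sims[None, :]                   # diff[i, j] = cos_i - cos_j
    keep = (labels[:, None] < labels[None, :]).float()     # i dissimilar, j similar
    diff = diff - (1.0 - keep) * 1e12                      # mask out every other pair
    diff = torch.cat([torch.zeros(1, device=diff.device), diff.flatten()])
    return torch.logsumexp(diff, dim=0)                    # the prepended 0 gives the "1 +"

vectors_a, vectors_b = torch.randn(8, 768), torch.randn(8, 768)
labels = torch.randint(0, 2, (8,))
sims = torch.cosine_similarity(vectors_a, vectors_b, dim=1)
loss = cosent_loss(sims, labels)
```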
akekic/causal-component-analysis
model/encoder.py
[ { "identifier": "ParamMultiEnvCausalDistribution", "path": "model/normalizing_flow/distribution.py", "snippet": "class ParamMultiEnvCausalDistribution(MultiEnvCausalDistribution):\n \"\"\"\n Parametric multi-environment causal distribution.\n\n This class learns the parameters of the causal mechanisms and noise distributions. The causal mechanisms\n are assumed to be linear, and the noise distributions are assumed to be Gaussian. In environments where\n a variable is intervened on, the connection to its parents is assumed to be cut off, and the noise distribution\n can be shifted relative to the observational environment (when the variable is not intervened on).\n\n Theoretically, we can fix some of the mechanisms involved w.l.o.g. and still achieve identifiability\n (see Appendix G2 of [1]). There are two ways to do this:\n 1. Fix all mechanisms that are intervened on.\n 2. Fix all observational mechanisms with an empty parent set and all intervened mechanisms with a\n non-empty parent set.\n However, we do not have to fix any of the mechanisms and in practice, we find that this leads to better\n performance.\n\n Attributes\n ----------\n adjacency_matrix: np.ndarray\n Adjacency matrix of the causal graph.\n fix_mechanisms: bool\n Whether to fix any of the mechanisms. Default: False.\n fix_all_intervention_targets: bool\n Whether to fix all mechanisms that are intervened on (option 1 above). If False, we fix all observational\n mechanisms with an empty parent set and all intervened mechanisms with a non-empty parent set (option 2 above).\n Default: False.\n intervention_targets_per_env: Tensor, shape (num_envs, latent_dim)\n Intervention targets per environment, with 1 indicating that the variable is intervened on\n and 0 indicating that the variable is not intervened on. This variable also implicitly defines\n the number of environments.\n dag: nx.DiGraph\n Directed acyclic graph of the causal connections.\n coeff_values: nn.ParameterList\n List of lists of coefficients for the linear mechanisms. The outer list has length equal to the number of\n variables, and the inner list has length equal to the number of parents of the variable. The last element\n of the inner list is the variance parameter. I.e. coeff_values[i][:-1] are the linear weights of the parent\n variables of variable i, and coeff_values[i][-1] is weight of the exogenous noise.\n noise_means: nn.ParameterList\n List of lists of means for the noise distributions. The outer list has length equal to the number of\n environments, and the inner list has length equal to the number of variables. noise_means[e][i] is the mean\n of the noise distribution for variable i in environment e. Note that not all of these parameters are\n used in the computation of the log probability. If a variable i is not intervened on in environment e,\n we use the observational noise distribution, i.e. noise_means[0][i] (e=0 is assumed to be the\n observational environment).\n noise_stds: nn.ParameterList\n Same as noise_means, but for the standard deviations of the noise distributions.\n coeff_values_requires_grad: list[list[bool]]\n Whether each coefficient is trainable. This is used to fix the coefficients of the mechanisms.\n noise_means_requires_grad: list[list[bool]]\n Whether each noise mean is trainable. This is used to fix the noise means of the mechanisms.\n noise_stds_requires_grad: list[list[bool]]\n Whether each noise standard deviation is trainable. 
This is used to fix the noise standard deviations\n\n References\n ----------\n [1] https://arxiv.org/abs/2305.17225\n \"\"\"\n\n trainable = True\n\n def __init__(\n self,\n adjacency_matrix: np.ndarray,\n intervention_targets_per_env: Tensor,\n fix_mechanisms: bool = False,\n fix_all_intervention_targets: bool = False,\n ) -> None:\n super().__init__()\n self.adjacency_matrix = adjacency_matrix\n self.fix_mechanisms = fix_mechanisms\n self.fix_all_intervention_targets = fix_all_intervention_targets\n self.intervention_targets_per_env = intervention_targets_per_env\n\n self.dag = nx.DiGraph(adjacency_matrix)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n coeff_values, coeff_values_requires_grad = self._set_initial_coeffs(\n self.dag, device\n )\n noise_means, noise_means_requires_grad = self._set_initial_noise_means(\n self.dag,\n fix_mechanisms,\n intervention_targets_per_env,\n fix_all_intervention_targets,\n device,\n )\n noise_stds, noise_stds_requires_grad = self._set_initial_noise_stds(\n self.dag,\n fix_mechanisms,\n intervention_targets_per_env,\n fix_all_intervention_targets,\n device,\n )\n\n self.coeff_values = nn.ParameterList(coeff_values)\n self.noise_means = nn.ParameterList(noise_means)\n self.noise_stds = nn.ParameterList(noise_stds)\n\n self.coeff_values_requires_grad = coeff_values_requires_grad\n self.noise_means_requires_grad = noise_means_requires_grad\n self.noise_stds_requires_grad = noise_stds_requires_grad\n\n def multi_env_log_prob(\n self, z: Tensor, e: Tensor, intervention_targets: Tensor\n ) -> Tensor:\n log_p = torch.zeros(len(z), dtype=z.dtype, device=z.device)\n for env in e.unique():\n env_mask = (e == env).flatten()\n z_env = z[env_mask, :]\n intervention_targets_env = intervention_targets[env_mask, :]\n\n for i in range(z.shape[1]):\n parents = list(self.dag.predecessors(i))\n\n if len(parents) == 0 or intervention_targets_env[0, i] == 1:\n parent_contribution = 0\n else:\n coeffs_raw = self.coeff_values[i][:-1]\n if isinstance(coeffs_raw, nn.ParameterList):\n coeffs_raw = torch.cat([c for c in coeffs_raw])\n parent_coeffs = coeffs_raw.to(z.device)\n parent_contribution = parent_coeffs.matmul(z_env[:, parents].T)\n\n noise_env_idx = int(env) if intervention_targets_env[0, i] == 1 else 0\n var = self.noise_stds[noise_env_idx][i] ** 2 * torch.ones_like(\n z_env[:, i]\n )\n noise_coeff = self.coeff_values[i][-1].to(z.device)\n noise_contribution = noise_coeff * self.noise_means[noise_env_idx][i]\n var *= noise_coeff ** 2\n\n log_p[env_mask] += torch.distributions.Normal(\n parent_contribution + noise_contribution, var.sqrt()\n ).log_prob(z_env[:, i])\n\n return log_p\n\n @staticmethod\n def _set_initial_coeffs(\n dag: nx.DiGraph, device: torch.device\n ) -> tuple[list[ParameterList], list[list[bool]]]:\n coeff_values = []\n coeff_values_requires_grad = []\n for i in range(dag.number_of_nodes()):\n coeff_values_i = []\n coeff_values_requires_grad_i = []\n num_parents = len(list(dag.predecessors(i)))\n for j in range(num_parents):\n random_val = Uniform(-1, 1).sample((1,))\n val = random_val\n param = nn.Parameter(val * torch.ones(1), requires_grad=True).to(device)\n coeff_values_i.append(param)\n coeff_values_requires_grad_i.append(True)\n const = torch.ones(1, requires_grad=False).to(device) # variance param\n coeff_values_i.append(const)\n coeff_values_requires_grad_i.append(False)\n coeff_values.append(nn.ParameterList(coeff_values_i))\n coeff_values_requires_grad.append(coeff_values_requires_grad_i)\n return 
coeff_values, coeff_values_requires_grad\n\n @staticmethod\n def _set_initial_noise_means(\n dag: nx.DiGraph,\n fix_mechanisms: bool,\n intervention_targets_per_env: Tensor,\n fix_all_intervention_targets: bool,\n device: torch.device,\n ) -> tuple[list[ParameterList], list[list[bool]]]:\n noise_means = []\n noise_means_requires_grad = []\n num_envs = intervention_targets_per_env.shape[0]\n\n for e in range(num_envs):\n noise_means_e = []\n noise_means_requires_grad_e = []\n for i in range(dag.number_of_nodes()):\n is_shifted = intervention_targets_per_env[e][i] == 1\n is_root = len(list(dag.predecessors(i))) == 0\n if fix_all_intervention_targets:\n is_fixed = is_shifted\n else:\n is_fixed = (is_shifted and not is_root) or (\n not is_shifted and is_root\n )\n is_fixed = is_fixed and fix_mechanisms\n random_val = Uniform(-0.5, 0.5).sample((1,))\n val = random_val\n param = (\n nn.Parameter(val * torch.ones(1), requires_grad=not is_fixed)\n ).to(device)\n noise_means_e.append(param)\n noise_means_requires_grad_e.append(not is_fixed)\n noise_means.append(nn.ParameterList(noise_means_e))\n noise_means_requires_grad.append(noise_means_requires_grad_e)\n return noise_means, noise_means_requires_grad\n\n @staticmethod\n def _set_initial_noise_stds(\n dag: nx.DiGraph,\n fix_mechanisms: bool,\n intervention_targets_per_env: Tensor,\n fix_all_intervention_targets: bool,\n device: torch.device,\n ) -> tuple[list[ParameterList], list[list[bool]]]:\n noise_stds = []\n noise_stds_requires_grad = []\n for e in range(intervention_targets_per_env.shape[0]):\n noise_stds_e = []\n noise_stds_requires_grad_e = []\n for i in range(dag.number_of_nodes()):\n is_shifted = intervention_targets_per_env[e][i] == 1\n is_root = len(list(dag.predecessors(i))) == 0\n if fix_all_intervention_targets:\n is_fixed = is_shifted\n else:\n is_fixed = (is_shifted and not is_root) or (\n not is_shifted and is_root\n )\n is_fixed = is_fixed and fix_mechanisms\n random_val = Uniform(0.5, 1.5).sample((1,))\n val = random_val\n param = (\n nn.Parameter(val * torch.ones(1), requires_grad=not is_fixed)\n ).to(device)\n noise_stds_e.append(param)\n noise_stds_requires_grad_e.append(not is_fixed)\n noise_stds.append(nn.ParameterList(noise_stds_e))\n noise_stds_requires_grad.append(noise_stds_requires_grad_e)\n return noise_stds, noise_stds_requires_grad" }, { "identifier": "NaiveMultiEnvCausalDistribution", "path": "model/normalizing_flow/distribution.py", "snippet": "class NaiveMultiEnvCausalDistribution(MultiEnvCausalDistribution):\n \"\"\"\n Naive multi-environment causal distribution.\n\n This is a dummy-version of ParamMultiEnvCausalDistribution, where the causal mechanisms are assumed to\n be trivial (no connectioons between variables) and the noise distributions are assumed to be Gaussian\n and independent of the environment. 
This is equivalent to the independent component analysis (ICA) case.\n \"\"\"\n\n def __init__(\n self,\n adjacency_matrix: np.ndarray,\n ) -> None:\n super().__init__()\n self.adjacency_matrix = adjacency_matrix\n\n self.q0 = DiagGaussian(adjacency_matrix.shape[0], trainable=True)\n\n def multi_env_log_prob(\n self, z: Tensor, e: Tensor, intervention_targets: Tensor\n ) -> Tensor:\n return self.q0.log_prob(z)" }, { "identifier": "NonparamMultiEnvCausalDistribution", "path": "model/normalizing_flow/nonparametric_distribution.py", "snippet": "class NonparamMultiEnvCausalDistribution(nf.NormalizingFlow):\n \"\"\"\n Nonarametric multi-environment causal distribution.\n\n A nonparametric causal distribution that uses a normalizing flow to parameterize the latent\n causal mechanisms. This causal distribution has two parts:\n 1. The latent SCM, which is parameterized by a normalizing flow. It represents the reduced\n form of the SCM, mapping independent (Gaussian) exogenous noise to the endogenous latent\n variables. The causal structure of the latent SCM is encoded through the topological order\n of the latent variables according to the adjacency matrix.\n 2. Fixed, simple base distributions for the mechanisms that are intervened on.\n\n Attributes\n ----------\n adjacency_matrix : np.ndarray\n The adjacency matrix of the SCM.\n K : int\n The number of normalizing flow blocks to use for the reduced form of the SCM.\n net_hidden_dim : int\n The hidden dimension of the neural networks used in the normalizing flow blocks.\n net_hidden_layers : int\n The number of hidden layers of the neural networks used in the normalizing flow blocks.\n perm : torch.Tensor\n The permutation of the latent variables according to the topological order.\n\n Methods\n -------\n multi_env_log_prob(z, e, intervention_targets) -> torch.Tensor\n Compute the log probability of the given data.\n\n References\n ----------\n [1] https://arxiv.org/abs/2305.17225\n \"\"\"\n\n trainable = True\n\n def __init__(\n self,\n adjacency_matrix: np.ndarray,\n K: int = 3,\n net_hidden_dim: int = 128,\n net_hidden_layers: int = 3,\n ) -> None:\n self.adjacency_matrix = adjacency_matrix\n self.K = K\n self.net_hidden_dim = net_hidden_dim\n self.net_hidden_layers = net_hidden_layers\n\n latent_dim = adjacency_matrix.shape[0]\n\n # permutation according to topological order\n self.perm = torch.tensor(\n list(nx.topological_sort(nx.DiGraph(self.adjacency_matrix))),\n dtype=torch.long,\n )\n\n flows = make_spline_flows(\n K, latent_dim, net_hidden_dim, net_hidden_layers, permutation=False\n )\n q0 = MultiEnvBaseDistribution()\n super().__init__(q0=q0, flows=flows)\n\n def multi_env_log_prob(\n self, z: Tensor, e: Tensor, intervention_targets: Tensor\n ) -> Tensor:\n z = z[:, self.perm] # permute inputs to be in topological order\n log_q, u = self._determinant_terms(intervention_targets, z)\n prob_terms = self.q0.multi_env_log_prob(u, e, intervention_targets)\n prob_terms_intervened = self._prob_terms_intervened(intervention_targets, z)\n log_q += prob_terms + prob_terms_intervened\n\n return log_q\n\n def _determinant_terms(\n self, intervention_targets: Tensor, z: Tensor\n ) -> tuple[Tensor, Tensor]:\n log_q = torch.zeros(len(z), dtype=z.dtype, device=z.device)\n u = z\n for i in range(len(self.flows) - 1, -1, -1):\n u, log_det = self.flows[i].inverse(u)\n log_q += log_det\n\n # remove determinant terms for intervened mechanisms\n jac_row = torch.autograd.functional.jvp(\n self.inverse, z, v=intervention_targets, create_graph=True\n )[1]\n 
jac_diag_element = (jac_row * intervention_targets).sum(dim=1)\n # mask zero elements\n not_intervened_mask = ~intervention_targets.sum(dim=1).to(bool)\n jac_diag_element[not_intervened_mask] = 1\n log_q -= log(abs(jac_diag_element) + 1e-8)\n return log_q, u\n\n def _prob_terms_intervened(self, intervention_targets: Tensor, z: Tensor) -> Tensor:\n \"\"\"\n Compute the probability terms for the intervened mechanisms.\n \"\"\"\n gaussian_nll = gaussian_nll_loss(\n z, torch.zeros_like(z), torch.ones_like(z), full=True, reduction=\"none\"\n )\n mask = intervention_targets.to(bool)\n prob_terms_intervention_targets = -(mask * gaussian_nll).sum(dim=1)\n return prob_terms_intervention_targets" }, { "identifier": "make_spline_flows", "path": "model/normalizing_flow/utils.py", "snippet": "def make_spline_flows(\n K: int,\n latent_dim: int,\n net_hidden_dim: int,\n net_hidden_layers: int,\n permutation: bool = True,\n) -> list[nf.flows.Flow]:\n flows = []\n for i in range(K):\n flows += [\n nf.flows.AutoregressiveRationalQuadraticSpline(\n latent_dim, net_hidden_layers, net_hidden_dim\n )\n ]\n if permutation:\n flows += [nf.flows.LULinearPermute(latent_dim)]\n return flows" } ]
from typing import Optional from torch import abs, det, log, Tensor from .normalizing_flow import ParamMultiEnvCausalDistribution from .normalizing_flow.distribution import NaiveMultiEnvCausalDistribution from .normalizing_flow.nonparametric_distribution import ( NonparamMultiEnvCausalDistribution, ) from .normalizing_flow.utils import make_spline_flows import normflows as nf import numpy as np import torch import torch.nn as nn
4,909
class CauCAEncoder(nf.NormalizingFlow): """ CauCA encoder for multi-environment data. The encoder maps from the observed data x to the latent space v_hat. The latent space is assumed to have causal structure. The encoder is trained to maximize the likelihood of the data under the causal model. x and v_hat are assumed to have the same dimension. The encoder has two main components: 1. A causal base distribution q0 over the latent space. This encodes the latent causal structure. 2. An unmixing function mapping from the observations to the latent space. Attributes ---------- latent_dim: int Dimension of the latent and observed variables. adjacency_matrix: np.ndarray, shape (latent_dim, latent_dim) Adjacency matrix of the latent causal graph. intervention_targets_per_env: Tensor, shape (no_envs, latent_dim) Which variables are intervened on in each environment. fix_mechanisms: bool Whether to fix some fixable mechanisms in the causal model. (See documentation of the ParamMultiEnvCausalDistribution for details.) Default: False. fix_all_intervention_targets: bool Whether to fix all intervention targets in the causal model. (See documentation of the ParamMultiEnvCausalDistribution for details.) Default: False. nonparametric_base_distr: bool Whether to use a nonparametric base distribution. If False, a parametric base distribution assuming linear causal mechanisms is used. Default: False. flows: Optional[list[nf.flows.Flow]] List of normalizing flows to use for the unmixing function. Default: None. q0: Optional[nf.distributions.BaseDistribution] Base distribution over the latent space. Default: None. K_cbn: int Number of normalizing flows to use for the nonparametric base distribution. Default: 3. net_hidden_dim_cbn: int Hidden dimension of the neural network used in the nonparametric base distribution. Default: 128. net_hidden_layers_cbn: int Number of hidden layers in the neural network used in the nonparametric base distribution. Default: 3. Methods ------- multi_env_log_prob(x, e, intervention_targets) -> Tensor Computes log probability of x in environment e. forward(x) -> Tensor Maps from the observed data x to the latent space v_hat. """ def __init__( self, latent_dim: int, adjacency_matrix: np.ndarray, intervention_targets_per_env: Optional[Tensor] = None, fix_mechanisms: bool = False, fix_all_intervention_targets: bool = False, nonparametric_base_distr: bool = False, flows: Optional[list[nf.flows.Flow]] = None, q0: Optional[nf.distributions.BaseDistribution] = None, K_cbn: int = 3, net_hidden_dim_cbn: int = 128, net_hidden_layers_cbn: int = 3, ) -> None: self.latent_dim = latent_dim self.adjacency_matrix = adjacency_matrix self.intervention_targets_per_env = intervention_targets_per_env self.fix_mechanisms = fix_mechanisms self.fix_all_intervention_targets = fix_all_intervention_targets self.nonparametric_base_distr = nonparametric_base_distr self.K_cbn = K_cbn self.net_hidden_dim_cbn = net_hidden_dim_cbn self.net_hidden_layers_cbn = net_hidden_layers_cbn if q0 is None: if self.nonparametric_base_distr:
class CauCAEncoder(nf.NormalizingFlow): """ CauCA encoder for multi-environment data. The encoder maps from the observed data x to the latent space v_hat. The latent space is assumed to have causal structure. The encoder is trained to maximize the likelihood of the data under the causal model. x and v_hat are assumed to have the same dimension. The encoder has two main components: 1. A causal base distribution q0 over the latent space. This encodes the latent causal structure. 2. An unmixing function mapping from the observations to the latent space. Attributes ---------- latent_dim: int Dimension of the latent and observed variables. adjacency_matrix: np.ndarray, shape (latent_dim, latent_dim) Adjacency matrix of the latent causal graph. intervention_targets_per_env: Tensor, shape (no_envs, latent_dim) Which variables are intervened on in each environment. fix_mechanisms: bool Whether to fix some fixable mechanisms in the causal model. (See documentation of the ParamMultiEnvCausalDistribution for details.) Default: False. fix_all_intervention_targets: bool Whether to fix all intervention targets in the causal model. (See documentation of the ParamMultiEnvCausalDistribution for details.) Default: False. nonparametric_base_distr: bool Whether to use a nonparametric base distribution. If False, a parametric base distribution assuming linear causal mechanisms is used. Default: False. flows: Optional[list[nf.flows.Flow]] List of normalizing flows to use for the unmixing function. Default: None. q0: Optional[nf.distributions.BaseDistribution] Base distribution over the latent space. Default: None. K_cbn: int Number of normalizing flows to use for the nonparametric base distribution. Default: 3. net_hidden_dim_cbn: int Hidden dimension of the neural network used in the nonparametric base distribution. Default: 128. net_hidden_layers_cbn: int Number of hidden layers in the neural network used in the nonparametric base distribution. Default: 3. Methods ------- multi_env_log_prob(x, e, intervention_targets) -> Tensor Computes log probability of x in environment e. forward(x) -> Tensor Maps from the observed data x to the latent space v_hat. """ def __init__( self, latent_dim: int, adjacency_matrix: np.ndarray, intervention_targets_per_env: Optional[Tensor] = None, fix_mechanisms: bool = False, fix_all_intervention_targets: bool = False, nonparametric_base_distr: bool = False, flows: Optional[list[nf.flows.Flow]] = None, q0: Optional[nf.distributions.BaseDistribution] = None, K_cbn: int = 3, net_hidden_dim_cbn: int = 128, net_hidden_layers_cbn: int = 3, ) -> None: self.latent_dim = latent_dim self.adjacency_matrix = adjacency_matrix self.intervention_targets_per_env = intervention_targets_per_env self.fix_mechanisms = fix_mechanisms self.fix_all_intervention_targets = fix_all_intervention_targets self.nonparametric_base_distr = nonparametric_base_distr self.K_cbn = K_cbn self.net_hidden_dim_cbn = net_hidden_dim_cbn self.net_hidden_layers_cbn = net_hidden_layers_cbn if q0 is None: if self.nonparametric_base_distr:
q0 = NonparamMultiEnvCausalDistribution(
2
2023-10-25 09:25:26+00:00
8k
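The causal-component-analysis record above builds its parametric base distribution as a linear Gaussian SCM over the latents: each variable's conditional mean is a weighted sum of its parents plus a learned noise term, and the density factorises along the DAG. The toy sketch below shows that factorised log-density for a single observational environment, ignoring interventions, environment indices, and parameter fixing; the containers for coefficients and noise parameters are hypothetical.

```python
import networkx as nx
import numpy as np
import torch

def linear_gaussian_log_prob(z, adjacency, weights, noise_means, noise_stds):
    """Log-density of latents under a linear Gaussian SCM.

    z: (batch, d); adjacency[i, j] != 0 means an edge i -> j;
    weights[j]: coefficients for j's parents, in dag.predecessors(j) order.
    """
    dag = nx.DiGraph(adjacency)
    log_p = torch.zeros(z.shape[0])
    for j in range(z.shape[1]):
        parents = list(dag.predecessors(j))
        mean = noise_means[j] * torch.ones(z.shape[0])
        if parents:
            mean = mean + z[:, parents] @ weights[j]        # parent contribution
        log_p += torch.distributions.Normal(mean, noise_stds[j]).log_prob(z[:, j])
    return log_p

A = np.array([[0, 1], [0, 0]])                              # z0 -> z1
lp = linear_gaussian_log_prob(torch.randn(4, 2), A,
                              weights={1: torch.tensor([0.8])},
                              noise_means=[0.0, 0.2], noise_stds=[1.0, 0.5])
```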
facebookresearch/verde
src/train/evaluator.py
[ { "identifier": "TransformerModel", "path": "src/train/model/transformer.py", "snippet": "class TransformerModel(nn.Module):\n\n STORE_OUTPUTS = False\n\n def __init__(self, params, id2word, is_encoder, with_output):\n \"\"\"\n Transformer model (encoder or decoder).\n \"\"\"\n super().__init__()\n\n # encoder / decoder, output layer\n self.dtype = torch.half if params.fp16 else torch.float\n self.is_encoder = is_encoder\n self.is_decoder = not is_encoder\n self.with_output = with_output\n\n self.apex = params.nvidia_apex\n self.xav_init = params.xav_init\n\n # dictionary\n self.n_words = params.n_words\n self.eos_index = params.eos_index\n self.pad_index = params.pad_index\n self.id2word = id2word\n assert len(self.id2word) == self.n_words\n\n # model parameters\n self.dim = params.enc_emb_dim if is_encoder else params.dec_emb_dim # 512 by default\n self.src_dim = params.enc_emb_dim\n self.hidden_dim = self.dim * 4 # 2048 by default\n self.n_hidden_layers = params.n_enc_hidden_layers if is_encoder else params.n_dec_hidden_layers\n self.n_heads = params.n_enc_heads if is_encoder else params.n_dec_heads # 8 by default\n self.n_layers = params.n_enc_layers if is_encoder else params.n_dec_layers\n self.dropout = params.dropout\n self.attention_dropout = params.attention_dropout\n self.norm_attention = params.norm_attention\n self.weight_vec = None\n assert (\n self.dim % self.n_heads == 0\n ), \"transformer dim must be a multiple of n_heads\"\n\n # iteration \n self.loop_idx = params.enc_loop_idx if is_encoder else params.dec_loop_idx\n assert self.loop_idx < self.n_layers, \"loop idx must be lower than nr of layers\" \n self.loops = params.enc_loops if is_encoder else params.dec_loops\n \n self.act = params.enc_act if is_encoder else params.dec_act\n assert (not self.act) or (self.loop_idx >= 0)\n \n # embeddings\n self.position_embeddings = Embedding(N_MAX_POSITIONS, self.dim)\n if params.sinusoidal_embeddings:\n create_sinusoidal_embeddings(\n N_MAX_POSITIONS, self.dim, out=self.position_embeddings.weight\n )\n self.embeddings = Embedding(self.n_words, self.dim, padding_idx=self.pad_index)\n self.layer_norm_emb = nn.LayerNorm(self.dim, eps=1e-12)\n\n # transformer layers\n self.layers = nn.ModuleList()\n\n for layer_id in range(self.n_layers):\n if params.enc_gated and self.is_encoder:\n gated = True\n elif params.dec_gated and self.is_decoder:\n gated = True\n elif params.gated and layer_id == self.loop_idx:\n gated = True\n else:\n gated = False\n\n if self.act and layer_id == self.loop_idx:\n self.layers.append(AdaptiveHalt(params, self.is_encoder, gated))\n else:\n self.layers.append(TransformerLayer(params, self.is_encoder, gated))\n\n self.cache = None\n\n # output layer\n if self.with_output:\n self.proj = nn.Linear(self.dim, params.n_words, bias=True)\n if self.xav_init:\n nn.init.xavier_uniform_(self.proj.weight)\n nn.init.constant_(self.proj.bias, 0.0)\n if params.share_inout_emb:\n self.proj.weight = self.embeddings.weight\n\n def forward(self, mode, **kwargs):\n \"\"\"\n Forward function with different forward modes.\n ### Small hack to handle PyTorch distributed.\n \"\"\"\n if mode == \"fwd\":\n return self.fwd(**kwargs)\n elif mode == \"predict\":\n return self.predict(**kwargs)\n else:\n raise Exception(\"Unknown mode: %s\" % mode)\n\n def fwd(\n self,\n x,\n lengths,\n causal,\n src_enc=None,\n src_len=None,\n positions=None,\n use_cache=False\n ):\n \"\"\"\n Inputs:\n `x` LongTensor(slen, bs), containing word indices\n `lengths` LongTensor(bs), containing the length of 
each sentence\n `causal` Boolean, if True, the attention is only done over previous hidden states\n `positions` LongTensor(slen, bs), containing word positions\n \"\"\"\n # lengths = (x != self.pad_index).float().sum(dim=1)\n # mask = x != self.pad_index\n\n # check inputs\n slen, bs = x.size()\n assert lengths.size(0) == bs\n assert lengths.max().item() <= slen\n x = x.transpose(0, 1) # batch size as dimension 0\n assert (src_enc is None) == (src_len is None)\n if src_enc is not None:\n assert self.is_decoder\n assert src_enc.size(0) == bs\n assert not (use_cache and self.cache is None)\n\n # generate masks\n mask, attn_mask = get_masks(slen, lengths, causal)\n src_mask = None\n if self.is_decoder and src_enc is not None:\n src_mask = (\n torch.arange(src_len.max(), dtype=torch.long, device=lengths.device)\n < src_len[:, None]\n )\n\n # positions\n if positions is None:\n positions = x.new(slen).long()\n positions = torch.arange(slen, out=positions).unsqueeze(0)\n else:\n assert positions.size() == (slen, bs)\n positions = positions.transpose(0, 1)\n\n # do not recompute cached elements\n if use_cache:\n _slen = slen - self.cache[\"slen\"]\n x = x[:, -_slen:]\n positions = positions[:, -_slen:]\n mask = mask[:, -_slen:]\n attn_mask = attn_mask[:, -_slen:]\n\n # all layer outputs\n if TransformerModel.STORE_OUTPUTS and not self.training:\n self.outputs = []\n for i in range(self.n_layers):\n self.layers[i].self_attention.outputs = []\n if self.is_decoder:\n for i in range(self.n_layers):\n self.layers[i].cross_attention.outputs = []\n\n\n # embeddings\n tensor = self.embeddings(x)\n tensor = tensor + self.position_embeddings(positions).expand_as(tensor)\n tensor = self.layer_norm_emb(tensor)\n tensor = F.dropout(tensor, p=self.dropout, training=self.training)\n tensor *= mask.unsqueeze(-1).to(tensor.dtype)\n if TransformerModel.STORE_OUTPUTS and not self.training:\n self.outputs.append(tensor.detach().cpu())\n\n # transformer layers\n for i in range(self.n_layers):\n loops = 1\n if self.loop_idx == -2 or self.loop_idx == i:\n loops = self.loops\n tensor = self.layers[i].forward(tensor, attn_mask, src_mask, src_enc, use_cache=use_cache, cache=self.cache, loop_count=loops)\n tensor *= mask.unsqueeze(-1).to(tensor.dtype)\n if TransformerModel.STORE_OUTPUTS and not self.training:\n self.outputs.append(tensor.detach().cpu()) \n\n # update cache length\n if use_cache:\n self.cache[\"slen\"] += tensor.size(1)\n\n # move back sequence length to dimension 0\n tensor = tensor.transpose(0, 1)\n\n return tensor\n\n def predict(self, tensor, pred_mask, y, get_scores, weighted=False):\n \"\"\"\n Given the last hidden state, compute word scores and/or the loss.\n `pred_mask` is a ByteTensor of shape (slen, bs), filled with 1 when\n we need to predict a word\n `y` is a LongTensor of shape (pred_mask.sum(),)\n `get_scores` is a boolean specifying whether we need to return scores\n \"\"\"\n x = tensor[pred_mask.unsqueeze(-1).expand_as(tensor)].view(-1, self.dim)\n assert (y == self.pad_index).sum().item() == 0\n scores = self.proj(x).view(-1, self.n_words)\n\n loss = F.cross_entropy(scores.float(), y, reduction=\"mean\")\n return scores, loss\n\n def generate(self, src_enc, src_len, max_len=200, sample_temperature=None):\n \"\"\"\n Decode a sentence given initial start.\n `x`:\n - LongTensor(bs, slen)\n <EOS> W1 W2 W3 <EOS> <PAD>\n <EOS> W1 W2 W3 W4 <EOS>\n `lengths`:\n - LongTensor(bs) [5, 6]\n `positions`:\n - False, for regular \"arange\" positions (LM)\n - True, to reset positions from the new 
generation (MT)\n \"\"\"\n\n # input batch\n bs = len(src_len)\n assert src_enc.size(0) == bs\n\n # generated sentences\n generated = src_len.new(max_len, bs) # upcoming output\n generated.fill_(self.pad_index) # fill upcoming ouput with <PAD>\n generated[0].fill_(self.eos_index) # we use <EOS> for <BOS> everywhere\n\n # positions\n positions = src_len.new(max_len).long()\n positions = (\n torch.arange(max_len, out=positions).unsqueeze(1).expand(max_len, bs)\n )\n\n # current position / max lengths / length of generated sentences / unfinished sentences\n cur_len = 1\n gen_len = src_len.clone().fill_(1)\n unfinished_sents = src_len.clone().fill_(1)\n\n # cache compute states\n self.cache = {\"slen\": 0}\n\n while cur_len < max_len:\n\n # compute word scores\n tensor = self.forward(\n \"fwd\",\n x=generated[:cur_len],\n lengths=gen_len,\n positions=positions[:cur_len],\n causal=True,\n src_enc=src_enc,\n src_len=src_len,\n use_cache=True,\n )\n assert tensor.size() == (1, bs, self.dim)\n tensor = tensor.data[-1, :, :].to(self.dtype) # (bs, dim)\n scores = self.proj(tensor) # (bs, n_words)\n\n # select next words: sample or greedy\n if sample_temperature is None:\n next_words = torch.topk(scores, 1)[1].squeeze(1)\n else:\n next_words = torch.multinomial(\n F.softmax(scores.float() / sample_temperature, dim=1), 1\n ).squeeze(1)\n assert next_words.size() == (bs,)\n\n # update generations / lengths / finished sentences / current length\n generated[cur_len] = next_words * unfinished_sents + self.pad_index * (\n 1 - unfinished_sents\n )\n gen_len.add_(unfinished_sents)\n unfinished_sents.mul_(next_words.ne(self.eos_index).long())\n cur_len = cur_len + 1\n\n # stop when there is a </s> in each sentence, or if we exceed the maximul length\n if unfinished_sents.max() == 0:\n break\n\n # add <EOS> to unfinished sentences\n if cur_len == max_len:\n generated[-1].masked_fill_(unfinished_sents.byte(), self.eos_index)\n\n # sanity check\n assert (generated == self.eos_index).sum() == 2 * bs\n\n return generated[:cur_len], gen_len\n\n def generate_beam(\n self, src_enc, src_len, beam_size, length_penalty, early_stopping, max_len=200\n ):\n \"\"\"\n Decode a sentence given initial start.\n `x`:\n - LongTensor(bs, slen)\n <EOS> W1 W2 W3 <EOS> <PAD>\n <EOS> W1 W2 W3 W4 <EOS>\n `lengths`:\n - LongTensor(bs) [5, 6]\n `positions`:\n - False, for regular \"arange\" positions (LM)\n - True, to reset positions from the new generation (MT)\n \"\"\"\n\n # check inputs\n assert src_enc.size(0) == src_len.size(0)\n assert beam_size >= 1\n\n # batch size / number of words\n bs = len(src_len)\n n_words = self.n_words\n\n # expand to beam size the source latent representations / source lengths\n src_enc = (\n src_enc.unsqueeze(1)\n .expand((bs, beam_size) + src_enc.shape[1:])\n .contiguous()\n .view((bs * beam_size,) + src_enc.shape[1:])\n )\n src_len = src_len.unsqueeze(1).expand(bs, beam_size).contiguous().view(-1)\n\n # generated sentences (batch with beam current hypotheses)\n generated = src_len.new(max_len, bs * beam_size) # upcoming output\n generated.fill_(self.pad_index) # fill upcoming ouput with <PAD>\n generated[0].fill_(self.eos_index) # we use <EOS> for <BOS> everywhere\n\n # generated hypotheses\n generated_hyps = [\n BeamHypotheses(beam_size, max_len, length_penalty, early_stopping)\n for _ in range(bs)\n ]\n\n # positions\n positions = src_len.new(max_len).long()\n positions = (\n torch.arange(max_len, out=positions).unsqueeze(1).expand_as(generated)\n )\n\n # scores for each sentence in the beam\n 
beam_scores = src_enc.new(bs, beam_size).float().fill_(0)\n beam_scores[:, 1:] = -1e9\n beam_scores = beam_scores.view(-1)\n\n # current position\n cur_len = 1\n\n # cache compute states\n self.cache = {\"slen\": 0}\n\n # done sentences\n done = [False for _ in range(bs)]\n\n while cur_len < max_len:\n\n # compute word scores\n tensor = self.forward(\n \"fwd\",\n x=generated[:cur_len],\n lengths=src_len.new(bs * beam_size).fill_(cur_len),\n positions=positions[:cur_len],\n causal=True,\n src_enc=src_enc,\n src_len=src_len,\n use_cache=True,\n )\n\n assert tensor.size() == (1, bs * beam_size, self.dim)\n if self.apex:\n tensor = tensor.data[-1, :, :].to(self.dtype) # (bs * beam_size, dim)\n else:\n tensor = tensor.data[-1, :, :] # .to(self.dtype) # (bs * beam_size, dim)\n scores = self.proj(tensor) # (bs * beam_size, n_words)\n scores = F.log_softmax(scores.float(), dim=-1) # (bs * beam_size, n_words)\n assert scores.size() == (bs * beam_size, n_words)\n\n # select next words with scores\n _scores = scores + beam_scores[:, None].expand_as(\n scores\n ) # (bs * beam_size, n_words)\n _scores = _scores.view(bs, beam_size * n_words) # (bs, beam_size * n_words)\n\n next_scores, next_words = torch.topk(\n _scores, 2 * beam_size, dim=1, largest=True, sorted=True\n )\n assert next_scores.size() == next_words.size() == (bs, 2 * beam_size)\n\n # next batch beam content\n # list of (bs * beam_size) tuple(next hypothesis score, next word, current position in the batch)\n next_batch_beam = []\n\n # for each sentence\n for sent_id in range(bs):\n\n # if we are done with this sentence\n done[sent_id] = done[sent_id] or generated_hyps[sent_id].is_done(\n next_scores[sent_id].max().item()\n )\n if done[sent_id]:\n next_batch_beam.extend(\n [(0, self.pad_index, 0)] * beam_size\n ) # pad the batch\n continue\n\n # next sentence beam content\n next_sent_beam = []\n\n # next words for this sentence\n for idx, value in zip(next_words[sent_id], next_scores[sent_id]):\n\n # get beam and word IDs\n beam_id = idx // n_words\n word_id = idx % n_words\n\n # end of sentence, or next word\n if word_id == self.eos_index or cur_len + 1 == max_len:\n generated_hyps[sent_id].add(\n generated[:cur_len, sent_id * beam_size + beam_id]\n .clone()\n .cpu(),\n value.item(),\n )\n else:\n next_sent_beam.append(\n (value, word_id, sent_id * beam_size + beam_id)\n )\n\n # the beam for next step is full\n if len(next_sent_beam) == beam_size:\n break\n\n # update next beam content\n assert len(next_sent_beam) == 0 if cur_len + 1 == max_len else beam_size\n if len(next_sent_beam) == 0:\n next_sent_beam = [\n (0, self.pad_index, 0)\n ] * beam_size # pad the batch\n next_batch_beam.extend(next_sent_beam)\n assert len(next_batch_beam) == beam_size * (sent_id + 1)\n\n # sanity check / prepare next batch\n assert len(next_batch_beam) == bs * beam_size\n beam_scores = beam_scores.new([x[0] for x in next_batch_beam])\n beam_words = generated.new([x[1] for x in next_batch_beam])\n beam_idx = src_len.new([x[2] for x in next_batch_beam])\n\n # re-order batch and internal states\n generated = generated[:, beam_idx]\n generated[cur_len] = beam_words\n for k in self.cache.keys():\n if k != \"slen\":\n self.cache[k] = (\n self.cache[k][0][beam_idx],\n self.cache[k][1][beam_idx],\n )\n\n # update current length\n cur_len = cur_len + 1\n\n # stop when we are done with each sentence\n if all(done):\n break\n\n # def get_coeffs(s):\n # roots = [int(s[i + 2]) for i, c in enumerate(s) if c == 'x']\n # poly = np.poly1d(roots, r=True)\n # coeffs = 
list(poly.coefficients.astype(np.int64))\n # return [c % 10 for c in coeffs], coeffs\n\n # visualize hypotheses\n # print([len(x) for x in generated_hyps], cur_len)\n # globals().update( locals() );\n # !import code; code.interact(local=vars())\n # for ii in range(bs):\n # for ss, ww in sorted(generated_hyps[ii].hyp, key=lambda x: x[0], reverse=True):\n # hh = \" \".join(self.id2word[x] for x in ww.tolist())\n # print(f\"{ss:+.4f} {hh}\")\n # # cc = get_coeffs(hh[4:])\n # # print(f\"{ss:+.4f} {hh} || {cc[0]} || {cc[1]}\")\n # print(\"\")\n\n # select the best hypotheses\n tgt_len = src_len.new(bs)\n best = []\n\n for i, hypotheses in enumerate(generated_hyps):\n best_hyp = max(hypotheses.hyp, key=lambda x: x[0])[1]\n tgt_len[i] = len(best_hyp) + 1 # +1 for the <EOS> symbol\n best.append(best_hyp)\n\n # generate target batch\n decoded = src_len.new(tgt_len.max().item(), bs).fill_(self.pad_index)\n for i, hypo in enumerate(best):\n decoded[: tgt_len[i] - 1, i] = hypo\n decoded[tgt_len[i] - 1, i] = self.eos_index\n\n # sanity check\n assert (decoded == self.eos_index).sum() == 2 * bs\n\n return decoded, tgt_len, generated_hyps" }, { "identifier": "to_cuda", "path": "src/utils.py", "snippet": "def to_cuda(*args):\n \"\"\"\n Move tensors to CUDA.\n \"\"\"\n if not CUDA:\n return args\n return [None if x is None else x.cuda() for x in args]" } ]
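The generate_beam snippet above flattens the per-beam word scores to shape (bs, beam_size * n_words), keeps the top 2 * beam_size candidates per sentence, and then recovers which beam and which word each flat index refers to. A minimal standalone sketch of that index arithmetic, with made-up sizes and random scores (none of the values come from this record):

import torch

bs, beam_size, n_words = 1, 3, 10
scores = torch.randn(bs * beam_size, n_words)         # log-probs per beam
beam_scores = torch.zeros(bs * beam_size)             # running beam scores

_scores = (scores + beam_scores[:, None]).view(bs, beam_size * n_words)
next_scores, next_words = torch.topk(_scores, 2 * beam_size, dim=1, largest=True, sorted=True)

for idx in next_words[0]:
    beam_id = idx // n_words   # which existing hypothesis the candidate extends
    word_id = idx % n_words    # which vocabulary item it appends
    assert 0 <= beam_id < beam_size and 0 <= word_id < n_words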
import ast import os import time import pickle import numpy as np import torch from collections import OrderedDict from logging import getLogger from scipy import stats from src.train.model import TransformerModel from src.utils import to_cuda
6,779
except Exception as e: logger.info(f'secret recovery: {self.secret_recovery}') logger.info(f'Exception when saving secret_recovery details: {e}') class Evaluator(object): def __init__(self, trainer, test_dataloader): """ Initialize evaluator. """ self.trainer = trainer self.iterator = test_dataloader self.modules = trainer.modules self.params = trainer.params self.env = trainer.env self.secret_check = SecretCheck(trainer, test_dataloader.dataset) def run_all_evals(self): """ Run all evaluations. """ scores = OrderedDict({"epoch": self.trainer.epoch}) with torch.no_grad(): encoder = ( self.modules["encoder"].module if self.params.multi_gpu else self.modules["encoder"] ) decoder = ( self.modules["decoder"].module if self.params.multi_gpu and hasattr(self.modules["decoder"], 'module') else self.modules["decoder"] ) encoder.eval() decoder.eval() self.run_distinguisher(encoder, decoder) self.run_direct_recovery(encoder, decoder) self.recover_secret_from_crossattention(encoder, decoder, scores) # cross attention (+ circular regression) self.hybrid() self.secret_check.store_results(self.params.dump_path, self.trainer.epoch) return scores def ordered_idx_from_scores(self, secret_scores): ''' Takes bit-wise scores (length N) and return sorted list<(idx, score)> and sorted list<idx>. ''' idx_with_scores = list(enumerate(secret_scores)) # a list of (idx, score) sorted_idx_by_scores = sorted(idx_with_scores, key=lambda item: item[1], reverse=True) # descending return sorted_idx_by_scores, [t[0] for t in sorted_idx_by_scores] def hybrid(self): ''' Hybrid secret recovery that combines direct secret recovery, distinguisher and CA ''' methods_dict = { 'direct': self.direct_results, 'distinguisher': self.distinguisher_results, 'ca': self.ca_results, } combos = [['direct', 'ca'], ['direct', 'distinguisher'], ['ca', 'distinguisher'], ['direct', 'ca', 'distinguisher']] for combo in combos: logger.info(f'Hybrid: {", ".join(combo)}') self.hybrid_sub([methods_dict[m] for m in combo], ", ".join(combo)) def hybrid_sub(self, methods, combo_name): for results in methods: if max(results) == 0: # the scores are non-negative. Hybrid on this combo is useless. return None sum_and_max = np.zeros((4,self.params.N)) for results in methods: # Normalized, sum and max sum_and_max[0] += results/max(results) sum_and_max[1] = np.max((sum_and_max[1], results/max(results)), axis=0) # Ranking, sum and max rank = stats.rankdata(results, method='min') sum_and_max[2] += rank sum_and_max[3] = np.max((sum_and_max[3], rank), axis=0) for i, name in enumerate(['Sum Normalized', 'Max Normalized', 'Sum Rank', 'Max Rank']): idx_w_scores, indices = self.ordered_idx_from_scores(sum_and_max[i]) self.secret_check.match_secret_iter(indices, idx_w_scores, f'{combo_name} - {name}') ######################################################## # CODE TO RUN DIRECT SECRET RECOVERY AND DISTINGUISHER # ######################################################## def run_beam_generation(self, x1_, len1_, encoder, decoder): # Run beam generation to get output. 
encoded = encoder("fwd", x=x1_, lengths=len1_, causal=False) _, _, generations= decoder.generate_beam(encoded.transpose(0, 1), len1_, beam_size=self.params.beam_size, length_penalty=self.params.beam_length_penalty, early_stopping=self.params.beam_early_stopping, max_len=self.params.max_output_len) beam_log = [] for i in range(len(generations)): sorted_hyp = sorted(generations[i].hyp, key=lambda x: x[0], reverse=True) if len(sorted_hyp) == 0: beam_log.append(0) else: _, hyp = sorted_hyp[0] output = [self.trainer.env.id2word[wid] for wid in hyp[1:].tolist()] try: beam_log.append(self.env.output_encoder.decode(output)[0]) except Exception as e: beam_log.append(-1) return beam_log def predict_outputs(self, A, encoder, decoder, intermediate=False): ''' if intermediate is False then output integers if intermediate is True then output distributions ''' preds = [] # Encodes data in format expected by model encA = self.env.input_encoder.encode(A) encA = [torch.LongTensor([self.env.word2id[w] for w in seq]) for seq in encA] for k in range(0, len(encA), self.params.batch_size): x = encA[k:k+self.params.batch_size] x1, len1 = self.env.batch_sequences(x)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = getLogger() class SecretCheck(object): def __init__(self, trainer, dataset): self.trainer = trainer self.params = trainer.params self.orig_A, self.orig_b = dataset.orig_A, dataset.orig_b self.secret_recovery = { 'success': [] } def match_secret(self, guess, method_name): ''' Takes an int or bool (binary) list or array as secret guess and check against the original tiny dataset. ''' guess = np.array(guess).astype(int) if self.params.secret_type in ['gaussian', 'binomial']: # only check if nonzeros are identified for gaussian and binomial secrets matched = np.all((self.params.secret != 0) == (guess != 0)) elif self.orig_A is None: # Old data, original dataset not available. Directly check the secret. matched = np.all(self.params.secret == guess) else: err_pred = (self.orig_A @ guess - self.orig_b) % self.params.Q err_pred[err_pred > self.params.Q // 2] -= self.params.Q matched = np.std(err_pred) < 2*self.params.sigma if matched: logger.info(f'{method_name}: all bits in secret have been recovered!') if method_name not in self.secret_recovery['success']: self.secret_recovery['success'].append(method_name) self.trainer.secret_match = True return True def match_secret_iter(self, idx_list, sorted_idx_with_scores, method_name): ''' Takes a list of indices sorted by scores (descending, high score means more likely to be 1) and iteratively matches the secret. ''' self.secret_recovery[method_name] = sorted_idx_with_scores or idx_list guess = np.zeros(self.params.N) for i in range(min(self.params.N // 5, len(idx_list))): # sparse assumption guess[idx_list[i]] = 1 if self.match_secret(guess, method_name): return True logger.info(f'{method_name}: secret not predicted.') return False def add_log(self, k, v): self.secret_recovery[k] = v def store_results(self, path, epoch): try: pickle.dump(self.secret_recovery, open(os.path.join(path, f'secret_recovery_{epoch}.pkl'), 'wb')) except Exception as e: logger.info(f'secret recovery: {self.secret_recovery}') logger.info(f'Exception when saving secret_recovery details: {e}') class Evaluator(object): def __init__(self, trainer, test_dataloader): """ Initialize evaluator. """ self.trainer = trainer self.iterator = test_dataloader self.modules = trainer.modules self.params = trainer.params self.env = trainer.env self.secret_check = SecretCheck(trainer, test_dataloader.dataset) def run_all_evals(self): """ Run all evaluations. """ scores = OrderedDict({"epoch": self.trainer.epoch}) with torch.no_grad(): encoder = ( self.modules["encoder"].module if self.params.multi_gpu else self.modules["encoder"] ) decoder = ( self.modules["decoder"].module if self.params.multi_gpu and hasattr(self.modules["decoder"], 'module') else self.modules["decoder"] ) encoder.eval() decoder.eval() self.run_distinguisher(encoder, decoder) self.run_direct_recovery(encoder, decoder) self.recover_secret_from_crossattention(encoder, decoder, scores) # cross attention (+ circular regression) self.hybrid() self.secret_check.store_results(self.params.dump_path, self.trainer.epoch) return scores def ordered_idx_from_scores(self, secret_scores): ''' Takes bit-wise scores (length N) and return sorted list<(idx, score)> and sorted list<idx>. 
''' idx_with_scores = list(enumerate(secret_scores)) # a list of (idx, score) sorted_idx_by_scores = sorted(idx_with_scores, key=lambda item: item[1], reverse=True) # descending return sorted_idx_by_scores, [t[0] for t in sorted_idx_by_scores] def hybrid(self): ''' Hybrid secret recovery that combines direct secret recovery, distinguisher and CA ''' methods_dict = { 'direct': self.direct_results, 'distinguisher': self.distinguisher_results, 'ca': self.ca_results, } combos = [['direct', 'ca'], ['direct', 'distinguisher'], ['ca', 'distinguisher'], ['direct', 'ca', 'distinguisher']] for combo in combos: logger.info(f'Hybrid: {", ".join(combo)}') self.hybrid_sub([methods_dict[m] for m in combo], ", ".join(combo)) def hybrid_sub(self, methods, combo_name): for results in methods: if max(results) == 0: # the scores are non-negative. Hybrid on this combo is useless. return None sum_and_max = np.zeros((4,self.params.N)) for results in methods: # Normalized, sum and max sum_and_max[0] += results/max(results) sum_and_max[1] = np.max((sum_and_max[1], results/max(results)), axis=0) # Ranking, sum and max rank = stats.rankdata(results, method='min') sum_and_max[2] += rank sum_and_max[3] = np.max((sum_and_max[3], rank), axis=0) for i, name in enumerate(['Sum Normalized', 'Max Normalized', 'Sum Rank', 'Max Rank']): idx_w_scores, indices = self.ordered_idx_from_scores(sum_and_max[i]) self.secret_check.match_secret_iter(indices, idx_w_scores, f'{combo_name} - {name}') ######################################################## # CODE TO RUN DIRECT SECRET RECOVERY AND DISTINGUISHER # ######################################################## def run_beam_generation(self, x1_, len1_, encoder, decoder): # Run beam generation to get output. encoded = encoder("fwd", x=x1_, lengths=len1_, causal=False) _, _, generations= decoder.generate_beam(encoded.transpose(0, 1), len1_, beam_size=self.params.beam_size, length_penalty=self.params.beam_length_penalty, early_stopping=self.params.beam_early_stopping, max_len=self.params.max_output_len) beam_log = [] for i in range(len(generations)): sorted_hyp = sorted(generations[i].hyp, key=lambda x: x[0], reverse=True) if len(sorted_hyp) == 0: beam_log.append(0) else: _, hyp = sorted_hyp[0] output = [self.trainer.env.id2word[wid] for wid in hyp[1:].tolist()] try: beam_log.append(self.env.output_encoder.decode(output)[0]) except Exception as e: beam_log.append(-1) return beam_log def predict_outputs(self, A, encoder, decoder, intermediate=False): ''' if intermediate is False then output integers if intermediate is True then output distributions ''' preds = [] # Encodes data in format expected by model encA = self.env.input_encoder.encode(A) encA = [torch.LongTensor([self.env.word2id[w] for w in seq]) for seq in encA] for k in range(0, len(encA), self.params.batch_size): x = encA[k:k+self.params.batch_size] x1, len1 = self.env.batch_sequences(x)
x1_, len1_ = to_cuda(x1, len1)
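The gold next line feeds the freshly batched tensors through the to_cuda helper whose snippet appears in this record's context. A minimal sketch of that call pattern, assuming a stand-in CUDA flag so the example also runs on CPU-only machines (the tensor shapes are illustrative):

import torch

CUDA = torch.cuda.is_available()   # stand-in for the module-level flag used by src/utils.py

def to_cuda(*args):
    # Same behaviour as the context snippet: pass tensors through on CPU,
    # otherwise move every non-None tensor to the GPU.
    if not CUDA:
        return args
    return [None if x is None else x.cuda() for x in args]

x1 = torch.zeros(4, 12, dtype=torch.long)       # hypothetical token-id batch
len1 = torch.full((4,), 12, dtype=torch.long)   # hypothetical sequence lengths
x1_, len1_ = to_cuda(x1, len1)                  # same unpacking as the gold next_line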
1
2023-10-30 17:53:57+00:00
8k
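The hybrid_sub method shown in this record fuses per-bit secret scores from several recovery methods by max-normalising them and by ranking them, then keeps element-wise sums and maxima of both. A self-contained sketch of that aggregation on two invented score vectors (only the combination rules follow the snippet; the numbers are made up):

import numpy as np
from scipy import stats

N = 6
direct = np.array([0.1, 0.9, 0.2, 0.0, 0.4, 0.3])   # hypothetical per-bit scores
ca = np.array([0.5, 0.7, 0.1, 0.2, 0.9, 0.0])

sum_and_max = np.zeros((4, N))
for results in (direct, ca):
    sum_and_max[0] += results / max(results)                                    # sum of normalised scores
    sum_and_max[1] = np.max((sum_and_max[1], results / max(results)), axis=0)   # max of normalised scores
    rank = stats.rankdata(results, method='min')
    sum_and_max[2] += rank                                                      # sum of ranks
    sum_and_max[3] = np.max((sum_and_max[3], rank), axis=0)                     # max of ranks

# Highest combined scores are the indices guessed to be non-zero secret bits (sparse assumption).
order = np.argsort(-sum_and_max[0])
print(order[: max(1, N // 5)])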
andriioreshk1118/python-second
linear_algebra/src/test_linear_algebra.py
[ { "identifier": "Matrix", "path": "linear_algebra/src/lib.py", "snippet": "class Matrix:\n \"\"\"\n class: Matrix\n This class represents an arbitrary matrix.\n\n Overview of the methods:\n\n __init__():\n __str__(): returns a string representation\n __add__(other: Matrix): matrix addition\n __sub__(other: Matrix): matrix subtraction\n __mul__(other: float): scalar multiplication\n __mul__(other: Vector): vector multiplication\n height() : returns height\n width() : returns width\n component(x: int, y: int): returns specified component\n change_component(x: int, y: int, value: float): changes specified component\n minor(x: int, y: int): returns minor along (x, y)\n cofactor(x: int, y: int): returns cofactor along (x, y)\n determinant() : returns determinant\n \"\"\"\n\n def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:\n \"\"\"\n simple constructor for initializing the matrix with components.\n \"\"\"\n self.__matrix = matrix\n self.__width = w\n self.__height = h\n\n def __str__(self) -> str:\n \"\"\"\n returns a string representation of this matrix.\n \"\"\"\n ans = \"\"\n for i in range(self.__height):\n ans += \"|\"\n for j in range(self.__width):\n if j < self.__width - 1:\n ans += str(self.__matrix[i][j]) + \",\"\n else:\n ans += str(self.__matrix[i][j]) + \"|\\n\"\n return ans\n\n def __add__(self, other: Matrix) -> Matrix:\n \"\"\"\n implements matrix addition.\n \"\"\"\n if self.__width == other.width() and self.__height == other.height():\n matrix = []\n for i in range(self.__height):\n row = [\n self.__matrix[i][j] + other.component(i, j)\n for j in range(self.__width)\n ]\n matrix.append(row)\n return Matrix(matrix, self.__width, self.__height)\n else:\n raise Exception(\"matrix must have the same dimension!\")\n\n def __sub__(self, other: Matrix) -> Matrix:\n \"\"\"\n implements matrix subtraction.\n \"\"\"\n if self.__width == other.width() and self.__height == other.height():\n matrix = []\n for i in range(self.__height):\n row = [\n self.__matrix[i][j] - other.component(i, j)\n for j in range(self.__width)\n ]\n matrix.append(row)\n return Matrix(matrix, self.__width, self.__height)\n else:\n raise Exception(\"matrices must have the same dimension!\")\n\n @overload\n def __mul__(self, other: float) -> Matrix:\n ...\n\n @overload\n def __mul__(self, other: Vector) -> Vector:\n ...\n\n def __mul__(self, other: float | Vector) -> Vector | Matrix:\n \"\"\"\n implements the matrix-vector multiplication.\n implements the matrix-scalar multiplication\n \"\"\"\n if isinstance(other, Vector): # matrix-vector\n if len(other) == self.__width:\n ans = zero_vector(self.__height)\n for i in range(self.__height):\n prods = [\n self.__matrix[i][j] * other.component(j)\n for j in range(self.__width)\n ]\n ans.change_component(i, sum(prods))\n return ans\n else:\n raise Exception(\n \"vector must have the same size as the \"\n \"number of columns of the matrix!\"\n )\n elif isinstance(other, (int, float)): # matrix-scalar\n matrix = [\n [self.__matrix[i][j] * other for j in range(self.__width)]\n for i in range(self.__height)\n ]\n return Matrix(matrix, self.__width, self.__height)\n return None\n\n def height(self) -> int:\n \"\"\"\n getter for the height\n \"\"\"\n return self.__height\n\n def width(self) -> int:\n \"\"\"\n getter for the width\n \"\"\"\n return self.__width\n\n def component(self, x: int, y: int) -> float:\n \"\"\"\n returns the specified (x,y) component\n \"\"\"\n if 0 <= x < self.__height and 0 <= y < self.__width:\n return self.__matrix[x][y]\n 
else:\n raise Exception(\"change_component: indices out of bounds\")\n\n def change_component(self, x: int, y: int, value: float) -> None:\n \"\"\"\n changes the x-y component of this matrix\n \"\"\"\n if 0 <= x < self.__height and 0 <= y < self.__width:\n self.__matrix[x][y] = value\n else:\n raise Exception(\"change_component: indices out of bounds\")\n\n def minor(self, x: int, y: int) -> float:\n \"\"\"\n returns the minor along (x, y)\n \"\"\"\n if self.__height != self.__width:\n raise Exception(\"Matrix is not square\")\n minor = self.__matrix[:x] + self.__matrix[x + 1 :]\n for i in range(len(minor)):\n minor[i] = minor[i][:y] + minor[i][y + 1 :]\n return Matrix(minor, self.__width - 1, self.__height - 1).determinant()\n\n def cofactor(self, x: int, y: int) -> float:\n \"\"\"\n returns the cofactor (signed minor) along (x, y)\n \"\"\"\n if self.__height != self.__width:\n raise Exception(\"Matrix is not square\")\n if 0 <= x < self.__height and 0 <= y < self.__width:\n return (-1) ** (x + y) * self.minor(x, y)\n else:\n raise Exception(\"Indices out of bounds\")\n\n def determinant(self) -> float:\n \"\"\"\n returns the determinant of an nxn matrix using Laplace expansion\n \"\"\"\n if self.__height != self.__width:\n raise Exception(\"Matrix is not square\")\n if self.__height < 1:\n raise Exception(\"Matrix has no element\")\n elif self.__height == 1:\n return self.__matrix[0][0]\n elif self.__height == 2:\n return (\n self.__matrix[0][0] * self.__matrix[1][1]\n - self.__matrix[0][1] * self.__matrix[1][0]\n )\n else:\n cofactor_prods = [\n self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)\n ]\n return sum(cofactor_prods)" }, { "identifier": "Vector", "path": "linear_algebra/src/lib.py", "snippet": "class Vector:\n \"\"\"\n This class represents a vector of arbitrary size.\n You need to give the vector components.\n\n Overview of the methods:\n\n __init__(components: Collection[float] | None): init the vector\n __len__(): gets the size of the vector (number of components)\n __str__(): returns a string representation\n __add__(other: Vector): vector addition\n __sub__(other: Vector): vector subtraction\n __mul__(other: float): scalar multiplication\n __mul__(other: Vector): dot product\n copy(): copies this vector and returns it\n component(i): gets the i-th component (0-indexed)\n change_component(pos: int, value: float): changes specified component\n euclidean_length(): returns the euclidean length of the vector\n angle(other: Vector, deg: bool): returns the angle between two vectors\n TODO: compare-operator\n \"\"\"\n\n def __init__(self, components: Collection[float] | None = None) -> None:\n \"\"\"\n input: components or nothing\n simple constructor for init the vector\n \"\"\"\n if components is None:\n components = []\n self.__components = list(components)\n\n def __len__(self) -> int:\n \"\"\"\n returns the size of the vector\n \"\"\"\n return len(self.__components)\n\n def __str__(self) -> str:\n \"\"\"\n returns a string representation of the vector\n \"\"\"\n return \"(\" + \",\".join(map(str, self.__components)) + \")\"\n\n def __add__(self, other: Vector) -> Vector:\n \"\"\"\n input: other vector\n assumes: other vector has the same size\n returns a new vector that represents the sum.\n \"\"\"\n size = len(self)\n if size == len(other):\n result = [self.__components[i] + other.component(i) for i in range(size)]\n return Vector(result)\n else:\n raise Exception(\"must have the same size\")\n\n def __sub__(self, other: Vector) -> Vector:\n \"\"\"\n 
input: other vector\n assumes: other vector has the same size\n returns a new vector that represents the difference.\n \"\"\"\n size = len(self)\n if size == len(other):\n result = [self.__components[i] - other.component(i) for i in range(size)]\n return Vector(result)\n else: # error case\n raise Exception(\"must have the same size\")\n\n @overload\n def __mul__(self, other: float) -> Vector:\n ...\n\n @overload\n def __mul__(self, other: Vector) -> float:\n ...\n\n def __mul__(self, other: float | Vector) -> float | Vector:\n \"\"\"\n mul implements the scalar multiplication\n and the dot-product\n \"\"\"\n if isinstance(other, (float, int)):\n ans = [c * other for c in self.__components]\n return Vector(ans)\n elif isinstance(other, Vector) and len(self) == len(other):\n size = len(self)\n prods = [self.__components[i] * other.component(i) for i in range(size)]\n return sum(prods)\n else: # error case\n raise Exception(\"invalid operand!\")\n\n def copy(self) -> Vector:\n \"\"\"\n copies this vector and returns it.\n \"\"\"\n return Vector(self.__components)\n\n def component(self, i: int) -> float:\n \"\"\"\n input: index (0-indexed)\n output: the i-th component of the vector.\n \"\"\"\n if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):\n return self.__components[i]\n else:\n raise Exception(\"index out of range\")\n\n def change_component(self, pos: int, value: float) -> None:\n \"\"\"\n input: an index (pos) and a value\n changes the specified component (pos) with the\n 'value'\n \"\"\"\n # precondition\n assert -len(self.__components) <= pos < len(self.__components)\n self.__components[pos] = value\n\n def euclidean_length(self) -> float:\n \"\"\"\n returns the euclidean length of the vector\n\n >>> Vector([2, 3, 4]).euclidean_length()\n 5.385164807134504\n >>> Vector([1]).euclidean_length()\n 1.0\n >>> Vector([0, -1, -2, -3, 4, 5, 6]).euclidean_length()\n 9.539392014169456\n >>> Vector([]).euclidean_length()\n Traceback (most recent call last):\n ...\n Exception: Vector is empty\n \"\"\"\n if len(self.__components) == 0:\n raise Exception(\"Vector is empty\")\n squares = [c**2 for c in self.__components]\n return math.sqrt(sum(squares))\n\n def angle(self, other: Vector, deg: bool = False) -> float:\n \"\"\"\n find angle between two Vector (self, Vector)\n\n >>> Vector([3, 4, -1]).angle(Vector([2, -1, 1]))\n 1.4906464636572374\n >>> Vector([3, 4, -1]).angle(Vector([2, -1, 1]), deg = True)\n 85.40775111366095\n >>> Vector([3, 4, -1]).angle(Vector([2, -1]))\n Traceback (most recent call last):\n ...\n Exception: invalid operand!\n \"\"\"\n num = self * other\n den = self.euclidean_length() * other.euclidean_length()\n if deg:\n return math.degrees(math.acos(num / den))\n else:\n return math.acos(num / den)" }, { "identifier": "axpy", "path": "linear_algebra/src/lib.py", "snippet": "def axpy(scalar: float, x: Vector, y: Vector) -> Vector:\n \"\"\"\n input: a 'scalar' and two vectors 'x' and 'y'\n output: a vector\n computes the axpy operation\n \"\"\"\n # precondition\n assert isinstance(x, Vector)\n assert isinstance(y, Vector)\n assert isinstance(scalar, (int, float))\n return x * scalar + y" }, { "identifier": "square_zero_matrix", "path": "linear_algebra/src/lib.py", "snippet": "def square_zero_matrix(n: int) -> Matrix:\n \"\"\"\n returns a square zero-matrix of dimension NxN\n \"\"\"\n ans: list[list[float]] = [[0] * n for _ in range(n)]\n return Matrix(ans, n, n)" }, { "identifier": "unit_basis_vector", "path": "linear_algebra/src/lib.py", 
"snippet": "def unit_basis_vector(dimension: int, pos: int) -> Vector:\n \"\"\"\n returns a unit basis vector with a One\n at index 'pos' (indexing at 0)\n \"\"\"\n # precondition\n assert isinstance(dimension, int)\n assert isinstance(pos, int)\n ans = [0] * dimension\n ans[pos] = 1\n return Vector(ans)" }, { "identifier": "zero_vector", "path": "linear_algebra/src/lib.py", "snippet": "def zero_vector(dimension: int) -> Vector:\n \"\"\"\n returns a zero-vector of size 'dimension'\n \"\"\"\n # precondition\n assert isinstance(dimension, int)\n return Vector([0] * dimension)" } ]
import unittest import pytest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, )
4,266
""" Created on Mon Feb 26 15:40:07 2018 @author: Christian Bender @license: MIT-license This file contains the test-suite for the linear algebra library. """ class Test(unittest.TestCase): def test_component(self) -> None: """ test for method component() """ x = Vector([1, 2, 3]) assert x.component(0) == 1 assert x.component(2) == 3 _ = Vector() def test_str(self) -> None: """ test for method toString() """ x = Vector([0, 0, 0, 0, 0, 1]) assert str(x) == "(0,0,0,0,0,1)" def test_size(self) -> None: """ test for method size() """ x = Vector([1, 2, 3, 4]) assert len(x) == 4 def test_euclidean_length(self) -> None: """ test for method euclidean_length() """ x = Vector([1, 2]) y = Vector([1, 2, 3, 4, 5]) z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) w = Vector([1, -1, 1, -1, 2, -3, 4, -5]) assert x.euclidean_length() == pytest.approx(2.236, abs=1e-3) assert y.euclidean_length() == pytest.approx(7.416, abs=1e-3) assert z.euclidean_length() == 0 assert w.euclidean_length() == pytest.approx(7.616, abs=1e-3) def test_add(self) -> None: """ test for + operator """ x = Vector([1, 2, 3]) y = Vector([1, 1, 1]) assert (x + y).component(0) == 2 assert (x + y).component(1) == 3 assert (x + y).component(2) == 4 def test_sub(self) -> None: """ test for - operator """ x = Vector([1, 2, 3]) y = Vector([1, 1, 1]) assert (x - y).component(0) == 0 assert (x - y).component(1) == 1 assert (x - y).component(2) == 2 def test_mul(self) -> None: """ test for * operator """ x = Vector([1, 2, 3]) a = Vector([2, -1, 4]) # for test of dot product b = Vector([1, -2, -1]) assert str(x * 3.0) == "(3.0,6.0,9.0)" assert a * b == 0 def test_zero_vector(self) -> None: """ test for global function zero_vector() """ assert str(zero_vector(10)).count("0") == 10 def test_unit_basis_vector(self) -> None: """ test for global function unit_basis_vector() """ assert str(unit_basis_vector(3, 1)) == "(0,1,0)" def test_axpy(self) -> None: """ test for global function axpy() (operation) """ x = Vector([1, 2, 3]) y = Vector([1, 0, 1])
""" Created on Mon Feb 26 15:40:07 2018 @author: Christian Bender @license: MIT-license This file contains the test-suite for the linear algebra library. """ class Test(unittest.TestCase): def test_component(self) -> None: """ test for method component() """ x = Vector([1, 2, 3]) assert x.component(0) == 1 assert x.component(2) == 3 _ = Vector() def test_str(self) -> None: """ test for method toString() """ x = Vector([0, 0, 0, 0, 0, 1]) assert str(x) == "(0,0,0,0,0,1)" def test_size(self) -> None: """ test for method size() """ x = Vector([1, 2, 3, 4]) assert len(x) == 4 def test_euclidean_length(self) -> None: """ test for method euclidean_length() """ x = Vector([1, 2]) y = Vector([1, 2, 3, 4, 5]) z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) w = Vector([1, -1, 1, -1, 2, -3, 4, -5]) assert x.euclidean_length() == pytest.approx(2.236, abs=1e-3) assert y.euclidean_length() == pytest.approx(7.416, abs=1e-3) assert z.euclidean_length() == 0 assert w.euclidean_length() == pytest.approx(7.616, abs=1e-3) def test_add(self) -> None: """ test for + operator """ x = Vector([1, 2, 3]) y = Vector([1, 1, 1]) assert (x + y).component(0) == 2 assert (x + y).component(1) == 3 assert (x + y).component(2) == 4 def test_sub(self) -> None: """ test for - operator """ x = Vector([1, 2, 3]) y = Vector([1, 1, 1]) assert (x - y).component(0) == 0 assert (x - y).component(1) == 1 assert (x - y).component(2) == 2 def test_mul(self) -> None: """ test for * operator """ x = Vector([1, 2, 3]) a = Vector([2, -1, 4]) # for test of dot product b = Vector([1, -2, -1]) assert str(x * 3.0) == "(3.0,6.0,9.0)" assert a * b == 0 def test_zero_vector(self) -> None: """ test for global function zero_vector() """ assert str(zero_vector(10)).count("0") == 10 def test_unit_basis_vector(self) -> None: """ test for global function unit_basis_vector() """ assert str(unit_basis_vector(3, 1)) == "(0,1,0)" def test_axpy(self) -> None: """ test for global function axpy() (operation) """ x = Vector([1, 2, 3]) y = Vector([1, 0, 1])
assert str(axpy(2, x, y)) == "(3,4,7)"
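The gold next line asserts the axpy result; the arithmetic behind it is 2 * (1, 2, 3) + (1, 0, 1) = (3, 4, 7). A tuple-based check of the same operation, independent of the repository's Vector class:

scalar, x, y = 2, (1, 2, 3), (1, 0, 1)
result = tuple(scalar * xi + yi for xi, yi in zip(x, y))
assert result == (3, 4, 7)   # matches str(axpy(2, x, y)) == "(3,4,7)"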
2
2023-10-26 12:00:23+00:00
8k
Doubling-Open-Source/git_calculator
tests/test_git_obj.py
[ { "identifier": "ToyRepoCreator", "path": "src/util/toy_repo.py", "snippet": "class ToyRepoCreator:\n \"\"\"\n A utility class for creating and managing a Git repository with custom commit patterns.\n\n This class allows for initializing a new Git repository in a specified directory \n and creating commits with configurable time intervals and authors. It supports \n creating both evenly and unevenly spaced commits.\n\n Attributes:\n directory (str): The directory where the Git repository will be initialized.\n authors (list of tuple): A list of authors (name, email) to be used for commits.\n start_date (datetime): The starting date for the first commit.\n\n Methods:\n initialize_repo(): Initializes a new Git repository in the specified directory.\n create_commit(file_index, author_name, author_email, commit_date):\n Creates a commit in the repository.\n create_custom_commits(commit_intervals): Creates multiple commits in the \n repository based on provided intervals.\n\n # Example usage\n creator = ToyRepoCreator('/path/to/repo')\n even_intervals = [7 * i for i in range(12)] # Weekly intervals\n uneven_intervals = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048] # Exponential intervals\n\n creator.create_custom_commits(even_intervals)\n # or\n creator.create_custom_commits(uneven_intervals)\n \"\"\"\n\n def __init__(self, directory):\n self.directory = directory\n self.authors = [\n ('Author 1', '[email protected]'),\n ('Author 2', '[email protected]'),\n ('Author 3', '[email protected]'),\n ('Author 4', '[email protected]'),\n ]\n self.start_date = datetime.datetime(2023, 9, 1)\n\n def initialize_repo(self):\n os.chdir(self.directory)\n git_util.git_run('init')\n\n def create_commit(self, file_index, author_name, author_email, commit_date):\n filename = f'file{file_index}.txt'\n\n with open(filename, 'w') as file:\n file.write(f'Commit {file_index} by {author_name}')\n\n git_util.git_run('add', filename)\n\n formatted_date = commit_date.strftime('%Y-%m-%dT%H:%M:%S')\n os.environ['GIT_COMMITTER_DATE'] = formatted_date\n os.environ['GIT_AUTHOR_DATE'] = formatted_date\n\n # Modify commit message to include 'bugfix' or 'hotfix'\n commit_msg = f\"Commit {file_index} by {author_name}\"\n if file_index % 4 == 0: # Every 4th commit\n commit_msg += \" - hotfix\"\n elif file_index % 3 == 0: # Every 3rd commit\n commit_msg += \" - bugfix\"\n\n git_util.git_run('commit', '-m', commit_msg, '--author', f'{author_name} <{author_email}>')\n\n del os.environ['GIT_COMMITTER_DATE']\n del os.environ['GIT_AUTHOR_DATE']\n\n def create_custom_commits(self, commit_intervals):\n self.initialize_repo()\n\n for i, interval in enumerate(commit_intervals, start=1):\n logging.debug('======= i =======: \\n%s', i)\n author_name, author_email = self.authors[i % len(self.authors)]\n logging.debug('======= author_name =======: \\n%s', author_name)\n logging.debug('======= author_email =======: \\n%s', author_email)\n commit_date = self.start_date + datetime.timedelta(days=interval)\n logging.debug('======= commit_date =======: \\n%s', commit_date)\n self.create_commit(i, author_name, author_email, commit_date)\n\n\n def create_custom_commits_single_author(self, commit_intervals):\n self.initialize_repo()\n\n for i, interval in enumerate(commit_intervals, start=1):\n logging.debug('======= i =======: \\n%s', i)\n author_name, author_email = self.authors[0][0], self.authors[0][1] \n logging.debug('======= author_name =======: \\n%s', author_name)\n logging.debug('======= author_email =======: \\n%s', author_email)\n 
commit_date = self.start_date + datetime.timedelta(days=interval)\n logging.debug('======= commit_date =======: \\n%s', commit_date)\n self.create_commit(i, author_name, author_email, commit_date)" }, { "identifier": "all_objects", "path": "src/git_ir.py", "snippet": "def all_objects():\n \"\"\"\n Retrieve a list of unique Git objects (e.g., commits, blobs, trees) present in the entire Git repository.\n\n This function uses Git's 'rev-list' command with the '--all' and '--objects' options to list all objects\n reachable from any branch or reference in the repository. It then processes the output to extract and return\n a list of unique Git object hashes.\n\n Returns:\n list of str: A list containing the unique Git object hashes found in the repository.\n\n Example:\n >>> all_objects()\n ['d1a7f4b29c79a11f08f2cdac7fe13c3d9ec19025', '6a2e78cf73ea38c614f96e8950a245b52ad7fe7c']\n \"\"\"\n cmd = git_run('rev-list', '--all', '--objects')\n res = {git_sha(line.split()[0]): None for line in cmd.stdout.splitlines()}\n res = list(res.keys()) # Sorted uniq\n return res\n\n#def git_log():\n \"\"\"\n Retrieve and parse Git commit log entries from the entire Git repository.\n\n This function uses Git's 'log' command with various options to obtain commit log entries from all branches and\n reflogs in the repository. It parses each log entry and creates Git commit objects with attributes such as\n commit timestamp, SHA hash, tree hash, parent commits, author email, and author name.\n\n After parsing, it links parent-child relationships between commits and calibrates the minimum SHA hash length.\n\n Returns:\n list of GitCommit: A list containing parsed Git commit objects representing the commit history.\n\n Note:\n The function assumes the availability of the 'git_run', 'git_obj', and 'git_sha' modules for running Git\n commands, creating Git commit objects, and handling SHA hashes, respectively.\n\n Example:\n >>> git_log()\n [\n GitCommit(\n timestamp=1591272869,\n sha='d1a7f4b29c79a11f08f2cdac7fe13c3d9ec19025',\n tree_sha='6a2e78cf73ea38c614f96e8950a245b52ad7fe7c',\n parents=['8d9a6d22dded20b4f6642ac21c64efab8dd9e78b'],\n author_email='[email protected]',\n author_name='Author Name'\n ),\n ...\n ]\n \"\"\"" }, { "identifier": "git_obj", "path": "src/git_ir.py", "snippet": "class git_obj(git_sha):\n __all_obj__ = {}\n\n def __new__(cls, sha):\n \"\"\"\n Overrides the default method for object creation, ensuring each 'git_obj' instance is unique\n by its 'sha' and stored in the '__all_obj__' dictionary.\n\n Parameters:\n -----------\n sha : str\n The unique SHA hash representing a Git object.\n\n Returns:\n --------\n git_obj\n The newly created 'git_obj' instance.\n \"\"\"\n res = super().__new__(cls, sha)\n cls.__all_obj__[sha] = res\n return res\n \n def _link(self):\n \"\"\"\n Identifies and links parent objects to their children, establishing a bidirectional\n relationship in the Git history graph.\n\n Ensures that the current object is registered as a child of each of its parents.\n \"\"\"\n for p in self._parents:\n try:\n p = self.obj(p)\n if self not in p._children:\n p._children.append(self)\n except KeyError:\n pass\n\n @classmethod\n def link_children(cls):\n \"\"\"\n Iterates through all instantiated 'git_obj' objects and ensures they are properly linked\n to their parent objects. 
This method helps in building the complete Git history graph.\n \"\"\"\n for o in cls.__all_obj__.values():\n o._link()\n\n @classmethod \n def _from_cat_file(cls, sha):\n \"\"\"\n Generates a 'git_obj' instance based on the content extracted from the 'git cat-file' command,\n parsing necessary information such as tree, parents, and author details.\n\n Parameters:\n -----------\n sha : str\n The unique SHA hash for a Git object.\n\n Returns:\n --------\n git_obj\n The newly created 'git_obj' instance with properties extracted from 'git cat-file'.\n \"\"\"\n cmd = git_run('cat-file','-p', sha)\n res = git_obj(sha)\n\n tree = auth = None\n res._parents = []\n for line in cmd.stdout.splitlines():\n denom, _ ,line = line.strip().partition(' ')\n if denom == 'tree':\n tree = line\n elif denom == 'parent':\n res._parents.append(line)\n elif denom == 'committer':\n line, timestamp, _tz = line.rsplit(' ', 5)\n res._when = int(timestamp) # TODO: Do something with tz\n if line.endswith('>'):\n auth, _, email= line[:-1].partition('<')\n auth = auth.strip()\n res._author = (auth, email)\n else:\n res._author = (line.strip(), None)\n\n\n logging.debug('======= res in _from_cat_file =======: \\n%s', res)\n return res\n\n @classmethod\n def _from_show(cls, sha): \n \"\"\"\n Constructs a 'git_obj' instance based on the output of the 'git show' command. It parses the\n command's output to extract detailed commit information.\n\n Parameters:\n -----------\n sha : str\n The unique SHA hash for a Git object.\n\n Returns:\n --------\n git_obj\n The 'git_obj' instance initialized with commit details.\n \"\"\" \n cmd = git_run('show', r'--format=%ct|%H|%T|%P|%ae|%an', '-s', ''+sha)\n line = cmd.stdout.strip()\n parts = line.split('|', 5)\n parts[3] = parts[3].split() # Multiple parents\n return git_obj.commit(*parts)\n\n @classmethod\n def obj(cls, sha):\n \"\"\"\n Retrieves the 'git_obj' instance corresponding to the given SHA if it exists. 
Otherwise, it\n tries to generate the 'git_obj' from existing data or by using the 'git show' command.\n\n Parameters:\n -----------\n sha : str\n The unique SHA hash for a Git object.\n\n Returns:\n --------\n git_obj\n The corresponding 'git_obj' instance.\n \"\"\"\n try:\n return cls.__all_obj__[sha]\n except KeyError:\n for k, v in cls.__all_obj__.items():\n if k.startswith(sha):\n return v\n return cls._from_show(sha)\n \n @classmethod\n def commit(cls, commit_time, commit_hash, tree_hash, parent_hashs, author_email, author_name):\n \"\"\"\n Instantiates and initializes a 'git_obj' instance that represents a detailed Git commit,\n including information about the commit's time, tree, parents, and author.\n\n Parameters:\n -----------\n commit_time : str\n The timestamp of the commit.\n commit_hash : str\n The unique SHA hash of the commit.\n tree_hash : str\n The SHA hash of the tree object this commit points to.\n parent_hashs : list\n A list of SHA hashes for the parents of the commit.\n author_email : str\n The email address of the author of the commit.\n author_name : str\n The name of the author of the commit.\n\n Returns:\n --------\n git_obj\n The newly initialized 'git_obj' instance representing a commit.\n \"\"\" \n res = cls(commit_hash)\n res._type = '<<' if len(parent_hashs) > 1 else '<'\n res._when = int(commit_time)\n res._author = (author_email, author_name)\n res._tree = git_sha(tree_hash)\n res._children = []\n res._parents = tuple(git_sha(p) for p in parent_hashs)\n return res\n \n def __repr__(self):\n \"\"\"\n Generates a human-readable representation of the 'git_obj' instance, primarily for debugging\n and logging purposes. It includes the SHA, type of commit, parents, and author information.\n\n Returns:\n --------\n str\n A string representation of the 'git_obj' instance.\n \"\"\" \n auth = self._author[0] if '@' in self._author[0] else repr(self._author[1])\n par = ''\n if len(self._parents) > 1:\n par = ','.join(repr(p) for p in self._parents)\n elif len(self._parents) == 1:\n par = repr(self._parents[0]) \n return f\"{self!s} {self._type} {par} {auth}\"" }, { "identifier": "git_log", "path": "src/git_ir.py", "snippet": "def git_log():\n def to_obj(line):\n parts = line.split('|', 5)\n parts[3] = parts[3].split() # Multiple parents\n return git_obj.commit(*parts)\n res = [\n to_obj(line)\n for line in git_run('log','--all','--reflog',r'--format=%ct|%H|%T|%P|%ae|%an').stdout.splitlines()\n ]\n git_obj.link_children()\n git_sha.calibrate_min()\n return res" }, { "identifier": "git_run", "path": "src/util/git_util.py", "snippet": "def git_run(*args):\n \"\"\"\n Execute a Git command with its arguments, print the command for reference,\n run it using a subprocess, capture its output, and return the result.\n\n This function allows you to interact with Git from within a Python script\n and access the results of Git commands programmatically.\n\n Args:\n *args: A variable number of arguments representing the Git command\n and its options and arguments.\n\n Returns:\n CompletedProcess: An object containing information about the executed\n command, including its return code, standard output, and standard error.\n \"\"\"\n print('# $> git', *args)\n res = sp_run(['git']+list(args), check=True, text=True, capture_output=True)\n return res" } ]
import pytest import tempfile import logging import subprocess import os from src.util.toy_repo import ToyRepoCreator from src.git_ir import all_objects, git_obj, git_log from src.util.git_util import git_run
3,850
@pytest.fixture(scope="function") def setup_logging(): logging.basicConfig( level=logging.DEBUG, # Set the desired log level format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) @pytest.fixture(scope="function") def temp_directory(): # Create a temporary directory for each test function temp_dir = tempfile.mkdtemp() yield temp_dir # Provide the temporary directory as a fixture # Clean up: remove the temporary directory and its contents subprocess.run(['rm', '-rf', temp_dir]) def test_new_object_creation(temp_directory): """ Test the __new__ method to ensure no duplicate objects are created for the same SHA. """ trc = ToyRepoCreator(temp_directory) even_intervals = [7 * i for i in range(12)] # Weekly intervals trc.create_custom_commits(even_intervals) res = git_run('log') def test_all_objects(temp_directory): """ Test the all_objects() method. """ trc = ToyRepoCreator(temp_directory) even_intervals = [7 * i for i in range(12)] # Weekly intervals trc.create_custom_commits(even_intervals)
@pytest.fixture(scope="function") def setup_logging(): logging.basicConfig( level=logging.DEBUG, # Set the desired log level format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) @pytest.fixture(scope="function") def temp_directory(): # Create a temporary directory for each test function temp_dir = tempfile.mkdtemp() yield temp_dir # Provide the temporary directory as a fixture # Clean up: remove the temporary directory and its contents subprocess.run(['rm', '-rf', temp_dir]) def test_new_object_creation(temp_directory): """ Test the __new__ method to ensure no duplicate objects are created for the same SHA. """ trc = ToyRepoCreator(temp_directory) even_intervals = [7 * i for i in range(12)] # Weekly intervals trc.create_custom_commits(even_intervals) res = git_run('log') def test_all_objects(temp_directory): """ Test the all_objects() method. """ trc = ToyRepoCreator(temp_directory) even_intervals = [7 * i for i in range(12)] # Weekly intervals trc.create_custom_commits(even_intervals)
result = all_objects()
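all_objects() (shown in this record's context) shells out to git rev-list --all --objects, keeps the first whitespace-separated token of every line, and de-duplicates while preserving order by using dict keys. A standalone sketch of that parsing step on a fake stdout; the two hashes are the ones quoted in the snippet's own docstring example:

fake_stdout = """\
d1a7f4b29c79a11f08f2cdac7fe13c3d9ec19025
6a2e78cf73ea38c614f96e8950a245b52ad7fe7c file1.txt
6a2e78cf73ea38c614f96e8950a245b52ad7fe7c file1.txt
"""

# Same idea as all_objects(): dict keys keep insertion order and drop duplicates.
res = {line.split()[0]: None for line in fake_stdout.splitlines()}
unique_hashes = list(res.keys())
assert unique_hashes == [
    'd1a7f4b29c79a11f08f2cdac7fe13c3d9ec19025',
    '6a2e78cf73ea38c614f96e8950a245b52ad7fe7c',
]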
1
2023-10-28 13:43:03+00:00
8k
sisl/SceneInformer
sceneinformer/model/model.py
[ { "identifier": "Encoder", "path": "sceneinformer/model/encoder.py", "snippet": "class Encoder(pl.LightningModule):\n def __init__(self, config: dict) -> None:\n super(Encoder, self).__init__()\n self.config = config\n\n self.hidden_dim = config['d_model']\n\n if 'point_enc' in config.keys():\n if config['point_enc'] == 'mlp':\n self.veh_encoder = MLPPointEncoder(config['vehicle_encoder'])\n self.ped_encoder = MLPPointEncoder(config['pedestrian_encoder'])\n self.bike_encoder = MLPPointEncoder(config['bike_encoder'])\n elif config['point_enc'] == 'pointnet':\n self.veh_encoder = PointEncoder(config['vehicle_encoder'])\n self.ped_encoder = PointEncoder(config['pedestrian_encoder'])\n self.bike_encoder = PointEncoder(config['bike_encoder'])\n else:\n self.veh_encoder = MLPPointEncoder(config['vehicle_encoder'])\n self.ped_encoder = MLPPointEncoder(config['pedestrian_encoder'])\n self.bike_encoder = MLPPointEncoder(config['bike_encoder'])\n\n self.poly_encoder = PointEncoder(config['map_encoder'])\n\n encoder_layer = nn.TransformerEncoderLayer( \\\n d_model=config['d_model'], \\\n nhead=config['nhead'], \\\n dim_feedforward=config['dim_feedforward'], \\\n batch_first=True)\n\n self.transformer_encoder = nn.TransformerEncoder( \\\n encoder_layer, \\\n num_layers=config['num_layers'])\n \n if config['compile_transformer']:\n self.transformer_encoder = torch.compile(self.transformer_encoder, mode=\"reduce-overhead\")\n\n def _get_the_mask(self, objects: torch.Tensor) -> torch.Tensor:\n B, N, T, D = objects.shape\n objects = objects[:,:,:,:2].reshape(B, N, T*2) # (x,y)\n nan_objects_ind = torch.argwhere(torch.isnan(objects).all(2))\n nan_objects_mask = torch.zeros((B,N)).to(self.device).bool()\n nan_objects_mask[nan_objects_ind[:,0], nan_objects_ind[:,1]] = True \n nan_objects_mask = nan_objects_mask.reshape(B, N) \n return nan_objects_mask\n\n\n def forward(self, sample: dict) -> torch.Tensor:\n objects = sample['observed_trajectories'] # (B, Na+Np+Nc, T, D)\n polylines = sample['polylines'] # (B, Nm, n, D)\n\n # Generate the masks to ignore NaN values\n objects_mask = self._get_the_mask(objects)\n polylines_mask = self._get_the_mask(polylines)\n src_key_padding_mask = torch.cat([objects_mask, polylines_mask], dim=1)\n\n # Reshape the objects tensor and extract the object types\n B, N, T, D = objects.shape\n objects = objects.reshape(B * N, T, D)\n objects_types = objects[:, 0, -1]\n objects = objects[:, :, :-1]\n\n # Generate masks for each object type\n veh_ind_mask = objects_types == 0\n ped_ind_mask = objects_types == 1\n bike_ind_mask = objects_types == 2\n\n objects = torch.nan_to_num(objects, nan=0) # -99?\n\n vehs = objects[veh_ind_mask]\n peds = objects[ped_ind_mask]\n bike = objects[bike_ind_mask]\n\n vehs = vehs.permute(0, 2, 1) if vehs.shape[0] > 0 else torch.empty(0, 11, T, device=self.device, dtype=vehs.dtype)\n peds = peds.permute(0, 2, 1) if peds.shape[0] > 0 else torch.empty(0, 11, T, device=self.device, dtype=peds.dtype)\n bike = bike.permute(0, 2, 1) if bike.shape[0] > 0 else torch.empty(0, 11, T, device=self.device, dtype=bike.dtype)\n\n # Encode the objects using the appropriate encoder for each object type\n vehs = self.veh_encoder(vehs) if vehs.shape[0] > 0 else torch.empty(0, self.hidden_dim, device=self.device)\n peds = self.ped_encoder(peds) if peds.shape[0] > 0 else torch.empty(0, self.hidden_dim, device=self.device)\n bike = self.bike_encoder(bike) if bike.shape[0] > 0 else torch.empty(0, self.hidden_dim, device=self.device)\n\n peds = peds.type(vehs.dtype)\n bike = 
bike.type(vehs.dtype)\n\n processed_objects = torch.zeros(B * N, self.hidden_dim, device=self.device, dtype=vehs.dtype)\n\n processed_objects[veh_ind_mask] = vehs\n processed_objects[ped_ind_mask] = peds\n processed_objects[bike_ind_mask] = bike\n processed_objects = processed_objects.reshape(B, N, -1) # (B, Na+Np+Nc, D)\n\n polylines = torch.nan_to_num(polylines, nan=0)\n B, Nm, Np, D = polylines.shape \n polylines = polylines.reshape(B*Nm, Np, D)\n polylines = polylines.permute(0, 2, 1)\n processed_polylines = self.poly_encoder(polylines) #(B, Nm, D)\n processed_polylines = processed_polylines.reshape(B, Nm, -1) #(B, Nm, D)\n\n obs_tokens = torch.cat([processed_objects, processed_polylines], dim=1)\n encoded_obs = self.transformer_encoder(obs_tokens, src_key_padding_mask=src_key_padding_mask) #CHECK\n\n assert not torch.isnan(encoded_obs).any(), 'NaNs in the encoded observations!'\n\n return {\n 'encoded_obs': encoded_obs,\n 'src_key_padding_mask': src_key_padding_mask\n }" }, { "identifier": "Decoder", "path": "sceneinformer/model/decoder.py", "snippet": "class Decoder(pl.LightningModule):\n def __init__(self, config: dict) -> None:\n super(Decoder, self).__init__()\n self.config = config\n\n self.anchor_encoding = MLP(config['anchor_encoding'])\n\n decoder_layer = nn.TransformerDecoderLayer( \\\n d_model=config['d_model'], \\\n nhead=config['nhead'], \\\n dim_feedforward=config['dim_feedforward'], \\\n batch_first=True)\n\n self.transformer_decoder = nn.TransformerDecoder( \\\n decoder_layer, \\\n num_layers=config['num_layers'])\n\n if config['compile_transformer']:\n self.transformer_decoder = torch.compile(self.transformer_decoder, mode=\"reduce-overhead\")\n\n self.token_decoder = MLP(config['token_decoder'])\n self.classifier_traj = MLP(config['classifier_traj'])\n self.classifier_occ = MLP(config['classifier_occ'])\n self.predictor = MLP(config['predictor'])\n\n def forward(self, anchors: torch.Tensor, memory_tokens: torch.Tensor, memory_mask: torch.Tensor) -> torch.Tensor:\n B, N, D = anchors.shape\n\n invalid_anchors = torch.argwhere(torch.isnan(anchors))\n\n invalid_anchors_mask = torch.ones(anchors.shape[:2]).to(anchors.device)\n invalid_anchors_mask[invalid_anchors[:,0], invalid_anchors[:,1]] = 0\n\n bool_tgt_anchor_mask = torch.zeros(anchors.shape[:2]).to(anchors.device).bool()\n bool_tgt_anchor_mask[invalid_anchors[:,0], invalid_anchors[:,1]] = True\n\n anchors = torch.nan_to_num(anchors, nan=0)\n\n # Encode anchors with MLP.\n anchors = anchors.reshape(B * N, D)\n anchor_tokens = self.anchor_encoding(anchors)\n anchor_tokens = anchor_tokens.reshape(B, N, -1)\n\n decoded_obs = self.transformer_decoder(anchor_tokens, \n memory_tokens, \n tgt_key_padding_mask=bool_tgt_anchor_mask, \n memory_key_padding_mask=memory_mask) \n decoded_obs = decoded_obs.reshape(B * N, -1)\n\n decoded_obs = self.token_decoder(decoded_obs)\n logits_traj = self.classifier_traj(decoded_obs) \n logits_occ = self.classifier_occ(decoded_obs)\n predictions = self.predictor(decoded_obs)\n\n predictions = predictions.reshape(B, N, -1)\n predictions = predictions.reshape(B, N, self.config['num_modes'], -1, self.step_dim)\n anchors = anchors.reshape(B, N, 1, 1, 2)\n\n predictions[:,:,:,:,:2] = predictions[:,:,:,:,:2] + anchors \n\n logits_traj = logits_traj.reshape(B, N, -1)\n logits_occ = logits_occ.reshape(B, N, -1)\n\n logits_traj = logits_traj * invalid_anchors_mask.reshape(B,N,1)\n logits_occ = logits_occ * invalid_anchors_mask.reshape(B,N,1)\n predictions = predictions * invalid_anchors_mask.reshape(B, 
N, 1, 1, 1)\n\n return {\n 'logits_traj': logits_traj,\n 'logits_occ': logits_occ,\n 'predictions': predictions,\n }" }, { "identifier": "compute_loss", "path": "sceneinformer/model/loss.py", "snippet": "def compute_loss(output: Dict, sample: Dict, configs: Dict, metrics_data_return: bool = False) -> Tuple[torch.Tensor, Dict]:\n predictions = output['predictions']\n logits_traj = output['logits_traj']\n logits_occ = output['logits_occ']\n\n anchors = sample['anchors'].float()\n labels = sample['labels'].float()\n\n B, N, K, T, D = predictions.shape\n device = predictions.device\n \n ############# 1. Get anchors #############\n gt_anchors = labels[:,:,0,-1] \n anchor_indices = torch.argwhere(~torch.isnan(gt_anchors)) \n label_indices = anchor_indices.clone() \n anchor_indices[:,1] = gt_anchors[anchor_indices[:,0], anchor_indices[:,1]] \n\n ############# 2. Get masks #############\n # If the anchor is invalid (nan, not in a map, etc), it is set to 0.0.\n invalid_anchors_mask = torch.ones(B, N).to(device) \n invalid_anchors = torch.argwhere(torch.isnan(anchors[:,:,0])) \n invalid_anchors_mask[invalid_anchors[:,0], invalid_anchors[:,1]] = 0.0\n\n # If anchor is used (appears in labels), it is set to 1.0. There are valid anchors that are not used in occlusions. \n used_anchors_mask = torch.zeros(B, N).to(device)\n used_anchors_mask[anchor_indices[:,0], anchor_indices[:,1]] = 1.0 \n\n ############# 3. We populate gt_trajectory with all labels in the correct positins. Some of them could be nans #############\n gt_trajectory = torch.zeros((B,N,1,T,2)).to(device) \n gt_trajectory[anchor_indices[:,0], anchor_indices[:,1], :, :, :] = (labels[label_indices[:,0], label_indices[:,1],:,0:2]).unsqueeze(1)\n \n gt_trajectory[invalid_anchors[:,0], invalid_anchors[:,1]] = 0 \n true_valid_labels = ~torch.isnan(gt_trajectory)\n gt_trajectory = torch.nan_to_num(gt_trajectory, nan=0) \n \n gt_valid_anchor = used_anchors_mask * invalid_anchors_mask \n gt_valid_mask = true_valid_labels * used_anchors_mask[:,:,None, None, None] * invalid_anchors_mask[:,:,None, None, None] \n sample['gt_valid_mask'] = gt_valid_mask\n pred_trajs = predictions.reshape(B*N,K,T,D) \n gt_trajs = gt_trajectory.reshape(B*N,1,T,2) \n gt_valid_mask = gt_valid_mask.reshape(B*N,1,T,2)\n gt_valid_anchor = gt_valid_anchor.reshape(B*N, 1) \n pred_scores = torch.softmax(logits_traj, dim=-1).reshape(B*N,K)\n regression_loss, regression_indices = nll_loss_gmm_direct(pred_scores, pred_trajs, gt_trajs, gt_valid_mask, gt_valid_anchor)\n regression_indices = regression_indices.reshape(B,N)\n regression_loss = regression_loss.reshape(B,N)\n regression_loss = regression_loss.mean(1)\n ############################################################################################\n \n\n ############# 5. 
Evaluate classification loss ############# \n targets_traj = torch.zeros((B,N)).long().to(device)\n targets_occ = torch.zeros((B,N)).long().to(device)\n\n targets_traj[anchor_indices[:,0], anchor_indices[:,1]] = regression_indices[anchor_indices[:,0], anchor_indices[:,1]] \n targets_occ[anchor_indices[:,0], anchor_indices[:,1]] = 1 \n targets_occ = targets_occ.reshape(B*N)\n targets_traj = targets_traj.reshape(B*N)\n logits_traj = logits_traj.reshape(B*N, -1)\n logits_occ = logits_occ.reshape(B*N, -1)\n\n occ_weights = torch.ones(logits_occ.shape[1]).to(device)\n occ_weights[1] = configs['entropy_weight']\n\n occ_entropy_loss_fcn = torch.nn.CrossEntropyLoss(weight=occ_weights, reduction='none')\n occ_entropy_loss = occ_entropy_loss_fcn(logits_occ, targets_occ).to(device) #(B,N)\n\n traj_entropy_loss_fcn = torch.nn.CrossEntropyLoss(reduction='none')\n traj_entropy_loss = traj_entropy_loss_fcn(logits_traj, targets_traj).to(device) #(B,N)\n\n occ_entropy_loss = occ_entropy_loss.reshape(B,N)\n traj_entropy_loss = traj_entropy_loss.reshape(B,N)\n\n entropy_mask = torch.ones_like(occ_entropy_loss).to(device)\n\n entropy_mask[invalid_anchors[:,0], invalid_anchors[:,1]] = 0.0\n occ_entropy_loss *= entropy_mask\n traj_entropy_loss *= entropy_mask\n\n occ_entropy_loss = occ_entropy_loss.mean((1)) #(B,)\n traj_entropy_loss = traj_entropy_loss.mean((1)) #(B,)\n\n total_loss = (configs['reg_const'] * regression_loss + \\\n configs['occ_class_const'] * occ_entropy_loss + \\\n configs['traj_class_const'] * traj_entropy_loss).mean(0) \n\n metrics_dict = {\n 'total_loss': total_loss,\n 'regression_loss': regression_loss.mean(),\n 'occ_entropy_loss': occ_entropy_loss.mean(),\n 'traj_entropy_loss': traj_entropy_loss.mean(),\n }\n\n if metrics_data_return:\n metrics_data = {\n 'predictions': pred_trajs,\n 'gt_trajectory': gt_trajs,\n 'gt_valid_mask': gt_valid_mask,\n }\n\n return total_loss, metrics_dict, metrics_data\n else:\n return total_loss, metrics_dict" } ]
import torch import lightning.pytorch as pl from sceneinformer.model.encoder import Encoder from sceneinformer.model.decoder import Decoder from sceneinformer.model.loss import compute_loss
3,733
class SceneInformer(pl.LightningModule): def __init__(self, config): super(SceneInformer, self).__init__() config.decoder.num_modes = config.k_modes config.decoder.predictor.out_dim = config.k_modes * (config.n_future_steps) * config.step_dim config.decoder.classifier_traj.out_dim = config.k_modes self.config = config self.learning_rate = config.learning_rate self.loss_config = config.loss self.encoder = Encoder(config.encoder) self.decoder = Decoder(config.decoder) self.decoder.step_dim = config.step_dim self.batch = None def forward(self, sample): encoder_dict = self.encoder(sample) decoder_dict = self.decoder(sample['anchors'], encoder_dict['encoded_obs'], encoder_dict['src_key_padding_mask']) return decoder_dict def training_step(self, batch, batch_idx): prediction_dict = self(batch)
class SceneInformer(pl.LightningModule): def __init__(self, config): super(SceneInformer, self).__init__() config.decoder.num_modes = config.k_modes config.decoder.predictor.out_dim = config.k_modes * (config.n_future_steps) * config.step_dim config.decoder.classifier_traj.out_dim = config.k_modes self.config = config self.learning_rate = config.learning_rate self.loss_config = config.loss self.encoder = Encoder(config.encoder) self.decoder = Decoder(config.decoder) self.decoder.step_dim = config.step_dim self.batch = None def forward(self, sample): encoder_dict = self.encoder(sample) decoder_dict = self.decoder(sample['anchors'], encoder_dict['encoded_obs'], encoder_dict['src_key_padding_mask']) return decoder_dict def training_step(self, batch, batch_idx): prediction_dict = self(batch)
loss, metrics = compute_loss(prediction_dict, batch, self.loss_config)
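compute_loss returns (total_loss, metrics_dict) when metrics_data_return is left at its default, which matches the two-value unpacking in the gold line. The constructor above also sizes the decoder's prediction head as k_modes * n_future_steps * step_dim, which Decoder.forward later reshapes to (B, N, num_modes, T, step_dim). A quick shape sanity check with made-up hyperparameters (none of these numbers are SceneInformer defaults):

import torch

k_modes, n_future_steps, step_dim = 6, 30, 5   # hypothetical values
B, N = 2, 8                                    # batch size, number of anchors

out_dim = k_modes * n_future_steps * step_dim  # predictor.out_dim as set in __init__
flat = torch.zeros(B * N, out_dim)             # per-anchor MLP output

# Decoder.forward: (B*N, out_dim) -> (B, N, K, T, step_dim)
pred = flat.reshape(B, N, -1).reshape(B, N, k_modes, -1, step_dim)
assert pred.shape == (B, N, k_modes, n_future_steps, step_dim)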
2
2023-10-31 08:08:26+00:00
8k
artificial-scientist-lab/XLuminA
xlumina/vectorized_optics.py
[ { "identifier": "profile", "path": "xlumina/toolbox.py", "snippet": "def profile(data_2d, x, y, point1='', point2=''):\n \"\"\"\n Determine profile for a given input without using interpolation.\n \n Parameters:\n data_2d (jnp.array): Input 2D array from which extract the profile.\n point1 (float, float): Initial point.\n point2 (float, float): Final point.\n \n Returns the profile (h and z) of the input (jnp.array).\n \"\"\"\n x1, y1 = point1\n x2, y2 = point2\n\n ix1, value, distance = nearest(x, x1)\n ix2, value, distance = nearest(x, x2)\n iy1, value, distance = nearest(y, y1)\n iy2, value, distance = nearest(y, y2)\n\n # Create a set of x and y points along the line between point1 and point2\n x_points = jnp.linspace(ix1, ix2, int(jnp.hypot(ix2-ix1, iy2-iy1)))\n y_points = jnp.linspace(iy1, iy2, int(jnp.hypot(ix2-ix1, iy2-iy1)))\n\n h = jnp.linspace(0, jnp.sqrt((y2 - y1)**2 + (x2 - x1)**2), len(x_points))\n h = h - h[-1] / 2\n\n z_profile = extract_profile(data_2d, x_points, y_points)\n\n return h, z_profile" }, { "identifier": "build_grid", "path": "xlumina/wave_optics.py", "snippet": "def build_grid(x, y):\n \"\"\"[From RS_propagation]: Returns the grid where the transfer function is defined.\"\"\"\n nx = len(x)\n ny = len(y)\n dx = x[1] - x[0]\n dy = y[1] - y[0]\n # Build 2N-1 x 2N-1 (X, Y) space:\n x_padded = jnp.pad((x[0] - x[::-1]), (0, jnp.size(x) - 1), 'reflect')\n y_padded = jnp.pad((y[0] - y[::-1]), (0, jnp.size(y) - 1), 'reflect')\n # Convert the right half into positive values:\n I = jnp.ones((1, int(len(x_padded) / 2) + 1))\n II = -jnp.ones((1, int(len(x_padded) / 2)))\n III = jnp.ravel(jnp.concatenate((I, II), 1))\n Xext, Yext = jnp.meshgrid(x_padded * III, y_padded * III)\n return nx, ny, dx, dy, Xext, Yext" }, { "identifier": "RS_propagation_jit", "path": "xlumina/wave_optics.py", "snippet": "@partial(jit, static_argnums=(2, 3, 4, 5, 8))\ndef RS_propagation_jit(input_field, z, nx, ny, dx, dy, Xext, Yext, k):\n \"\"\"[From RS_propagation]: JIT function for Equation (10) in [Ref 1].\"\"\"\n # input_field is jnp.array of (N, N)\n H = transfer_function_RS(z, Xext, Yext, k)\n U = jnp.zeros((2 * ny - 1, 2 * nx - 1), dtype=complex)\n U = U.at[0:ny, 0:nx].set(input_field)\n output_field = (jnp.fft.ifft2(jnp.fft.fft2(U) * jnp.fft.fft2(H)) * dx * dy)[ny - 1:, nx - 1:]\n return output_field" }, { "identifier": "build_CZT_grid", "path": "xlumina/wave_optics.py", "snippet": "def build_CZT_grid(z, wavelength, xin, yin, xout, yout):\n \"\"\"\n [From CZT]: Defines the resolution / sampling of initial and output planes.\n \n Parameters:\n xin (jnp.array): Array with the x-positions of the input plane.\n yin (jnp.array): Array with the y-positions of the input plane.\n xout (jnp.array): Array with the x-positions of the output plane.\n yout (jnp.array): Array with the y-positions of the output plane.\n \n Returns the set of parameters: nx, ny, Xout, Yout, dx, dy, delta_out, Dm, fy_1, fy_2, fx_1 and fx_2.\n \"\"\"\n # Resolution of the output plane:\n nx = len(xout)\n ny = len(yout)\n Xout, Yout = jnp.meshgrid(xout, yout)\n \n # Sampling of initial plane:\n dx = xin[1] - xin[0]\n dy = yin[1] - yin[0]\n \n # For Bluestein method implementation: \n # Dimension of the output field - Eq. 
(11) in [Ref].\n Dm = wavelength * z / dx\n \n # (1) for FFT in Y-dimension:\n fy_1 = yout[0] + Dm / 2\n fy_2 = yout[-1] + Dm / 2\n # (1) for FFT in X-dimension:\n fx_1 = xout[0] + Dm / 2\n fx_2 = xout[-1] + Dm / 2\n \n return nx, ny, dx, dy, Xout, Yout, Dm, fy_1, fy_2, fx_1, fx_2" }, { "identifier": "CZT_jit", "path": "xlumina/wave_optics.py", "snippet": "def CZT_jit(field, z, wavelength, k, nx, ny, dx, dy, Xout, Yout, X, Y, Dm, fy_1, fy_2, fx_1, fx_2):\n \"\"\"\n [From CZT]: Diffraction integral implementation using Bluestein method.\n [Ref] Hu, Y., et al. Light Sci Appl 9, 119 (2020).\n \"\"\" \n # Compute the scalar diffraction integral using RS transfer function:\n # See Eq.(3) in [Ref].\n F0 = transfer_function_RS(z, Xout, Yout, k)\n F = transfer_function_RS(z, X, Y, k)\n \n # Compute (E0 x F) in Eq.(6) in [Ref].\n field = field * F\n \n # Bluestein method implementation:\n \n # (1) FFT in Y-dimension:\n U = Bluestein_method(field, fy_1, fy_2, Dm, ny)\n\n # (2) FFT in X-dimension using output from (1):\n U = Bluestein_method(U, fx_1, fx_2, Dm, nx)\n \n # Compute Eq.(6) in [Ref].\n field_at_z = F0 * U * z * dx * dy * wavelength\n \n return field_at_z" }, { "identifier": "CZT_for_high_NA_jit", "path": "xlumina/wave_optics.py", "snippet": "def CZT_for_high_NA_jit(field, nx, ny, Dm, fy_1, fy_2, fx_1, fx_2):\n \"\"\"\n [From VCZT_objective_lens - in optical_elements.py]: Function for Debye integral implementation using Bluestein method.\n [Ref] Hu, Y., et al. Light Sci Appl 9, 119 (2020).\n \"\"\"\n # Bluestein method implementation:\n # (1) FFT in Y-dimension:\n U = Bluestein_method(field, fy_1, fy_2, Dm, ny)\n\n # (2) FFT in X-dimension using output from (1):\n U = Bluestein_method(U, fx_1, fx_2, Dm, nx)\n \n return U" } ]
import numpy as np import jax.numpy as jnp import matplotlib.pyplot as plt import time from jax import jit, vmap, config from functools import partial from .toolbox import profile from .wave_optics import build_grid, RS_propagation_jit, build_CZT_grid, CZT_jit, CZT_for_high_NA_jit
5,598
quality_factor = dr_ideal / dr_real # Stack the input field in a (3, N, N) shape and pass to jit. E_in = jnp.stack([self.Ex, self.Ey, Ez], axis=0) E_out = VRS_propagation_jit(E_in, z, nx, ny, dx, dy, Xext, Yext, self.k) E_out = jnp.moveaxis(E_out, [0, 1, 2], [2, 0, 1]) # Define the output light: light_out = VectorizedLight(self.x, self.y, self.wavelength) light_out.Ex = E_out[:, :, 0] light_out.Ey = E_out[:, :, 1] light_out.Ez = E_out[:, :, 2] print("Time taken to perform one VRS propagation (in seconds):", time.perf_counter() - tic) return light_out, quality_factor def get_VRS_minimum_z(self, n=1, quality_factor=1): """ Given a quality factor, determines the minimum available (trustworthy) distance for VRS_propagation(). [Ref 1: Laser Phys. Lett., 10(6), 065004 (2013)]. Parameters: n (float): refraction index of the surrounding medium. quality_factor (int): Defaults to 1. Returns the minimum distance z (in microns) necessary to achieve qualities larger than quality_factor. >> Diffractio-adapted function (https://pypi.org/project/diffractio/) << """ # Check sampling range_x = self.x[-1] - self.x[0] range_y = self.y[-1] - self.y[0] num_x = jnp.size(self.x) num_y = jnp.size(self.y) dx = range_x / num_x dy = range_y / num_y # Delta rho dr_real = jnp.sqrt(dx**2 + dy**2) # Rho rmax = jnp.sqrt(range_x**2 + range_y**2) factor = (((quality_factor * dr_real + rmax)**2 - (self.wavelength / n)**2 - rmax**2) / (2 * self.wavelength / n))**2 - rmax**2 if factor > 0: z_min = jnp.sqrt(factor) else: z_min = 0 return print("Minimum distance to propagate (in um):", z_min) def VCZT(self, z, xout, yout): """ Vectorial version of the Chirped z-transform propagation - efficient RS diffraction using the Bluestein method. Useful for imaging light in the focal plane: allows high resolution zoom in z-plane. [Ref] Hu, Y., et al. Light Sci Appl 9, 119 (2020). Parameters: z (float): Propagation distance. xout (jnp.array): Array with the x-positions for the output plane. Returns VectorizedLight object after propagation. """ tic = time.perf_counter() if xout is None: xout = self.x if yout is None: yout = self.y # Define r: r = jnp.sqrt(self.X ** 2 + self.Y ** 2 + z ** 2) # Set the value of Ez: Ez = jnp.array((self.Ex * self.X / r + self.Ey * self.Y / r) * z / r) # Define main set of parameters nx, ny, dx, dy, Xout, Yout, Dm, fy_1, fy_2, fx_1, fx_2 = build_CZT_grid(z, self.wavelength, self.x, self.y, xout, yout) # Stack the input field in a (3, N, N) shape and pass to jit. 
E_in = jnp.stack([self.Ex, self.Ey, Ez], axis=0) E_out = VCZT_jit(E_in, z, self.wavelength, self.k, nx, ny, dx, dy, Xout, Yout, self.X, self.Y, Dm, fy_1, fy_2, fx_1, fx_2) E_out = jnp.moveaxis(E_out, [0, 1, 2], [2, 0, 1]) # Define the output light: light_out = VectorizedLight(xout, yout, self.wavelength) light_out.Ex = E_out[:, :, 0] light_out.Ey = E_out[:, :, 1] light_out.Ez = E_out[:, :, 2] print("Time taken to perform one VCZT propagation (in seconds):", time.perf_counter() - tic) return light_out @partial(jit, static_argnums=(2, 3, 4, 5, 8)) def VRS_propagation_jit(input_field, z, nx, ny, dx, dy, Xext, Yext, k): """[From VRS_propagation]: JIT function that vectorizes the propagation and calls RS_propagation_jit from wave_optics.py.""" # Input field has (3, N, N) shape vectorized_RS_propagation = vmap(RS_propagation_jit, in_axes=(0, None, None, None, None, None, None, None, None)) # Call the vectorized function E_out = vectorized_RS_propagation(input_field, z, nx, ny, dx, dy, Xext, Yext, k) return E_out # (3, N, N) -> ([Ex, Ey, Ez], N, N) def VCZT_jit(field, z, wavelength, k, nx, ny, dx, dy, Xout, Yout, X, Y, Dm, fy_1, fy_2, fx_1, fx_2): """[From CZT]: JIT function that vectorizes the propagation and calls CZT_jit from wave_optics.py.""" # Input field has (3, N, N) shape vectorized_CZT = vmap(CZT_jit, in_axes=(0, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)) # Call the vectorized function E_out = vectorized_CZT(field, z, wavelength, k, nx, ny, dx, dy, Xout, Yout, X, Y, Dm, fy_1, fy_2, fx_1, fx_2) return E_out # (3, N, N) -> ([Ex, Ey, Ez], N, N) def vectorized_CZT_for_high_NA(field, nx, ny, Dm, fy_1, fy_2, fx_1, fx_2): """[From VCZT_objective_lens - in optical_elements.py]: JIT function that vectorizes the propagation and calls CZT_for_high_NA_jit from wave_optics.py.""" # Input field has (3, N, N) shape
# Comment this line if float32 is enough precision for you. config.update("jax_enable_x64", True) """ Module for vectorized optical fields: - VectorizedLight: - draw - draw_intensity_profile - VRS_propagation - get_VRS_minimum_z - VCZT - VRS_propagation_jit - VCZT_jit - vectorized_CZT_for_high_NA - PolarizedLightSource: - gaussian_beam - plane_wave """ class VectorizedLight: """ Class for Vectorial EM fields - (Ex, Ey, Ez) """ def __init__(self, x=None, y=None, wavelength=None): self.x = x self.y = y self.X, self.Y = jnp.meshgrid(self.x, self.y) self.wavelength = wavelength self.k = 2 * jnp.pi / wavelength self.n = 1 shape = (jnp.shape(x)[0], jnp.shape(y)[0]) self.Ex = jnp.zeros(shape, dtype=jnp.complex128) self.Ey = jnp.zeros(shape, dtype=jnp.complex128) self.Ez = jnp.zeros(shape, dtype=jnp.complex128) self.info = 'Vectorized light' def draw(self, xlim='', ylim='', kind='', extra_title='', save_file=False, filename=''): """ Plots VectorizedLight. Parameters: xlim (float, float): x-axis limit for plot purpose. ylim (float, float): y-axis limit for plot purpose. kind (str): Feature to plot: 'Intensity', 'Phase' or 'Field'. extra_title (str): Adds extra info to the plot title. save_file (bool): If True, saves the figure. filename (str): Name of the figure. """ extent = [xlim[0], xlim[1], ylim[0], ylim[1]] if kind == 'Intensity': # Compute intensity Ix = jnp.abs(self.Ex) ** 2 # Ex Iy = jnp.abs(self.Ey) ** 2 # Ey Iz = jnp.abs(self.Ez) ** 2 # Ez Ir = Ix + Iy # Er fig, axes = plt.subplots(2, 3, figsize=(14, 7)) cmap = 'gist_heat' ax = axes[0,0] im = ax.imshow(Ix, cmap=cmap, extent=extent, origin='lower') ax.set_title(f"Intensity x. {extra_title}") ax.set_xlabel('$x (\mu m)$') ax.set_ylabel('$y (\mu m)$') fig.colorbar(im, ax=ax) im.set_clim(vmin=jnp.min(Ix), vmax=jnp.max(Ix)) ax = axes[0,1] im = ax.imshow(Iy, cmap=cmap, extent=extent, origin='lower') ax.set_title(f"Intensity y. {extra_title}") ax.set_xlabel('$x (\mu m)$') ax.set_ylabel('$y (\mu m)$') fig.colorbar(im, ax=ax) im.set_clim(vmin=jnp.min(Iy), vmax=jnp.max(Iy)) ax = axes[1,0] im = ax.imshow(Iz, cmap=cmap, extent=extent, origin='lower') ax.set_title(f"Intensity z. {extra_title}") ax.set_xlabel('$x (\mu m)$') ax.set_ylabel('$y (\mu m)$') fig.colorbar(im, ax=ax) im.set_clim(vmin=jnp.min(Iz), vmax=jnp.max(Iz)) ax = axes[0,2] im = ax.imshow(Ir, cmap=cmap, extent=extent, origin='lower') ax.set_title(f"Intensity r. {extra_title}") ax.set_xlabel('$x (\mu m)$') ax.set_ylabel('$y (\mu m)$') fig.colorbar(im, ax=ax) im.set_clim(vmin=jnp.min(Ir), vmax=jnp.max(Ir)) axes[1,1].axis('off') axes[1,2].axis('off') plt.subplots_adjust(wspace=0.6, hspace=0.6) elif kind == 'Phase': # Compute phase phi_x = jnp.angle(self.Ex) # Ex phi_y = jnp.angle(self.Ey) # Ey phi_z = jnp.angle(self.Ez) # Ez fig, axes = plt.subplots(1, 3, figsize=(14, 3)) cmap = 'twilight' ax = axes[0] im = ax.imshow(phi_x, cmap=cmap, extent=extent, origin='lower') ax.set_title(f"Phase x (in radians). {extra_title}") ax.set_xlabel('$x (\mu m)$') ax.set_ylabel('$y (\mu m)$') fig.colorbar(im, ax=ax) im.set_clim(vmin=-jnp.pi, vmax=jnp.pi) ax = axes[1] im = ax.imshow(phi_y, cmap=cmap, extent=extent, origin='lower') ax.set_title(f"Phase y (in radians). {extra_title}") ax.set_xlabel('$x (\mu m)$') ax.set_ylabel('$y (\mu m)$') fig.colorbar(im, ax=ax) im.set_clim(vmin=-jnp.pi, vmax=jnp.pi) ax = axes[2] im = ax.imshow(phi_z, cmap=cmap, extent=extent, origin='lower') ax.set_title(f"Phase z (in radians). 
{extra_title}") ax.set_xlabel('$x (\mu m)$') ax.set_ylabel('$y (\mu m)$') fig.colorbar(im, ax=ax) im.set_clim(vmin=-jnp.pi, vmax=jnp.pi) elif kind == 'Field': # Compute field amplitudes Ax = jnp.abs(self.Ex) # Ex Ay = jnp.abs(self.Ey) # Ey Az = jnp.abs(self.Ez) # Ez fig, axes = plt.subplots(1, 3, figsize=(14, 3)) cmap = 'viridis' ax = axes[0] im = ax.imshow(Ax, cmap=cmap, extent=extent, origin='lower') ax.set_title(f"Amplitude x. {extra_title}") ax.set_xlabel('$x (\mu m)$') ax.set_ylabel('$y (\mu m)$') fig.colorbar(im, ax=ax) im.set_clim(vmin=jnp.min(Ax), vmax=jnp.max(Ax)) ax = axes[1] im = ax.imshow(Ay, cmap=cmap, extent=extent, origin='lower') ax.set_title(f"Amplitude y. {extra_title}") ax.set_xlabel('$x (\mu m)$') ax.set_ylabel('$y (\mu m)$') fig.colorbar(im, ax=ax) im.set_clim(vmin=jnp.min(Ay), vmax=jnp.max(Ay)) ax = axes[2] im = ax.imshow(Az, cmap=cmap, extent=extent, origin='lower') ax.set_title(f"Amplitude z. {extra_title}") ax.set_xlabel('$x (\mu m)$') ax.set_ylabel('$y (\mu m)$') fig.colorbar(im, ax=ax) im.set_clim(vmin=jnp.min(Az), vmax=jnp.max(Az)) else: raise ValueError(f"Invalid kind option: {kind}. Please choose 'Intensity', 'Phase' or 'Field'.") plt.tight_layout() if save_file is True: plt.savefig(filename) print(f"Plot saved as {filename}") plt.show() def draw_intensity_profile(self, p1='', p2=''): """ Draws the intensity profile of VectorizedLight. Parameters: p1 (float, float): Initial point. p2 (float, float): Final point. """ h, z_profile_x = profile(jnp.abs(self.Ex)**2, self.x, self.y, point1=p1, point2=p2) _, z_profile_y = profile(jnp.abs(self.Ey)**2, self.x, self.y, point1=p1, point2=p2) _, z_profile_z = profile(jnp.abs(self.Ez)**2, self.x, self.y, point1=p1, point2=p2) _, z_profile_r = profile(jnp.abs(self.Ex)**2 + jnp.abs(self.Ey)**2, self.x, self.y, point1=p1, point2=p2) _, z_profile_total = profile(jnp.abs(self.Ex)**2 + jnp.abs(self.Ey)**2 + jnp.abs(self.Ez)**2, self.x, self.y, point1=p1, point2=p2) fig, axes = plt.subplots(3, 2, figsize=(14, 14)) ax = axes[0, 0] im = ax.plot(h, z_profile_x, 'k', lw=2) ax.set_title(f"Ix profile") ax.set_xlabel('$\mu m$') ax.set_ylabel('$Ix$') ax.set(xlim=(h.min(), h.max()), ylim=(z_profile_x.min(), z_profile_x.max())) ax = axes[0, 1] im = ax.plot(h, z_profile_y, 'k', lw=2) ax.set_title(f"Iy profile") ax.set_xlabel('$\mu m$') ax.set_ylabel('$Iy$') ax.set(xlim=(h.min(), h.max()), ylim=(z_profile_y.min(), z_profile_y.max())) ax = axes[1, 0] im = ax.plot(h, z_profile_z, 'k', lw=2) ax.set_title(f"Iz profile") ax.set_xlabel('$\mu m$') ax.set_ylabel('$Iz$') ax.set(xlim=(h.min(), h.max()), ylim=(z_profile_z.min(), z_profile_z.max())) ax = axes[1, 1] im = ax.plot(h, z_profile_r, 'k', lw=2) ax.set_title(f"Ir profile") ax.set_xlabel('$\mu m$') ax.set_ylabel('$Ir$') ax.set(xlim=(h.min(), h.max()), ylim=(z_profile_r.min(), z_profile_r.max())) ax = axes[2, 0] im = ax.plot(h, z_profile_total, 'k', lw=2) ax.set_title(f"Itotal profile") ax.set_xlabel('$\mu m$') ax.set_ylabel('$Itotal$') ax.set(xlim=(h.min(), h.max()), ylim=(z_profile_total.min(), z_profile_total.max())) axes[2, 1].axis('off') plt.subplots_adjust(wspace=0.3, hspace=0.4) plt.show() def VRS_propagation(self, z): """ Rayleigh-Sommerfeld diffraction integral in both, z>0 and z<0, for VectorizedLight. [Ref 1: Laser Phys. Lett., 10(6), 065004 (2013)]. [Ref 2: Optics and laser tech., 39(4), 10.1016/j.optlastec.2006.03.006]. [Ref 3: J. Li, Z. Fan, Y. Fu, Proc. SPIE 4915, (2002)]. Parameters: z (float): Distance to propagate. 
Returns VectorizedLight object after propagation and the quality factor of the algorithm. """ tic = time.perf_counter() # Define r [From Ref 1, eq. 1a-1c]: r = jnp.sqrt(self.X ** 2 + self.Y ** 2 + z ** 2) # Set the value of Ez: Ez = jnp.array(self.Ex * self.X / r + self.Ey * self.Y / r) nx, ny, dx, dy, Xext, Yext = build_grid(self.x, self.y) # Quality factor for accurate simulation [Eq. 22 in Ref1]: dr_real = jnp.sqrt(dx**2 + dy**2) # Rho rmax = jnp.sqrt(jnp.max(self.x**2) + jnp.max(self.y**2)) # Delta rho ideal dr_ideal = jnp.sqrt((self.wavelength)**2 + rmax**2 + 2 * (self.wavelength) * jnp.sqrt(rmax**2 + z**2)) - rmax quality_factor = dr_ideal / dr_real # Stack the input field in a (3, N, N) shape and pass to jit. E_in = jnp.stack([self.Ex, self.Ey, Ez], axis=0) E_out = VRS_propagation_jit(E_in, z, nx, ny, dx, dy, Xext, Yext, self.k) E_out = jnp.moveaxis(E_out, [0, 1, 2], [2, 0, 1]) # Define the output light: light_out = VectorizedLight(self.x, self.y, self.wavelength) light_out.Ex = E_out[:, :, 0] light_out.Ey = E_out[:, :, 1] light_out.Ez = E_out[:, :, 2] print("Time taken to perform one VRS propagation (in seconds):", time.perf_counter() - tic) return light_out, quality_factor def get_VRS_minimum_z(self, n=1, quality_factor=1): """ Given a quality factor, determines the minimum available (trustworthy) distance for VRS_propagation(). [Ref 1: Laser Phys. Lett., 10(6), 065004 (2013)]. Parameters: n (float): refraction index of the surrounding medium. quality_factor (int): Defaults to 1. Returns the minimum distance z (in microns) necessary to achieve qualities larger than quality_factor. >> Diffractio-adapted function (https://pypi.org/project/diffractio/) << """ # Check sampling range_x = self.x[-1] - self.x[0] range_y = self.y[-1] - self.y[0] num_x = jnp.size(self.x) num_y = jnp.size(self.y) dx = range_x / num_x dy = range_y / num_y # Delta rho dr_real = jnp.sqrt(dx**2 + dy**2) # Rho rmax = jnp.sqrt(range_x**2 + range_y**2) factor = (((quality_factor * dr_real + rmax)**2 - (self.wavelength / n)**2 - rmax**2) / (2 * self.wavelength / n))**2 - rmax**2 if factor > 0: z_min = jnp.sqrt(factor) else: z_min = 0 return print("Minimum distance to propagate (in um):", z_min) def VCZT(self, z, xout, yout): """ Vectorial version of the Chirped z-transform propagation - efficient RS diffraction using the Bluestein method. Useful for imaging light in the focal plane: allows high resolution zoom in z-plane. [Ref] Hu, Y., et al. Light Sci Appl 9, 119 (2020). Parameters: z (float): Propagation distance. xout (jnp.array): Array with the x-positions for the output plane. Returns VectorizedLight object after propagation. """ tic = time.perf_counter() if xout is None: xout = self.x if yout is None: yout = self.y # Define r: r = jnp.sqrt(self.X ** 2 + self.Y ** 2 + z ** 2) # Set the value of Ez: Ez = jnp.array((self.Ex * self.X / r + self.Ey * self.Y / r) * z / r) # Define main set of parameters nx, ny, dx, dy, Xout, Yout, Dm, fy_1, fy_2, fx_1, fx_2 = build_CZT_grid(z, self.wavelength, self.x, self.y, xout, yout) # Stack the input field in a (3, N, N) shape and pass to jit. 
E_in = jnp.stack([self.Ex, self.Ey, Ez], axis=0) E_out = VCZT_jit(E_in, z, self.wavelength, self.k, nx, ny, dx, dy, Xout, Yout, self.X, self.Y, Dm, fy_1, fy_2, fx_1, fx_2) E_out = jnp.moveaxis(E_out, [0, 1, 2], [2, 0, 1]) # Define the output light: light_out = VectorizedLight(xout, yout, self.wavelength) light_out.Ex = E_out[:, :, 0] light_out.Ey = E_out[:, :, 1] light_out.Ez = E_out[:, :, 2] print("Time taken to perform one VCZT propagation (in seconds):", time.perf_counter() - tic) return light_out @partial(jit, static_argnums=(2, 3, 4, 5, 8)) def VRS_propagation_jit(input_field, z, nx, ny, dx, dy, Xext, Yext, k): """[From VRS_propagation]: JIT function that vectorizes the propagation and calls RS_propagation_jit from wave_optics.py.""" # Input field has (3, N, N) shape vectorized_RS_propagation = vmap(RS_propagation_jit, in_axes=(0, None, None, None, None, None, None, None, None)) # Call the vectorized function E_out = vectorized_RS_propagation(input_field, z, nx, ny, dx, dy, Xext, Yext, k) return E_out # (3, N, N) -> ([Ex, Ey, Ez], N, N) def VCZT_jit(field, z, wavelength, k, nx, ny, dx, dy, Xout, Yout, X, Y, Dm, fy_1, fy_2, fx_1, fx_2): """[From CZT]: JIT function that vectorizes the propagation and calls CZT_jit from wave_optics.py.""" # Input field has (3, N, N) shape vectorized_CZT = vmap(CZT_jit, in_axes=(0, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)) # Call the vectorized function E_out = vectorized_CZT(field, z, wavelength, k, nx, ny, dx, dy, Xout, Yout, X, Y, Dm, fy_1, fy_2, fx_1, fx_2) return E_out # (3, N, N) -> ([Ex, Ey, Ez], N, N) def vectorized_CZT_for_high_NA(field, nx, ny, Dm, fy_1, fy_2, fx_1, fx_2): """[From VCZT_objective_lens - in optical_elements.py]: JIT function that vectorizes the propagation and calls CZT_for_high_NA_jit from wave_optics.py.""" # Input field has (3, N, N) shape
vectorized = vmap(CZT_for_high_NA_jit, in_axes=(0, None, None, None, None, None, None, None))
5
2023-10-26 13:17:50+00:00
8k
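The vectorized_optics record above rests on one pattern: the scalar propagators from wave_optics.py are mapped over the three field components with jax.vmap, using in_axes=(0, None, ...) so that only the stacked (3, N, N) field is batched while the grids and constants are shared, which is exactly what VRS_propagation_jit and VCZT_jit do. A minimal sketch of that pattern follows, with a placeholder transfer function standing in for the real Rayleigh-Sommerfeld kernel.

```python
import jax.numpy as jnp
from jax import vmap

def propagate_component(field, z, k):
    # Placeholder (N, N) -> (N, N) map; NOT the real diffraction kernel, just a scalar routine to vectorize.
    return field * jnp.exp(1j * k * z)

N = 4
Ex = Ey = Ez = jnp.ones((N, N), dtype=complex)
E_in = jnp.stack([Ex, Ey, Ez], axis=0)                      # (3, N, N), as in VRS_propagation

# Batch over the component axis only; z and k are broadcast to every component.
vectorized = vmap(propagate_component, in_axes=(0, None, None))
E_out = vectorized(E_in, 10.0, 2 * jnp.pi / 0.632)          # (3, N, N) -> ([Ex, Ey, Ez], N, N)
print(E_out.shape)
```

The longer in_axes tuples in the record simply extend this to the full RS and CZT argument lists.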
LFhase/GALA
models/ciga.py
[ { "identifier": "relabel", "path": "utils/get_subgraph.py", "snippet": "def relabel(x, edge_index, batch, pos=None):\n\n num_nodes = x.size(0)\n sub_nodes = torch.unique(edge_index)\n x = x[sub_nodes]\n batch = batch[sub_nodes]\n row, col = edge_index\n # remapping the nodes in the explanatory subgraph to new ids.\n node_idx = row.new_full((num_nodes,), -1)\n node_idx[sub_nodes] = torch.arange(sub_nodes.size(0), device=x.device)\n edge_index = node_idx[edge_index]\n if pos is not None:\n pos = pos[sub_nodes]\n return x, edge_index, batch, pos" }, { "identifier": "split_batch", "path": "utils/get_subgraph.py", "snippet": "def split_batch(g):\n split = degree(g.batch[g.edge_index[0]], dtype=torch.long).tolist()\n edge_indices = torch.split(g.edge_index, split, dim=1)\n num_nodes = degree(g.batch, dtype=torch.long)\n cum_nodes = torch.cat([g.batch.new_zeros(1), num_nodes.cumsum(dim=0)[:-1]])\n num_edges = torch.tensor([e.size(1) for e in edge_indices], dtype=torch.long).to(g.x.device)\n cum_edges = torch.cat([g.batch.new_zeros(1), num_edges.cumsum(dim=0)[:-1]])\n\n return edge_indices, num_nodes, cum_nodes, num_edges, cum_edges" }, { "identifier": "clear_masks", "path": "utils/mask.py", "snippet": "def clear_masks(model: nn.Module):\n for module in model.modules():\n if isinstance(module, MessagePassing):\n #PyG 2.0.4\n module._explain = False\n module._edge_mask = None\n # module._apply_sigmoid = True\n #PyG 1.7.2\n module.__explain__ = False\n module.__edge_mask__ = None" }, { "identifier": "set_masks", "path": "utils/mask.py", "snippet": "def set_masks(mask: Tensor, model: nn.Module):\n for module in model.modules():\n if isinstance(module, MessagePassing):\n #PyG 2.0.4\n module._explain = True\n module._edge_mask = mask\n module._apply_sigmoid = False\n #PyG 1.7.2\n module.__explain__ = True\n module.__edge_mask__ = mask" }, { "identifier": "GNN_node", "path": "models/conv.py", "snippet": "class GNN_node(torch.nn.Module):\n \"\"\"\n Output:\n node representations\n \"\"\"\n\n def __init__(self,\n num_layer,\n emb_dim,\n input_dim=1,\n drop_ratio=0.5,\n JK=\"last\",\n residual=False,\n gnn_type='gin',\n edge_dim=-1):\n '''\n emb_dim (int): node embedding dimensionality\n num_layer (int): number of GNN message passing layers\n\n '''\n\n super(GNN_node, self).__init__()\n self.num_layer = num_layer\n self.drop_ratio = drop_ratio\n self.JK = JK\n ### add residual connection or not\n self.residual = residual\n\n # if self.num_layer < 2:\n # raise ValueError(\"Number of GNN layers must be greater than 1.\")\n\n if input_dim == 1:\n self.node_encoder = AtomEncoder(emb_dim) # uniform input node embedding\n self.edge_dim = 1\n elif input_dim == -1:\n # ogbg-ppa\n self.node_encoder = torch.nn.Embedding(1, emb_dim) # uniform input node embedding\n self.edge_dim = 7\n elif edge_dim != -1:\n # drugood\n self.node_encoder = torch.nn.Linear(input_dim, emb_dim) # uniform input node embedding\n self.edge_dim = edge_dim\n else:\n # only for spmotif dataset\n self.node_encoder = torch.nn.Linear(input_dim, emb_dim)\n self.edge_dim = -1\n ###List of GNNs\n self.convs = torch.nn.ModuleList()\n self.batch_norms = torch.nn.ModuleList()\n\n for layer in range(num_layer):\n if gnn_type == 'gin':\n self.convs.append(GINConv(emb_dim, edge_dim=self.edge_dim))\n # self.convs.append(GINConv2(GINConv2.MLP(emb_dim, emb_dim)))\n elif gnn_type == 'gcn':\n self.convs.append(GCNConv(emb_dim, edge_dim=self.edge_dim))\n else:\n ValueError('Undefined GNN type called {}'.format(gnn_type))\n\n 
self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))\n\n def forward(self, batched_data,edge_att=None):\n x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch\n ### computing input node embedding\n h_list = [self.node_encoder(x)]\n for layer in range(self.num_layer):\n h = self.convs[layer](h_list[layer], edge_index, edge_attr)\n h = self.batch_norms[layer](h)\n\n if layer == self.num_layer - 1:\n #remove relu for the last layer\n h = F.dropout(h, self.drop_ratio, training=self.training)\n else:\n h = F.dropout(F.relu(h), self.drop_ratio, training=self.training)\n\n if self.residual:\n h += h_list[layer]\n\n h_list.append(h)\n\n ### Different implementations of Jk-concat\n if self.JK == \"last\":\n node_representation = h_list[-1]\n elif self.JK == \"sum\":\n node_representation = 0\n for layer in range(self.num_layer):\n node_representation += h_list[layer]\n\n return node_representation" }, { "identifier": "GNN_node_Virtualnode", "path": "models/conv.py", "snippet": "class GNN_node_Virtualnode(torch.nn.Module):\n \"\"\"\n Output:\n node representations\n \"\"\"\n\n def __init__(self,\n num_layer,\n emb_dim,\n input_dim=1,\n drop_ratio=0.5,\n JK=\"last\",\n residual=False,\n gnn_type='gin',\n edge_dim=-1):\n '''\n emb_dim (int): node embedding dimensionality\n '''\n\n super(GNN_node_Virtualnode, self).__init__()\n self.num_layer = num_layer\n self.drop_ratio = drop_ratio\n self.JK = JK\n ### add residual connection or not\n self.residual = residual\n\n if self.num_layer < 2:\n raise ValueError(\"Number of GNN layers must be greater than 1.\")\n\n if input_dim == 1:\n self.node_encoder = AtomEncoder(emb_dim) # uniform input node embedding\n self.edge_dim = 1\n elif input_dim == -1:\n # ogbg-ppa\n self.node_encoder = torch.nn.Embedding(1, emb_dim) # uniform input node embedding\n self.edge_dim = 7\n elif edge_dim != -1:\n # drugood\n self.node_encoder = torch.nn.Linear(input_dim, emb_dim) # uniform input node embedding\n self.edge_dim = edge_dim\n else:\n # only for spmotif dataset\n self.node_encoder = torch.nn.Linear(input_dim, emb_dim)\n self.edge_dim = -1\n ### set the initial virtual node embedding to 0.\n self.virtualnode_embedding = torch.nn.Embedding(1, emb_dim)\n torch.nn.init.constant_(self.virtualnode_embedding.weight.data, 0)\n\n ### List of GNNs\n self.convs = torch.nn.ModuleList()\n ### batch norms applied to node embeddings\n self.batch_norms = torch.nn.ModuleList()\n\n ### List of MLPs to transform virtual node at every layer\n self.mlp_virtualnode_list = torch.nn.ModuleList()\n\n for layer in range(num_layer):\n if gnn_type == 'gin':\n self.convs.append(GINConv(emb_dim, edge_dim=self.edge_dim))\n elif gnn_type == 'gcn':\n self.convs.append(GCNConv(emb_dim, edge_dim=self.edge_dim))\n else:\n ValueError('Undefined GNN type called {}'.format(gnn_type))\n\n self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))\n\n for layer in range(num_layer - 1):\n # https://discuss.pytorch.org/t/batchnorm1d-cuda-error-an-illegal-memory-access-was-encountered/127641/5\n self.mlp_virtualnode_list.append(torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), \\\n torch.nn.Linear(2*emb_dim, emb_dim), torch.nn.BatchNorm1d(emb_dim), torch.nn.ReLU()))\n\n def forward(self, batched_data):\n\n x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch\n\n ### virtual node embeddings for graphs\n virtualnode_embedding = 
self.virtualnode_embedding(\n torch.zeros(batch[-1].item() + 1).to(edge_index.dtype).to(edge_index.device))\n h_list = [self.node_encoder(x)]\n for layer in range(self.num_layer):\n ### add message from virtual nodes to graph nodes\n h_list[layer] = h_list[layer] + virtualnode_embedding[batch]\n\n ### Message passing among graph nodes\n h = self.convs[layer](h_list[layer], edge_index, edge_attr)\n\n h = self.batch_norms[layer](h)\n if layer == self.num_layer - 1:\n #remove relu for the last layer\n h = F.dropout(h, self.drop_ratio, training=self.training)\n else:\n h = F.dropout(F.relu(h), self.drop_ratio, training=self.training)\n\n if self.residual:\n h = h + h_list[layer]\n\n h_list.append(h)\n\n ### update the virtual nodes\n if layer < self.num_layer - 1:\n ### add message from graph nodes to virtual nodes\n virtualnode_embedding_temp = global_add_pool(h_list[layer], batch) + virtualnode_embedding\n ### transform virtual nodes using MLP\n\n if self.residual:\n virtualnode_embedding = virtualnode_embedding + F.dropout(\n self.mlp_virtualnode_list[layer](virtualnode_embedding_temp),\n self.drop_ratio,\n training=self.training)\n else:\n virtualnode_embedding = F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp),\n self.drop_ratio,\n training=self.training)\n\n ### Different implementations of Jk-concat\n if self.JK == \"last\":\n node_representation = h_list[-1]\n elif self.JK == \"sum\":\n node_representation = 0\n for layer in range(self.num_layer):\n node_representation += h_list[layer]\n\n return node_representation" }, { "identifier": "GNN", "path": "models/gnn.py", "snippet": "class GNN(torch.nn.Module):\n\n def __init__(self,\n num_class,\n num_layer=5,\n emb_dim=300,\n input_dim=1,\n gnn_type='gin',\n virtual_node=True,\n residual=False,\n drop_ratio=0.5,\n JK=\"last\",\n graph_pooling=\"mean\",\n pred_head=\"cls\",\n edge_dim=-1):\n '''\n num_tasks (int): number of labels to be predicted\n virtual_node (bool): whether to add virtual node or not\n '''\n\n super(GNN, self).__init__()\n\n self.num_layer = num_layer\n self.drop_ratio = drop_ratio\n self.JK = JK\n self.emb_dim = emb_dim\n self.num_class = num_class\n self.graph_pooling = graph_pooling\n\n # if self.num_layer < 2:\n # raise ValueError(\"Number of GNN layers must be greater than 1.\")\n\n ### GNN to generate node embeddings\n if gnn_type.lower() == \"le\":\n self.gnn_node = LeGNN(in_channels=input_dim,\n hid_channels=emb_dim,\n num_layer=num_layer,\n drop_ratio=drop_ratio,\n num_classes=num_class,\n edge_dim=edge_dim)\n else:\n if virtual_node:\n self.gnn_node = GNN_node_Virtualnode(num_layer,\n emb_dim,\n input_dim=input_dim,\n JK=JK,\n drop_ratio=drop_ratio,\n residual=residual,\n gnn_type=gnn_type,\n edge_dim=edge_dim)\n else:\n self.gnn_node = GNN_node(num_layer,\n emb_dim,\n input_dim=input_dim,\n JK=JK,\n drop_ratio=drop_ratio,\n residual=residual,\n gnn_type=gnn_type,\n edge_dim=edge_dim)\n\n ### Pooling function to generate whole-graph embeddings\n if self.graph_pooling == \"sum\":\n self.pool = global_add_pool\n elif self.graph_pooling == \"mean\":\n self.pool = global_mean_pool\n elif self.graph_pooling == \"max\":\n self.pool = global_max_pool\n elif self.graph_pooling == \"attention\":\n self.pool = GlobalAttention(gate_nn=torch.nn.Sequential(torch.nn.Linear(\n emb_dim, 2 * emb_dim), torch.nn.BatchNorm1d(2 *\n emb_dim), torch.nn.ReLU(), torch.nn.Linear(2 * emb_dim, 1)))\n elif self.graph_pooling == \"set2set\":\n self.pool = Set2Set(emb_dim, processing_steps=2)\n else:\n raise 
ValueError(\"Invalid graph pooling type.\")\n\n if pred_head == \"cls\":\n if graph_pooling == \"set2set\":\n self.graph_pred_linear = torch.nn.Linear(2 * self.emb_dim, self.num_class)\n else:\n self.graph_pred_linear = torch.nn.Linear(self.emb_dim, self.num_class)\n elif pred_head == \"inv\":\n self.graph_pred_linear = torch.nn.Sequential(nn.Linear(emb_dim, 2 * emb_dim), nn.ReLU(),\n nn.Linear(2 * emb_dim, self.num_class))\n\n self.spu_mlp = torch.nn.Sequential(nn.Linear(emb_dim, 2 * emb_dim), nn.ReLU(),\n nn.Linear(2 * emb_dim, self.num_class))\n # self.graph_pred_linear = torch.nn.Linear(self.emb_dim, self.num_class)\n # self.spu_mlp = torch.nn.Linear(self.emb_dim, self.num_class)\n self.cq = nn.Linear(self.num_class, self.num_class)\n self.spu_fw = torch.nn.Sequential(self.spu_mlp, self.cq)\n elif pred_head == \"spu\":\n self.graph_pred_linear = torch.nn.Sequential(nn.Linear(emb_dim, 2 * emb_dim), nn.ReLU(),\n nn.Linear(2 * emb_dim, self.num_class))\n self.spu_gcn = GNN_node(num_layer=1,\n emb_dim=emb_dim,\n input_dim=emb_dim,\n JK=JK,\n drop_ratio=drop_ratio,\n residual=residual,\n gnn_type=gnn_type)\n self.spu_mlp = torch.nn.Sequential(nn.Linear(emb_dim, 2 * emb_dim), nn.ReLU(),\n nn.Linear(2 * emb_dim, self.num_class))\n self.cq = nn.Linear(self.num_class, self.num_class)\n self.spu_fw = torch.nn.Sequential(self.spu_mlp, self.cq)\n\n def get_spu_pred_forward(self, batched_data, get_rep=False):\n # if using DIR, won't consider gradients for encoder\n # h_node = self.gnn_node(batched_data)\n # h_graph = self.pool(h_node, batched_data.batch).detach()\n h_node = self.spu_gcn(batched_data)\n h_graph = self.pool(h_node, batched_data.batch)\n\n if get_rep:\n return self.spu_fw(h_graph), h_graph\n return self.spu_fw(h_graph)\n\n def get_spu_pred(self, batched_data, get_rep=False, grad=False):\n # if using DIR, won't consider gradients for encoder\n \n if not grad:\n self.gnn_node.eval()\n h_node = self.gnn_node(batched_data)\n h_graph = self.pool(h_node, batched_data.batch)\n h_graph = h_graph.detach()\n if self.gnn_node.training:\n self.gnn_node.train()\n else:\n h_node = self.gnn_node(batched_data)\n h_graph = self.pool(h_node, batched_data.batch)\n\n if get_rep:\n return self.spu_fw(h_graph), h_graph\n return self.spu_fw(h_graph)\n\n def forward(self, batched_data, get_rep=False,edge_att=None):\n h_node = self.gnn_node(batched_data,edge_att)\n\n h_graph = self.pool(h_node, batched_data.batch)\n\n if get_rep:\n return self.graph_pred_linear(h_graph), h_graph\n return self.graph_pred_linear(h_graph)\n\n def forward_rep(self, batched_data):\n h_node = self.gnn_node(batched_data)\n h_graph = self.pool(h_node, batched_data.batch)\n return h_graph\n\n def forward_cls(self, h_graph):\n return self.graph_pred_linear(h_graph)\n\n def forward_spu_cls(self, h_graph):\n return self.spu_fw(h_graph)\n\n def forward_cl(self, batched_data):\n h_node = self.gnn_node(batched_data)\n\n h_graph = self.pool(h_node, batched_data.batch)\n z = self.proj_head(h_graph)\n return z\n\n def loss_cl(self, x1, x2):\n T = 0.5\n batch_size, _ = x1.size()\n\n x1_abs = x1.norm(dim=1)\n x2_abs = x2.norm(dim=1)\n\n sim_matrix = torch.einsum('ik,jk->ij', x1, x2) / torch.einsum('i,j->ij', x1_abs, x2_abs)\n sim_matrix = torch.exp(sim_matrix / T)\n pos_sim = sim_matrix[range(batch_size), range(batch_size)]\n loss = pos_sim / (sim_matrix.sum(dim=1) - pos_sim)\n loss = -torch.log(loss).mean()\n return loss" }, { "identifier": "LeGNN", "path": "models/gnn.py", "snippet": "class LeGNN(torch.nn.Module):\n\n def __init__(self, 
in_channels, hid_channels=64, num_classes=3, num_layer=2, drop_ratio=0.5, edge_dim=-1):\n super().__init__()\n\n self.num_layer = num_layer\n self.node_emb = nn.Linear(in_channels, hid_channels)\n self.drop_ratio = drop_ratio\n self.convs = nn.ModuleList()\n self.relus = nn.ModuleList()\n for i in range(num_layer):\n conv = LEConv(in_channels=hid_channels, out_channels=hid_channels)\n self.convs.append(conv)\n self.relus.append(nn.ReLU())\n\n def forward(self, batched_data):\n x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch\n\n node_x = self.get_node_reps(x, edge_index, edge_attr, batch)\n return node_x\n\n def get_node_reps(self, x, edge_index, edge_attr, batch):\n x = self.node_emb(x)\n for conv, ReLU in zip(self.convs, self.relus):\n x = conv(x=x, edge_index=edge_index, edge_weight=edge_attr)\n x = F.dropout(x, p=self.drop_ratio, training=self.training)\n x = ReLU(x)\n node_x = x\n return node_x\n\n def get_graph_rep(self, x, edge_index, edge_attr, batch):\n\n node_x = self.get_node_reps(x, edge_index, edge_attr, batch)\n graph_x = global_mean_pool(node_x, batch)\n return graph_x\n\n def get_causal_pred(self, causal_graph_x):\n pred = self.causal_mlp(causal_graph_x)\n return pred\n\n def get_spu_pred(self, spu_graph_x):\n pred = self.spu_fw(spu_graph_x)\n return pred\n\n def get_comb_pred(self, causal_graph_x, spu_graph_x):\n causal_pred = self.causal_mlp(causal_graph_x)\n spu_pred = self.spu_mlp(spu_graph_x).detach()\n return torch.sigmoid(spu_pred) * causal_pred\n\n def reset_parameters(self):\n with torch.no_grad():\n for param in self.parameters():\n param.uniform_(-1.0, 1.0)" } ]
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch_geometric.data.batch as DataBatch import torch_scatter from torch_geometric.nn import (ASAPooling, global_add_pool, global_max_pool, global_mean_pool) from utils.get_subgraph import relabel, split_batch from utils.mask import clear_masks, set_masks from models.conv import GNN_node, GNN_node_Virtualnode from models.gnn import GNN, LeGNN from torch.distributions.normal import Normal from torch_geometric.nn import InstanceNorm from torch_geometric.utils import degree
7,076
self.prior = discrete_gaussian(self.num_envs) def get_env_loss(self,batch,criterion): h_graph = self.gnn.forward_rep(batch) y_part = torch.nan_to_num(batch.y).float().unsqueeze(1) env_prob = self.env_pred_linear(torch.cat([h_graph, y_part], dim=-1)) q_e = torch.softmax(env_prob, dim=-1) batch_size = h_graph.size(0) device = h_graph.device losses = [] for dom in range(self.num_envs): domain_info = torch.ones(batch_size).long().to(device) domain_feat = torch.index_select(self.class_emb, 0, domain_info*dom) p_ye = self.env_label_pred_linear(torch.cat([h_graph, domain_feat], dim=1)) labeled = batch.y == batch.y # there are nan in the labels so use this to mask them # and this is a multitask binary classification # data_belong = torch.arange(batch_size).long() # data_belong = data_belong.unsqueeze(dim=-1).to(device) # data_belong = data_belong.repeat(1, self.num_tasks) # [batch_size, num_tasks] same as p_ye loss = criterion(p_ye[labeled], batch.y[labeled],reduction='none') # shape: [numbers of not nan gts] # batch_loss = torch_scatter.scatter( # loss, dim=0, index=data_belong[labeled], # reduce='mean' # ) # [batch_size] # considering the dataset is a multitask binary # classification task, the process above is to # get a average loss among all the tasks, # when there is only one task, it's equilvant to # bce_with_logit without reduction losses.append(loss) losses = torch.stack(losses, dim=1) # [batch_size, num_domain] Eq = torch.mean(torch.sum(q_e * losses, dim=-1)) ELBO = Eq + KLDist(q_e, self.prior.to(device)) return ELBO def forward_env(self,batch,criterion): batch_size = batch.y.size(0) device = batch.y.device labeled = batch.y == batch.y data_belong = torch.arange(batch_size).long() data_belong = data_belong.unsqueeze(dim=-1).to(device) data_belong = data_belong.repeat(1, self.num_tasks) with torch.no_grad(): self.eval() h_graph = self.gnn.forward_rep(batch) cond_result = [] for dom in range(self.num_envs): domain_info = torch.ones(batch_size).long().to(device) # domain_info = (domain_info * dom).to(device) domain_feat = torch.index_select(self.class_emb, 0, domain_info*dom) cond_term = criterion( self.env_label_pred_linear(torch.cat([h_graph, domain_feat], dim=1))[labeled], batch.y[labeled], reduction='none' ) # cond_term = torch_scatter.scatter( # cond_term, dim=0, index=data_belong[labeled], # reduce='mean' # ) cond_result.append(cond_term) cond_result = torch.stack(cond_result, dim=0) # [num_domain, batch_size] cond_result = torch.matmul(self.prior.to(device), cond_result) # cond_result = torch.mean(cond_result, dim=0) # [batch_size] y_part = torch.nan_to_num(batch.y).unsqueeze(1).float() env_prob = self.env_pred_linear(torch.cat([h_graph, y_part], dim=-1)) env = torch.argmax(env_prob, dim=-1) # [batch_size] return env, cond_result, data_belong def forward(self, batch, return_data="pred"): causal_pred, causal_rep = self.gnn(batch, get_rep=True) if return_data.lower() == "pred": return causal_pred elif return_data.lower() == "rep": return causal_pred, causal_rep elif return_data.lower() == "feat": #Nothing will happen for ERM return causal_pred, causal_rep else: raise Exception("Not support return type") class GNNPooling(nn.Module): def __init__(self, input_dim, out_dim, edge_dim=-1, emb_dim=300, num_layers=5, ratio=0.25, pooling='asap', gnn_type='gin', virtual_node=True, residual=False, drop_ratio=0.5, JK="last", graph_pooling="mean"): super(GNNPooling, self).__init__() if pooling.lower() == 'asap': # Cancel out the edge attribute when using ASAP pooling # since (1) ASAP not compatible 
with edge attr # (2) performance of DrugOOD will not be affected w/o edge attr self.pool = ASAPooling(emb_dim, ratio, dropout=drop_ratio) edge_dim = -1 ### GNN to generate node embeddings if gnn_type.lower() == "le": self.gnn_encoder = LeGNN(in_channels=input_dim, hid_channels=emb_dim, num_layer=num_layers, drop_ratio=drop_ratio, num_classes=out_dim, edge_dim=edge_dim) else: if virtual_node:
class GNNERM(nn.Module): def __init__(self, input_dim, out_dim, edge_dim=-1, emb_dim=300, num_layers=5, ratio=0.25, gnn_type='gin', virtual_node=True, residual=False, drop_ratio=0.5, JK="last", graph_pooling="mean"): super(GNNERM, self).__init__() self.classifier = GNN(gnn_type=gnn_type, input_dim=input_dim, num_class=out_dim, num_layer=num_layers, emb_dim=emb_dim, drop_ratio=drop_ratio, virtual_node=virtual_node, graph_pooling=graph_pooling, residual=residual, JK=JK, edge_dim=edge_dim) def forward(self, batch, return_data="pred"): causal_pred, causal_rep = self.classifier(batch, get_rep=True) if return_data.lower() == "pred": return causal_pred elif return_data.lower() == "rep": return causal_pred, causal_rep elif return_data.lower() == "feat": #Nothing will happen for ERM return causal_pred, causal_rep else: raise Exception("Not support return type") def bce_log(pred, gt, eps=1e-8): prob = torch.sigmoid(pred) return -(gt * torch.log(prob + eps) + (1 - gt) * torch.log(1 - prob + eps)) def discrete_gaussian(nums, std=1): Dist = Normal(loc=0, scale=1) plen, halflen = std * 6 / nums, std * 3 / nums posx = torch.arange(-3 * std + halflen, 3 * std, plen) result = Dist.cdf(posx + halflen) - Dist.cdf(posx - halflen) return result / result.sum() def KLDist(p, q, eps=1e-8): log_p, log_q = torch.log(p + eps), torch.log(q + eps) return torch.sum(p * (log_p - log_q)) class GNNEnv(nn.Module): def __init__(self, input_dim, out_dim, edge_dim=-1, emb_dim=300, num_layers=5, ratio=0.25, gnn_type='gin', virtual_node=True, residual=False, drop_ratio=0.5, JK="last", graph_pooling="mean", num_envs=2, prior="uniform"): super(GNNEnv, self).__init__() self.gnn = GNN(gnn_type=gnn_type, input_dim=input_dim, num_class=out_dim, num_layer=num_layers, emb_dim=emb_dim, drop_ratio=drop_ratio, virtual_node=virtual_node, graph_pooling=graph_pooling, residual=residual, JK=JK, edge_dim=edge_dim) self.num_envs = num_envs self.num_tasks = out_dim # env inference self.env_pred_linear = torch.nn.Linear(emb_dim+1, num_envs) # conditional gnn self.class_emb = torch.nn.Parameter( torch.zeros(num_envs, emb_dim) ) self.env_label_pred_linear = torch.nn.Linear(emb_dim + emb_dim, out_dim) # main gnn self.graph_label_pred_linear = torch.nn.Linear(emb_dim, out_dim) if prior == 'uniform': self.prior = torch.ones(self.num_envs) / self.num_envs else: self.prior = discrete_gaussian(self.num_envs) def get_env_loss(self,batch,criterion): h_graph = self.gnn.forward_rep(batch) y_part = torch.nan_to_num(batch.y).float().unsqueeze(1) env_prob = self.env_pred_linear(torch.cat([h_graph, y_part], dim=-1)) q_e = torch.softmax(env_prob, dim=-1) batch_size = h_graph.size(0) device = h_graph.device losses = [] for dom in range(self.num_envs): domain_info = torch.ones(batch_size).long().to(device) domain_feat = torch.index_select(self.class_emb, 0, domain_info*dom) p_ye = self.env_label_pred_linear(torch.cat([h_graph, domain_feat], dim=1)) labeled = batch.y == batch.y # there are nan in the labels so use this to mask them # and this is a multitask binary classification # data_belong = torch.arange(batch_size).long() # data_belong = data_belong.unsqueeze(dim=-1).to(device) # data_belong = data_belong.repeat(1, self.num_tasks) # [batch_size, num_tasks] same as p_ye loss = criterion(p_ye[labeled], batch.y[labeled],reduction='none') # shape: [numbers of not nan gts] # batch_loss = torch_scatter.scatter( # loss, dim=0, index=data_belong[labeled], # reduce='mean' # ) # [batch_size] # considering the dataset is a multitask binary # classification task, the 
process above is to # get a average loss among all the tasks, # when there is only one task, it's equilvant to # bce_with_logit without reduction losses.append(loss) losses = torch.stack(losses, dim=1) # [batch_size, num_domain] Eq = torch.mean(torch.sum(q_e * losses, dim=-1)) ELBO = Eq + KLDist(q_e, self.prior.to(device)) return ELBO def forward_env(self,batch,criterion): batch_size = batch.y.size(0) device = batch.y.device labeled = batch.y == batch.y data_belong = torch.arange(batch_size).long() data_belong = data_belong.unsqueeze(dim=-1).to(device) data_belong = data_belong.repeat(1, self.num_tasks) with torch.no_grad(): self.eval() h_graph = self.gnn.forward_rep(batch) cond_result = [] for dom in range(self.num_envs): domain_info = torch.ones(batch_size).long().to(device) # domain_info = (domain_info * dom).to(device) domain_feat = torch.index_select(self.class_emb, 0, domain_info*dom) cond_term = criterion( self.env_label_pred_linear(torch.cat([h_graph, domain_feat], dim=1))[labeled], batch.y[labeled], reduction='none' ) # cond_term = torch_scatter.scatter( # cond_term, dim=0, index=data_belong[labeled], # reduce='mean' # ) cond_result.append(cond_term) cond_result = torch.stack(cond_result, dim=0) # [num_domain, batch_size] cond_result = torch.matmul(self.prior.to(device), cond_result) # cond_result = torch.mean(cond_result, dim=0) # [batch_size] y_part = torch.nan_to_num(batch.y).unsqueeze(1).float() env_prob = self.env_pred_linear(torch.cat([h_graph, y_part], dim=-1)) env = torch.argmax(env_prob, dim=-1) # [batch_size] return env, cond_result, data_belong def forward(self, batch, return_data="pred"): causal_pred, causal_rep = self.gnn(batch, get_rep=True) if return_data.lower() == "pred": return causal_pred elif return_data.lower() == "rep": return causal_pred, causal_rep elif return_data.lower() == "feat": #Nothing will happen for ERM return causal_pred, causal_rep else: raise Exception("Not support return type") class GNNPooling(nn.Module): def __init__(self, input_dim, out_dim, edge_dim=-1, emb_dim=300, num_layers=5, ratio=0.25, pooling='asap', gnn_type='gin', virtual_node=True, residual=False, drop_ratio=0.5, JK="last", graph_pooling="mean"): super(GNNPooling, self).__init__() if pooling.lower() == 'asap': # Cancel out the edge attribute when using ASAP pooling # since (1) ASAP not compatible with edge attr # (2) performance of DrugOOD will not be affected w/o edge attr self.pool = ASAPooling(emb_dim, ratio, dropout=drop_ratio) edge_dim = -1 ### GNN to generate node embeddings if gnn_type.lower() == "le": self.gnn_encoder = LeGNN(in_channels=input_dim, hid_channels=emb_dim, num_layer=num_layers, drop_ratio=drop_ratio, num_classes=out_dim, edge_dim=edge_dim) else: if virtual_node:
self.gnn_encoder = GNN_node_Virtualnode(num_layers,
5
2023-10-30 16:57:56+00:00
8k
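In the GALA record above, get_env_loss infers a latent environment per graph and regularizes the softmax posterior q_e toward a fixed prior, either uniform or the discretized standard normal built by discrete_gaussian. Below is a minimal, self-contained sketch of that prior and of the KL term that enters the ELBO; the batch of logits is synthetic and merely stands in for env_pred_linear's output.

```python
import torch
from torch.distributions.normal import Normal

def discrete_gaussian(nums, std=1):
    # Same construction as in the record: bin a standard normal over [-3*std, 3*std] into `nums` equal-width cells.
    dist = Normal(loc=0, scale=1)
    plen, halflen = std * 6 / nums, std * 3 / nums
    posx = torch.arange(-3 * std + halflen, 3 * std, plen)
    result = dist.cdf(posx + halflen) - dist.cdf(posx - halflen)
    return result / result.sum()

def KLDist(p, q, eps=1e-8):
    # Same as in the record: sum of p * log(p / q), broadcast over the batch dimension.
    log_p, log_q = torch.log(p + eps), torch.log(q + eps)
    return torch.sum(p * (log_p - log_q))

num_envs = 4
prior = discrete_gaussian(num_envs)          # fixed prior over inferred environments, sums to 1
logits = torch.randn(8, num_envs)            # synthetic stand-in for env_pred_linear's output (8 graphs)
q_e = torch.softmax(logits, dim=-1)          # per-graph posterior over environments
kl_term = KLDist(q_e, prior)                 # regularizer added to the expected loss in get_env_loss
print(prior, kl_term.item())
```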
Graph-and-Geometric-Learning/D4Explainer
evaluation/robustness.py
[ { "identifier": "feature_dict", "path": "constants.py", "snippet": "" }, { "identifier": "Explainer", "path": "explainers/base.py", "snippet": "class Explainer(object):\n def __init__(self, device, gnn_model_path, task=\"gc\"):\n self.device = device\n self.model = torch.load(gnn_model_path, map_location=self.device).to(self.device)\n self.model.eval()\n self.model_name = self.model.__class__.__name__\n self.name = self.__class__.__name__\n\n self.path = gnn_model_path\n self.last_result = None\n self.vis_dict = None\n self.task = task\n\n def explain_graph(self, graph, **kwargs):\n \"\"\"\n Main part for different graph attribution methods\n :param graph: target graph instance to be explained\n :param kwargs: other parameters\n :return: edge_imp, i.e., attributions for edges, which are derived from the attribution methods.\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def get_rank(lst, r=1):\n topk_idx = list(np.argsort(-lst))\n top_pred = np.zeros_like(lst)\n n = len(lst)\n k = int(r * n)\n for i in range(k):\n top_pred[topk_idx[i]] = n - i\n return top_pred\n\n @staticmethod\n def norm_imp(imp):\n imp[imp < 0] = 0\n imp += 1e-16\n return imp / imp.sum()\n\n def __relabel__(self, g, edge_index):\n sub_nodes = torch.unique(edge_index)\n x = g.x[sub_nodes]\n batch = g.batch[sub_nodes]\n row, col = edge_index\n pos = None\n try:\n pos = g.pos[sub_nodes]\n except Exception:\n pass\n\n # remapping the nodes in the explanatory subgraph to new ids.\n node_idx = row.new_full((g.num_nodes,), -1)\n node_idx[sub_nodes] = torch.arange(sub_nodes.size(0), device=row.device)\n edge_index = node_idx[edge_index]\n return x, edge_index, batch, pos\n\n def __reparameterize__(self, log_alpha, beta=0.1, training=True):\n if training:\n random_noise = torch.rand(log_alpha.size()).to(self.device)\n gate_inputs = torch.log2(random_noise) - torch.log2(1.0 - random_noise)\n gate_inputs = (gate_inputs + log_alpha) / beta + EPS\n gate_inputs = gate_inputs.sigmoid()\n else:\n gate_inputs = log_alpha.sigmoid()\n\n return gate_inputs\n\n def pack_explanatory_subgraph(self, top_ratio=0.2, graph=None, imp=None, relabel=False, if_cf=False):\n \"\"\"\n Pack the explanatory subgraph from the original graph\n :param top_ratio: the ratio of edges to be selected\n :param graph: the original graph\n :param imp: the attribution scores for edges\n :param relabel: whether to relabel the nodes in the explanatory subgraph\n :param if_cf: whether to use the CF method\n :return: the explanatory subgraph\n \"\"\"\n if graph is None:\n graph, imp = self.last_result\n assert len(imp) == graph.num_edges, \"length mismatch\"\n\n top_idx = torch.LongTensor([])\n graph_map = graph.batch[graph.edge_index[0, :]]\n exp_subgraph = graph.clone()\n exp_subgraph.y = graph.y if self.task == \"gc\" else graph.self_y\n for i in range(graph.num_graphs):\n edge_indicator = torch.where(graph_map == i)[0].detach().cpu()\n Gi_n_edge = len(edge_indicator)\n topk = min(max(math.ceil(top_ratio * Gi_n_edge), 1), Gi_n_edge)\n if not if_cf:\n Gi_pos_edge_idx = np.argsort(-imp[edge_indicator])[:topk]\n else:\n Gi_pos_edge_idx = np.argsort(-imp[edge_indicator])[topk:]\n top_idx = torch.cat([top_idx, edge_indicator[Gi_pos_edge_idx]])\n try:\n exp_subgraph.edge_attr = graph.edge_attr[top_idx]\n except Exception:\n pass\n exp_subgraph.edge_index = graph.edge_index[:, top_idx]\n\n exp_subgraph.x = graph.x\n if relabel:\n (exp_subgraph.x, exp_subgraph.edge_index, exp_subgraph.batch, exp_subgraph.pos) = self.__relabel__(\n exp_subgraph, 
exp_subgraph.edge_index\n )\n return exp_subgraph\n\n def evaluate_acc(self, top_ratio_list, graph=None, imp=None, if_cf=False):\n \"\"\"\n Evaluate the accuracy of the explanatory subgraph\n :param top_ratio_list: the ratio of edges to be selected\n :param graph: the original graph\n :param imp: the attribution scores for edges\n :param if_cf: whether to generate cf explanation\n :return: the accuracy of the explanatory subgraph\n \"\"\"\n if graph is None:\n assert self.last_result is not None\n graph, imp = self.last_result\n acc = np.array([[]])\n fidelity = np.array([[]])\n if self.task == \"nc\":\n output_prob, _ = self.model.get_node_pred_subgraph(\n x=graph.x, edge_index=graph.edge_index, mapping=graph.mapping\n )\n else:\n output_prob, _ = self.model.get_pred(x=graph.x, edge_index=graph.edge_index, batch=graph.batch)\n y_pred = output_prob.argmax(dim=-1)\n for idx, top_ratio in enumerate(top_ratio_list):\n exp_subgraph = self.pack_explanatory_subgraph(top_ratio, graph=graph, imp=imp, if_cf=if_cf)\n if self.task == \"nc\":\n soft_pred, _ = self.model.get_node_pred_subgraph(\n x=exp_subgraph.x, edge_index=exp_subgraph.edge_index, mapping=exp_subgraph.mapping\n )\n else:\n soft_pred, _ = self.model.get_pred(\n x=exp_subgraph.x, edge_index=exp_subgraph.edge_index, batch=exp_subgraph.batch\n )\n # soft_pred: [bsz, num_class]\n res_acc = (y_pred == soft_pred.argmax(dim=-1)).detach().cpu().float().view(-1, 1).numpy()\n labels = torch.LongTensor([[i] for i in y_pred]).to(y_pred.device)\n if not if_cf:\n res_fid = soft_pred.gather(1, labels).detach().cpu().float().view(-1, 1).numpy()\n else:\n res_fid = (1 - soft_pred.gather(1, labels)).detach().cpu().float().view(-1, 1).numpy()\n acc = np.concatenate([acc, res_acc], axis=1) # [bsz, len_ratio_list]\n fidelity = np.concatenate([fidelity, res_fid], axis=1)\n return acc, fidelity\n\n def visualize(\n self, graph=None, edge_imp=None, counter_edge_index=None, vis_ratio=0.2, save=False, layout=False, name=None\n ):\n \"\"\"\n Visualize the attribution scores for edges (xx-Motif / Mutag)\n # TODO: visualization for BBBP / node classification\n :param graph: the original graph\n :param edge_imp: the attribution scores for edges\n :param counter_edge_index: the counterfactual edges\n :param vis_ratio: the ratio of edges to be visualized\n :param save: whether to save the visualization\n :param layout: whether to use the layout\n :param name: the name of the visualization\n :return: None\n \"\"\"\n if graph is None:\n assert self.last_result is not None\n graph, edge_imp = self.last_result\n\n topk = max(int(vis_ratio * graph.num_edges), 1)\n idx = np.argsort(-edge_imp)[:topk]\n G = nx.DiGraph()\n G.add_nodes_from(range(graph.num_nodes))\n G.add_edges_from(list(graph.edge_index.cpu().numpy().T))\n\n if counter_edge_index is not None:\n G.add_edges_from(list(counter_edge_index.cpu().numpy().T))\n if self.vis_dict is None:\n self.vis_dict = vis_dict[self.model_name] if self.model_name in vis_dict.keys() else vis_dict[\"default\"]\n\n folder = Path(r\"image/%s\" % (self.model_name))\n if save and not os.path.exists(folder):\n os.makedirs(folder)\n\n edge_pos_mask = np.zeros(graph.num_edges, dtype=np.bool_)\n edge_pos_mask[idx] = True\n vmax = sum(edge_pos_mask)\n node_pos_mask = np.zeros(graph.num_nodes, dtype=np.bool_)\n node_neg_mask = np.zeros(graph.num_nodes, dtype=np.bool_)\n node_pos_idx = np.unique(graph.edge_index[:, edge_pos_mask].cpu().numpy()).tolist()\n node_neg_idx = list(set([i for i in range(graph.num_nodes)]) - set(node_pos_idx))\n 
node_pos_mask[node_pos_idx] = True\n node_neg_mask[node_neg_idx] = True\n\n if \"Motif\" in self.model_name:\n plt.figure(figsize=(8, 6), dpi=100)\n pos = graph.pos[0]\n nx.draw_networkx_nodes(\n G,\n pos={i: pos[i] for i in node_pos_idx},\n nodelist=node_pos_idx,\n node_size=self.vis_dict[\"node_size\"],\n node_color=graph.z[0][node_pos_idx],\n alpha=1,\n cmap=\"winter\",\n linewidths=self.vis_dict[\"linewidths\"],\n edgecolors=\"red\",\n vmin=-max(graph.z[0]),\n vmax=max(graph.z[0]),\n )\n nx.draw_networkx_nodes(\n G,\n pos={i: pos[i] for i in node_neg_idx},\n nodelist=node_neg_idx,\n node_size=self.vis_dict[\"node_size\"],\n node_color=graph.z[0][node_neg_idx],\n alpha=0.2,\n cmap=\"winter\",\n linewidths=self.vis_dict[\"linewidths\"],\n edgecolors=\"whitesmoke\",\n vmin=-max(graph.z[0]),\n vmax=max(graph.z[0]),\n )\n nx.draw_networkx_edges(\n G,\n pos=pos,\n edgelist=list(graph.edge_index.cpu().numpy().T),\n edge_color=\"whitesmoke\",\n width=self.vis_dict[\"width\"],\n arrows=False,\n )\n nx.draw_networkx_edges(\n G,\n pos=pos,\n edgelist=list(graph.edge_index[:, edge_pos_mask].cpu().numpy().T),\n edge_color=self.get_rank(edge_imp[edge_pos_mask]),\n # np.ones(len(edge_imp[edge_pos_mask])),\n width=self.vis_dict[\"width\"],\n edge_cmap=cm.get_cmap(\"bwr\"),\n edge_vmin=-vmax,\n edge_vmax=vmax,\n arrows=False,\n )\n if counter_edge_index is not None:\n nx.draw_networkx_edges(\n G,\n pos=pos,\n edgelist=list(counter_edge_index.cpu().numpy().T),\n edge_color=\"mediumturquoise\",\n width=self.vis_dict[\"width\"] / 3.0,\n arrows=False,\n )\n\n if \"Mutag\" in self.model_name:\n from rdkit.Chem.Draw import rdMolDraw2D\n\n idx = [int(i / 2) for i in idx]\n x = graph.x.detach().cpu().tolist()\n edge_index = graph.edge_index.T.detach().cpu().tolist()\n edge_attr = graph.edge_attr.detach().cpu().tolist()\n mol = graph_to_mol(x, edge_index, edge_attr)\n d = rdMolDraw2D.MolDraw2DCairo(500, 500)\n hit_at = np.unique(graph.edge_index[:, idx].detach().cpu().numpy()).tolist()\n\n def add_atom_index(mol):\n atoms = mol.GetNumAtoms()\n for i in range(atoms):\n mol.GetAtomWithIdx(i).SetProp(\"molAtomMapNumber\", str(mol.GetAtomWithIdx(i).GetIdx()))\n return mol\n\n hit_bonds = []\n for u, v in graph.edge_index.T[idx]:\n hit_bonds.append(mol.GetBondBetweenAtoms(int(u), int(v)).GetIdx())\n rdMolDraw2D.PrepareAndDrawMolecule(\n d,\n mol,\n highlightAtoms=hit_at,\n highlightBonds=hit_bonds,\n highlightAtomColors={i: (0, 1, 0) for i in hit_at},\n highlightBondColors={i: (0, 1, 0) for i in hit_bonds},\n )\n d.FinishDrawing()\n bindata = d.GetDrawingText()\n iobuf = io.BytesIO(bindata)\n image = Image.open(iobuf)\n image.show()\n if save:\n if name:\n d.WriteDrawingText(\"image/%s/%s-%d-%s.png\" % (self.model_name, name, int(graph.y[0]), self.name))\n else:\n d.WriteDrawingText(\n \"image/%s/%s-%d-%s.png\" % (self.model_name, str(graph.name[0]), int(graph.y[0]), self.name)\n )" }, { "identifier": "Powerful", "path": "explainers/diff_explainer.py", "snippet": "def model_save(args, model, mean_train_loss, best_sparsity, mean_test_acc):\ndef loss_func_bce(score_list, groundtruth, sigma_list, mask, device, sparsity_level):\ndef sparsity(score, groundtruth, mask, threshold=0.5):\ndef gnn_pred(graph_batch, graph_batch_sub, gnn_model, ds, task):\ndef loss_cf_exp(gnn_model, graph_batch, score, y_pred, y_exp, full_edge, mask, ds, task=\"nc\"):\n def __init__(self, device, gnn_model_path):\n def explain_graph_task(self, args, train_dataset, test_dataset):\n def train(self, args, model, gnn_model, train_dataset, 
test_dataset):\n def explain_evaluation(self, args, graph):\n def one_step_model_level(self, args, random_adj, node_feature, sigma):\n BCE = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight, reduction=\"none\")\nclass DiffExplainer(Explainer):" }, { "identifier": "gen_list_of_data_single", "path": "explainers/diffusion/graph_utils.py", "snippet": "def gen_list_of_data_single(train_x_b, train_adj_b, train_node_flag_b, sigma_list, args):\n \"\"\"\n Generate the list of data with different noise levels\n :param train_x_b: [batch_size, N, F_in], batch of feature vectors of nodes\n :param train_adj_b: [batch_size, N, N], batch of original adjacency matrices\n :param train_node_flag_b: [batch_size, N], the flags for the existence of nodes\n :param sigma_list: list of noise levels\n :returns:\n train_x_b: [len(sigma_list) * batch_size, N, F_in], batch of feature vectors of nodes\n train_ori_adj_b: [len(sigma_list) * batch_size, N, N], batch of original adjacency matrix (considered as the groundtruth)\n train_node_flag_b: [len(sigma_list) * batch_size, N], the flags for the existence of nodes\n train_noise_adj_b: [len(sigma_list) * batch_size, N, N], batch of noisy adjacency matrices\n noise_list: [len(sigma_list) * batch_size, N, N], the noise added to graph\n \"\"\"\n assert isinstance(sigma_list, list)\n train_noise_adj_b_list = []\n noise_list = []\n for i, sigma_i in enumerate(sigma_list):\n train_noise_adj_b, true_noise = discretenoise_single(\n train_adj_b, node_flags=train_node_flag_b, sigma=sigma_i, device=args.device\n )\n\n train_noise_adj_b_list.append(train_noise_adj_b)\n noise_list.append(true_noise)\n\n train_noise_adj_b = torch.cat(train_noise_adj_b_list, dim=0).to(args.device)\n noise_list = torch.cat(noise_list, dim=0).to(args.device)\n train_x_b = train_x_b.repeat(len(sigma_list), 1, 1)\n train_ori_adj_b = train_adj_b.repeat(len(sigma_list), 1, 1)\n train_node_flag_sigma = train_node_flag_b.repeat(len(sigma_list), 1)\n return (\n train_x_b,\n train_ori_adj_b,\n train_node_flag_sigma,\n train_noise_adj_b,\n noise_list,\n )" }, { "identifier": "generate_mask", "path": "explainers/diffusion/graph_utils.py", "snippet": "def generate_mask(node_flags):\n \"\"\"\n Generate the mask matrix for the existence of nodes\n :param node_flags: [bsz, N], the flags for the existence of nodes\n :return: groundtruth: [bsz, N, N]\n \"\"\"\n flag2 = node_flags.unsqueeze(1) # [bsz,1,N]\n flag1 = node_flags.unsqueeze(-1) # [bsz,N,1]\n mask_matrix = torch.bmm(flag1, flag2) # [bsz, N, N]\n groundtruth = torch.where(mask_matrix > 0.9, 1, 0).to(node_flags.device)\n return groundtruth" }, { "identifier": "graph2tensor", "path": "explainers/diffusion/graph_utils.py", "snippet": "def graph2tensor(graph, device):\n \"\"\"\n Convert graph batch to tensor batch\n :param graph: graph batch\n :param device: device\n :returns:\n adj: [bsz, N, N]\n x: [bsz, N, C]\n \"\"\"\n bsz = graph.num_graphs\n edge_index = graph.edge_index # [2, E_total]\n adj = to_dense_adj(edge_index, batch=graph.batch) # [bsz, max_num_node, max_num_node]\n max_num_node = adj.size(-1)\n node_features = graph.x # [N_total, C]\n feature_dim = node_features.size(-1)\n node_sizes = degree(graph.batch, dtype=torch.long).tolist()\n x_split = node_features.split(node_sizes, dim=0) # list of tensor\n x_tensor = torch.empty((bsz, max_num_node, feature_dim)).to(device)\n assert len(x_split) == bsz\n for i in range(bsz):\n Gi_x = x_split[i]\n num_node = Gi_x.size(0)\n zero_tensor = torch.zeros((max_num_node - num_node, feature_dim)).to(device)\n Gi_x = 
torch.cat((Gi_x, zero_tensor), dim=0)\n assert Gi_x.size(0) == max_num_node\n x_tensor[i] = Gi_x\n return adj, x_tensor" }, { "identifier": "get_datasets", "path": "utils/dataset.py", "snippet": "def get_datasets(name, root=\"data/\"):\n \"\"\"\n Get preloaded datasets by name\n :param name: name of the dataset\n :param root: root path of the dataset\n :return: train_dataset, test_dataset, val_dataset\n \"\"\"\n if name == \"mutag\":\n folder = os.path.join(root, \"MUTAG\")\n train_dataset = Mutagenicity(folder, mode=\"training\")\n test_dataset = Mutagenicity(folder, mode=\"testing\")\n val_dataset = Mutagenicity(folder, mode=\"evaluation\")\n elif name == \"NCI1\":\n folder = os.path.join(root, \"NCI1\")\n train_dataset = NCI1(folder, mode=\"training\")\n test_dataset = NCI1(folder, mode=\"testing\")\n val_dataset = NCI1(folder, mode=\"evaluation\")\n elif name == \"ba3\":\n folder = os.path.join(root, \"BA3\")\n train_dataset = BA3Motif(folder, mode=\"training\")\n test_dataset = BA3Motif(folder, mode=\"testing\")\n val_dataset = BA3Motif(folder, mode=\"evaluation\")\n elif name == \"BA_shapes\":\n folder = os.path.join(root)\n test_dataset = SynGraphDataset(folder, mode=\"testing\", name=\"BA_shapes\")\n val_dataset = SynGraphDataset(folder, mode=\"evaluating\", name=\"BA_shapes\")\n train_dataset = SynGraphDataset(folder, mode=\"training\", name=\"BA_shapes\")\n elif name == \"Tree_Cycle\":\n folder = os.path.join(root)\n test_dataset = SynGraphDataset(folder, mode=\"testing\", name=\"Tree_Cycle\")\n val_dataset = SynGraphDataset(folder, mode=\"evaluating\", name=\"Tree_Cycle\")\n train_dataset = SynGraphDataset(folder, mode=\"training\", name=\"Tree_Cycle\")\n elif name == \"Tree_Grids\":\n folder = os.path.join(root)\n test_dataset = SynGraphDataset(folder, mode=\"testing\", name=\"Tree_Grids\")\n val_dataset = SynGraphDataset(folder, mode=\"evaluating\", name=\"Tree_Grids\")\n train_dataset = SynGraphDataset(folder, mode=\"training\", name=\"Tree_Grids\")\n elif name == \"bbbp\":\n folder = os.path.join(root, \"bbbp\")\n dataset = bbbp(folder)\n test_dataset = dataset[:200]\n val_dataset = dataset[200:400]\n train_dataset = dataset[400:]\n elif name == \"cornell\":\n folder = os.path.join(root)\n test_dataset = WebDataset(folder, mode=\"testing\", name=name)\n val_dataset = WebDataset(folder, mode=\"evaluating\", name=name)\n train_dataset = WebDataset(folder, mode=\"training\", name=name)\n else:\n raise ValueError\n return train_dataset, val_dataset, test_dataset" } ]
import argparse
import copy
import math
import os
import sys
import numpy as np
import torch
from torch_geometric.data import DataLoader
from tqdm import tqdm
from constants import feature_dict, task_type
from explainers import *
from explainers.base import Explainer as BaseExplainer
from explainers.diff_explainer import Powerful, sparsity
from explainers.diffusion.graph_utils import (
    gen_list_of_data_single,
    generate_mask,
    graph2tensor,
)
from gnns import *
from utils.dataset import get_datasets
5,397
sys.path.append("..")


class DiffExplainer(BaseExplainer):
    def __init__(self, device, gnn_model_path, task, args):
        super(DiffExplainer, self).__init__(device, gnn_model_path, task)
        self.device = device
        self.model = Powerful(args).to(args.device)
        exp_dir = f"{args.root}/{args.dataset}/"
        self.model.load_state_dict(torch.load(os.path.join(exp_dir, "best_model.pth"), map_location="cuda:0")["model"])
        self.model.eval()

    def explain_graph(self, model, graph, adj_b, x_b, node_flag_b, sigma_list, args):
        sigma_list = [sigma / 20 for sigma in sigma_list]
        _, _, _, test_noise_adj_b, _ = gen_list_of_data_single(x_b, adj_b, node_flag_b, sigma_list, args)
        test_noise_adj_b_chunked = test_noise_adj_b.chunk(len(sigma_list), dim=0)
        score = []
sys.path.append("..")


class DiffExplainer(BaseExplainer):
    def __init__(self, device, gnn_model_path, task, args):
        super(DiffExplainer, self).__init__(device, gnn_model_path, task)
        self.device = device
        self.model = Powerful(args).to(args.device)
        exp_dir = f"{args.root}/{args.dataset}/"
        self.model.load_state_dict(torch.load(os.path.join(exp_dir, "best_model.pth"), map_location="cuda:0")["model"])
        self.model.eval()

    def explain_graph(self, model, graph, adj_b, x_b, node_flag_b, sigma_list, args):
        sigma_list = [sigma / 20 for sigma in sigma_list]
        _, _, _, test_noise_adj_b, _ = gen_list_of_data_single(x_b, adj_b, node_flag_b, sigma_list, args)
        test_noise_adj_b_chunked = test_noise_adj_b.chunk(len(sigma_list), dim=0)
        score = []
mask = generate_mask(node_flag_b)
4
2023-10-28 19:58:40+00:00
8k
pytabular-ai/auto-scikit-dl
models/node_model.py
[ { "identifier": "TabModel", "path": "models/abstract.py", "snippet": "class TabModel(ABC):\n def __init__(self):\n self.model: Optional[nn.Module] = None # true model\n self.base_name = None # model type name\n self.device = None\n self.saved_model_config = None\n self.training_config = None\n self.meta_config = None\n self.post_init()\n\n def post_init(self):\n self.history = {\n 'train': {'loss': [], 'tot_time': 0, 'avg_step_time': 0, 'avg_epoch_time': 0}, \n 'val': {\n 'metric_name': None, 'metric': [], 'best_metric': None, \n 'log_loss': [], 'best_log_loss': None,\n 'best_epoch': None, 'best_step': None,\n 'tot_time': 0, 'avg_step_time': 0, 'avg_epoch_time': 0\n }, \n # 'test': {'loss': [], 'metric': [], 'final_metric': None},\n 'device': torch.cuda.get_device_name(),\n } # save metrics\n self.no_improvement = 0 # for dnn early stop\n \n def preproc_config(self, model_config: dict):\n \"\"\"default preprocessing for model configurations\"\"\"\n self.saved_model_config = model_config\n return model_config\n \n @abstractmethod\n def fit(\n self,\n X_num: Union[torch.Tensor, np.ndarray], \n X_cat: Union[torch.Tensor, np.ndarray], \n ys: Union[torch.Tensor, np.ndarray],\n y_std: Optional[float],\n eval_set: Optional[Tuple[Union[torch.Tensor, np.ndarray]]],\n patience: int,\n task: str,\n training_args: dict,\n meta_args: Optional[dict],\n ):\n \"\"\"\n Training Model with Early Stop(optional)\n load best weights at the end\n \"\"\"\n pass\n \n def dnn_fit(\n self,\n *,\n dnn_fit_func: Optional[DNN_FIT_API] = None,\n # API for specical sampler like curriculum learning\n train_loader: Optional[Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal dataloader sampler if is None\n X_num: Optional[torch.Tensor] = None, \n X_cat: Optional[torch.Tensor] = None, \n ys: Optional[torch.Tensor] = None,\n y_std: Optional[float] = None, # for RMSE\n eval_set: Tuple[torch.Tensor, np.ndarray] = None, # similar API as sk-learn\n patience: int = 0, # <= 0 without early stop\n task: str,\n training_args: dict,\n meta_args: Optional[dict] = None,\n ):\n # DONE: move to abstract class (dnn_fit)\n if dnn_fit_func is None:\n dnn_fit_func = default_dnn_fit\n # meta args\n if meta_args is None:\n meta_args = {}\n meta_args.setdefault('save_path', f'results/{self.base_name}')\n if not os.path.exists(meta_args['save_path']):\n print('create new results dir: ', meta_args['save_path'])\n os.makedirs(meta_args['save_path'])\n self.meta_config = meta_args\n # optimzier and scheduler\n training_args.setdefault('optimizer', 'adamw')\n optimizer, scheduler = TabModel.make_optimizer(self.model, training_args)\n # data loader\n training_args.setdefault('batch_size', 64)\n training_args.setdefault('ghost_batch_size', None)\n if train_loader is not None:\n train_loader, missing_idx = train_loader\n training_args['batch_size'] = train_loader.batch_size\n else:\n train_loader, missing_idx = TabModel.prepare_tensor_loader(\n X_num=X_num, X_cat=X_cat, ys=ys,\n batch_size=training_args['batch_size'],\n shuffle=True,\n )\n if eval_set is not None:\n eval_set = eval_set[0] # only use the first dev set\n dev_loader = TabModel.prepare_tensor_loader(\n X_num=eval_set[0], X_cat=eval_set[1], ys=eval_set[2],\n batch_size=training_args['batch_size'],\n )\n else:\n dev_loader = None\n # training loops\n training_args.setdefault('max_epochs', 1000)\n # training_args.setdefault('report_frequency', 100) # same as save_freq\n # training_args.setdefault('save_frequency', 100) # save per 100 steps\n 
training_args.setdefault('patience', patience)\n training_args.setdefault('save_frequency', 'epoch') # save per epoch\n self.training_config = training_args\n\n steps_per_backward = 1 if training_args['ghost_batch_size'] is None \\\n else training_args['batch_size'] // training_args['ghost_batch_size']\n steps_per_epoch = len(train_loader)\n tot_step, tot_time = 0, 0\n for e in range(training_args['max_epochs']):\n self.model.train()\n tot_loss = 0\n for step, batch in enumerate(train_loader):\n optimizer.zero_grad()\n x_num, x_cat, y = TabModel.parse_batch(batch, missing_idx, self.device)\n logits, forward_time = dnn_fit_func(self.model, x_num, x_cat, y)\n loss = TabModel.compute_loss(logits, y, task)\n # backward\n start_time = time.time()\n loss.backward()\n backward_time = time.time() - start_time\n self.gradient_policy()\n tot_time += forward_time + backward_time\n optimizer.step()\n if scheduler is not None:\n scheduler.step()\n # print or save infos\n tot_step += 1\n tot_loss += loss.cpu().item()\n if isinstance(training_args['save_frequency'], int) \\\n and tot_step % training_args['save_frequency'] == 0:\n is_early_stop = self.save_evaluate_dnn(\n tot_step, steps_per_epoch, \n tot_loss, tot_time,\n task, training_args['patience'], meta_args['save_path'],\n dev_loader, y_std,\n )\n if is_early_stop:\n self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n return\n if training_args['save_frequency'] == 'epoch':\n if hasattr(self.model, 'layer_masks'):\n print('layer_mask: ', self.model.layer_masks > 0)\n is_early_stop = self.save_evaluate_dnn(\n tot_step, steps_per_epoch, \n tot_loss, tot_time,\n task, training_args['patience'], meta_args['save_path'],\n dev_loader, y_std,\n )\n if is_early_stop:\n self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n return\n self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n \n @abstractmethod\n def predict(\n self,\n dev_loader: Optional[DataLoader],\n X_num: Union[torch.Tensor, np.ndarray], \n X_cat: Union[torch.Tensor, np.ndarray], \n ys: Union[torch.Tensor, np.ndarray],\n y_std: Optional[float],\n task: str,\n return_probs: bool = True,\n return_metric: bool = True,\n return_loss: bool = True,\n meta_args: Optional[dict] = None,\n ):\n \"\"\"\n Prediction\n \"\"\"\n pass\n \n def dnn_predict(\n self,\n *,\n dnn_predict_func: Optional[DNN_PREDICT_API] = None,\n dev_loader: Optional[Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: Optional[torch.Tensor] = None, \n X_cat: Optional[torch.Tensor] = None, \n ys: Optional[torch.Tensor] = None, \n y_std: Optional[float] = None, # for RMSE\n task: str,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: Optional[dict] = None,\n ):\n # DONE: move to abstract class (dnn_predict)\n if dnn_predict_func is None:\n dnn_predict_func = default_dnn_predict\n if dev_loader is None:\n dev_loader, missing_idx = TabModel.prepare_tensor_loader(\n X_num=X_num, X_cat=X_cat, ys=ys,\n batch_size=128,\n )\n else:\n dev_loader, missing_idx = dev_loader\n # print(\"Evaluate...\")\n predictions, golds = [], []\n tot_time = 0\n self.model.eval()\n for batch in dev_loader:\n x_num, x_cat, y = TabModel.parse_batch(batch, missing_idx, self.device)\n with torch.no_grad():\n logits, used_time = dnn_predict_func(self.model, x_num, x_cat)\n tot_time += used_time\n predictions.append(logits)\n golds.append(y)\n self.model.train()\n predictions = 
torch.cat(predictions).squeeze(-1)\n golds = torch.cat(golds)\n if return_loss:\n loss = TabModel.compute_loss(predictions, golds, task).cpu().item()\n else:\n loss = None\n if return_probs and task != 'regression':\n predictions = (\n predictions.sigmoid()\n if task == 'binclass'\n else predictions.softmax(-1)\n )\n prediction_type = 'probs'\n elif task == 'regression':\n prediction_type = None\n else:\n prediction_type = 'logits'\n predictions = predictions.cpu().numpy()\n golds = golds.cpu().numpy()\n if return_metric:\n metric = TabModel.calculate_metric(\n golds, predictions,\n task, prediction_type, y_std\n )\n logloss = (\n log_loss(golds, np.stack([1-predictions, predictions], axis=1), labels=[0,1])\n if task == 'binclass'\n else log_loss(golds, predictions, labels=list(range(len(set(golds)))))\n if task == 'multiclass'\n else None\n )\n else:\n metric, logloss = None, None\n results = {'loss': loss, 'metric': metric, 'time': tot_time, 'log_loss': logloss}\n if meta_args is not None:\n self.save_prediction(meta_args['save_path'], results)\n return predictions, results\n \n def gradient_policy(self):\n \"\"\"For post porcess model gradient\"\"\"\n pass\n \n @abstractmethod\n def save(self, output_dir):\n \"\"\"\n Save model weights and configs,\n the following default save functions\n can be combined to override this function\n \"\"\"\n pass\n\n def save_pt_model(self, output_dir):\n print('saving pt model weights...')\n # save model params\n torch.save(self.model.state_dict(), Path(output_dir) / 'final.bin')\n \n def save_tree_model(self, output_dir):\n print('saving tree model...')\n pass\n\n def save_history(self, output_dir):\n # save metrics\n with open(Path(output_dir) / 'results.json', 'w') as f:\n json.dump(self.history, f, indent=4)\n \n def save_prediction(self, output_dir, results, file='prediction'):\n check_dir(output_dir)\n # save test results\n print(\"saving prediction results\")\n saved_results = {\n 'loss': results['loss'], \n 'metric_name': results['metric'][1], \n 'metric': results['metric'][0], \n 'time': results['time'],\n 'log_loss': results['log_loss'],\n }\n with open(Path(output_dir) / f'{file}.json', 'w') as f:\n json.dump(saved_results, f, indent=4)\n \n def save_config(self, output_dir):\n def serialize(config: dict):\n for key in config:\n # serialized object to store yaml or json files \n if any(isinstance(config[key], obj) for obj in [Path, ]):\n config[key] = str(config[key])\n return config\n # save all configs\n with open(Path(output_dir) / 'configs.yaml', 'w') as f:\n configs = {\n 'model': self.saved_model_config, \n 'training': self.training_config,\n 'meta': serialize(self.meta_config)\n }\n yaml.dump(configs, f, indent=2)\n\n @staticmethod\n def make_optimizer(\n model: nn.Module,\n training_args: dict,\n ) -> Tuple[optim.Optimizer, optim.lr_scheduler._LRScheduler]:\n training_args.setdefault('optimizer', 'adamw')\n training_args.setdefault('no_wd_group', None)\n training_args.setdefault('scheduler', None)\n # optimizer\n if training_args['no_wd_group'] is not None:\n assert isinstance(training_args['no_wd_group'], list)\n def needs_wd(name):\n return all(x not in name for x in training_args['no_wd_group'])\n parameters_with_wd = [v for k, v in model.named_parameters() if needs_wd(k)]\n parameters_without_wd = [v for k, v in model.named_parameters() if not needs_wd(k)]\n model_params = [\n {'params': parameters_with_wd},\n {'params': parameters_without_wd, 'weight_decay': 0.0},\n ]\n else:\n model_params = model.parameters()\n optimizer = 
make_optimizer(\n training_args['optimizer'],\n model_params,\n training_args['lr'],\n training_args['weight_decay'],\n )\n # scheduler\n if training_args['scheduler'] is not None:\n scheduler = None\n else:\n scheduler = None\n\n return optimizer, scheduler\n \n @staticmethod\n def prepare_tensor_loader(\n X_num: Optional[torch.Tensor],\n X_cat: Optional[torch.Tensor],\n ys: torch.Tensor,\n batch_size: int = 64,\n shuffle: bool = False,\n ):\n assert not all(x is None for x in [X_num, X_cat])\n missing_placeholder = 0 if X_num is None else 1 if X_cat is None else -1\n datas = [x for x in [X_num, X_cat, ys] if x is not None]\n tensor_dataset = TensorDataset(*datas)\n tensor_loader = DataLoader(\n tensor_dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n )\n return tensor_loader, missing_placeholder\n \n @staticmethod\n def parse_batch(batch: Tuple[torch.Tensor], missing_idx, device: torch.device):\n if batch[0].device.type != device.type:\n # if batch[0].device != device: # initialize self.device with model.device rather than torch.device()\n # batch = (x.to(device) for x in batch) # generator\n batch = tuple([x.to(device) for x in batch]) # list\n if missing_idx == -1:\n return batch\n else:\n return batch[:missing_idx] + [None,] + batch[missing_idx:]\n \n @staticmethod\n def compute_loss(logits: torch.Tensor, targets: torch.Tensor, task: str, reduction: str = 'mean'):\n loss_fn = {\n 'binclass': F.binary_cross_entropy_with_logits,\n 'multiclass': F.cross_entropy,\n 'regression': F.mse_loss,\n }[task]\n return loss_fn(logits.squeeze(-1), targets, reduction=reduction)\n \n @staticmethod\n def calculate_metric(\n golds,\n predictions,\n task: str,\n prediction_type: Optional[str] = None,\n y_std: Optional[float] = None,\n ):\n \"\"\"Calculate metrics\"\"\"\n metric = {\n 'regression': 'rmse', \n 'binclass': 'roc_auc', \n 'multiclass': 'accuracy'\n }[task]\n \n return calculate_metrics(\n golds, predictions,\n task, prediction_type, y_std\n )[metric], metric\n \n def better_result(self, dev_metric, task, is_loss=False):\n if is_loss: # logloss\n best_dev_metric = self.history['val']['best_log_loss']\n if best_dev_metric is None or best_dev_metric > dev_metric:\n self.history['val']['best_log_loss'] = dev_metric\n return True\n else:\n return False\n best_dev_metric = self.history['val']['best_metric']\n if best_dev_metric is None:\n self.history['val']['best_metric'] = dev_metric\n return True\n elif task == 'regression': # rmse\n if best_dev_metric > dev_metric:\n self.history['val']['best_metric'] = dev_metric\n return True\n else:\n return False\n else:\n if best_dev_metric < dev_metric:\n self.history['val']['best_metric'] = dev_metric\n return True\n else:\n return False\n \n def early_stop_handler(self, epoch, tot_step, dev_metric, task, patience, save_path):\n if task != 'regression' and self.better_result(dev_metric['log_loss'], task, is_loss=True):\n # record best logloss\n torch.save(self.model.state_dict(), Path(save_path) / 'best-logloss.bin')\n if self.better_result(dev_metric['metric'], task):\n print('<<< Best Dev Result', end='')\n torch.save(self.model.state_dict(), Path(save_path) / 'best.bin')\n self.no_improvement = 0\n self.history['val']['best_epoch'] = epoch\n self.history['val']['best_step'] = tot_step\n else:\n self.no_improvement += 1\n print(f'| [no improvement] {self.no_improvement}', end='')\n if patience <= 0:\n return False\n else:\n return self.no_improvement >= patience\n \n def save_evaluate_dnn(\n self, \n # print and saved infos\n tot_step, 
steps_per_epoch, \n tot_loss, tot_time,\n # evaluate infos\n task, patience, save_path,\n dev_loader, y_std\n ):\n \"\"\"For DNN models\"\"\"\n epoch, step = tot_step // steps_per_epoch, (tot_step - 1) % steps_per_epoch + 1\n avg_loss = tot_loss / step\n self.history['train']['loss'].append(avg_loss)\n self.history['train']['tot_time'] = tot_time\n self.history['train']['avg_step_time'] = tot_time / tot_step\n self.history['train']['avg_epoch_time'] = self.history['train']['avg_step_time'] * steps_per_epoch\n print(f\"[epoch] {epoch} | [step] {step} | [tot_step] {tot_step} | [used time] {tot_time:.4g} | [train_loss] {avg_loss:.4g} \", end='')\n if dev_loader is not None:\n _, results = self.predict(dev_loader=dev_loader, y_std=y_std, task=task, return_metric=True)\n dev_metric, metric_name = results['metric']\n print(f\"| [{metric_name}] {dev_metric:.4g} \", end='')\n if task != 'regression':\n print(f\"| [log-loss] {results['log_loss']:.4g} \", end='')\n self.history['val']['log_loss'].append(results['log_loss'])\n self.history['val']['metric_name'] = metric_name\n self.history['val']['metric'].append(dev_metric)\n self.history['val']['tot_time'] += results['time']\n self.history['val']['avg_step_time'] = self.history['val']['tot_time'] / tot_step\n self.history['val']['avg_epoch_time'] = self.history['val']['avg_step_time'] * steps_per_epoch\n dev_metric = {'metric': dev_metric, 'log_loss': results['log_loss']}\n if self.early_stop_handler(epoch, tot_step, dev_metric, task, patience, save_path):\n print(' <<< Early Stop')\n return True\n print()\n return False\n \n def load_best_dnn(self, save_path, file='best'):\n model_file = Path(save_path) / f\"{file}.bin\"\n if not os.path.exists(model_file):\n print(f'There is no {file} checkpoint, loading the last one...')\n model_file = Path(save_path) / 'final.bin'\n else:\n print(f'Loading {file} model...')\n self.model.load_state_dict(torch.load(model_file))\n print('successfully')" }, { "identifier": "check_dir", "path": "models/abstract.py", "snippet": "def check_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)" } ]
import time
import math
import typing as ty
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.node as node
from pathlib import Path
from torch.utils.data import DataLoader
from torch import Tensor
from models.abstract import TabModel, check_dir
5,387
# %%
# %%
class _NODE(nn.Module):
    def __init__(
        self,
        *,
        d_in: int,
        num_layers: int,
        layer_dim: int,
        depth: int,
        tree_dim: int,
        choice_function: str,
        bin_function: str,
        d_out: int,
        categories: ty.Optional[ty.List[int]],
        d_embedding: int,
    ) -> None:
        super().__init__()
        if categories is not None:
            d_in += len(categories) * d_embedding
            category_offsets = torch.tensor([0] + categories[:-1]).cumsum(0)
            self.register_buffer('category_offsets', category_offsets)
            self.category_embeddings = nn.Embedding(sum(categories), d_embedding)
            nn.init.kaiming_uniform_(self.category_embeddings.weight, a=math.sqrt(5))
            print(f'{self.category_embeddings.weight.shape}')

        self.d_out = d_out
        self.block = node.DenseBlock(
            input_dim=d_in,
            num_layers=num_layers,
            layer_dim=layer_dim,
            depth=depth,
            tree_dim=tree_dim,
            bin_function=getattr(node, bin_function),
            choice_function=getattr(node, choice_function),
            flatten_output=False,
        )

    def forward(self, x_num: Tensor, x_cat: Tensor) -> Tensor:
        if x_cat is not None:
            x_cat = self.category_embeddings(x_cat + self.category_offsets[None])
            x = torch.cat([x_num, x_cat.view(x_cat.size(0), -1)], dim=-1)
        else:
            x = x_num
        x = self.block(x)
        x = x[..., : self.d_out].mean(dim=-2)
        x = x.squeeze(-1)
        return x
# %%
# %%
# %%
class _NODE(nn.Module):
    def __init__(
        self,
        *,
        d_in: int,
        num_layers: int,
        layer_dim: int,
        depth: int,
        tree_dim: int,
        choice_function: str,
        bin_function: str,
        d_out: int,
        categories: ty.Optional[ty.List[int]],
        d_embedding: int,
    ) -> None:
        super().__init__()
        if categories is not None:
            d_in += len(categories) * d_embedding
            category_offsets = torch.tensor([0] + categories[:-1]).cumsum(0)
            self.register_buffer('category_offsets', category_offsets)
            self.category_embeddings = nn.Embedding(sum(categories), d_embedding)
            nn.init.kaiming_uniform_(self.category_embeddings.weight, a=math.sqrt(5))
            print(f'{self.category_embeddings.weight.shape}')

        self.d_out = d_out
        self.block = node.DenseBlock(
            input_dim=d_in,
            num_layers=num_layers,
            layer_dim=layer_dim,
            depth=depth,
            tree_dim=tree_dim,
            bin_function=getattr(node, bin_function),
            choice_function=getattr(node, choice_function),
            flatten_output=False,
        )

    def forward(self, x_num: Tensor, x_cat: Tensor) -> Tensor:
        if x_cat is not None:
            x_cat = self.category_embeddings(x_cat + self.category_offsets[None])
            x = torch.cat([x_num, x_cat.view(x_cat.size(0), -1)], dim=-1)
        else:
            x = x_num
        x = self.block(x)
        x = x[..., : self.d_out].mean(dim=-2)
        x = x.squeeze(-1)
        return x
# %%
class NODE(TabModel):
0
2023-10-30 14:55:44+00:00
8k
amazon-science/adaptive-in-context-learning
annotation_methods.py
[ { "identifier": "reliability_plot", "path": "utils.py", "snippet": "def reliability_plot(args, label_map, train_examples, phase=0, bins=10, do_plot=False):\n \"\"\"\n Generate a binned reliability plot.\n\n Parameters\n ----------\n bins (int): Number of bins to perform binned statistics in the adjusted score space.\n is_calibrated (bool): whether the score to compare is before or after calibration.\n \"\"\"\n\n label_to_digit = {}\n for k, v in label_map.items():\n label_to_digit[v] = k\n\n results = []\n preds = []\n golds = []\n y_col = []\n\n\n # current_dir = os.getcwd() \n # par_dir = os.path.dirname(current_dir) \n output_dir_ori = args.output_dir #os.path.join(par_dir,output_dir)\n\n #output_dir_ori = output_dir.copy()\n\n if phase != -1:\n candidate_results_files = os.listdir(os.path.join(output_dir_ori,f'results_iteration_{phase}'))\n else:\n candidate_results_files = os.listdir(os.path.join(output_dir_ori,'results_final_test'))\n\n result_files = [f for f in candidate_results_files if f.endswith('.json')]\n\n if phase != -1:\n output_dir = os.path.join(output_dir_ori,f'results_iteration_{phase}')\n else:\n output_dir = os.path.join(output_dir_ori,'results_final_test')\n\n for file in result_files:\n with open(f\"{output_dir}/{file}\", 'r') as f:\n example_pred = json.load(f)\n idx = int(file[:-5])\n y_col.append(-example_pred[1])\n pred = label_to_digit[example_pred[0]]\n preds.append(pred)\n gold = train_examples[idx][\"label\"]\n golds.append(gold)\n if pred == gold: results.append(1)\n else: results.append(0)\n\n ymax = max(y_col)\n ymin = min(y_col)\n \n y_scaled = [ (yi - ymin) / (ymax - ymin) for yi in y_col ]\n\n ece_score = compute_ece(y_scaled, results)\n print(\"ECE error: \", ece_score)\n\n acc = sum(results) / len(results)\n print(\"Train acc: \", acc)\n\n if do_plot:\n scores_compare = np.array(y_scaled)\n scores_true = np.array(results)\n\n quantiles = np.linspace(0, 1, bins+1)\n bin_edges = np.quantile(scores_compare, quantiles)\n bin_assignment = np.digitize(scores_compare, bin_edges, right=True)\n # scores_compare_bin_means = [scores_compare[bin_assignment == i].mean() for i in range(1, len(bin_edges))]\n scores_compare_bin_means = bin_edges[:-1] + (bin_edges[1:] - bin_edges[:-1])/2\n scores_true_bin_means = [scores_true[bin_assignment == i].mean() for i in range(1, len(bin_edges))]\n\n plt.figure()\n #assert label in self.supported_metric_list\n s = sns.JointGrid(x=scores_compare, y=scores_true)\n sns.histplot(x=scores_compare, ax=s.ax_marg_x, color=\"limegreen\", alpha=0.4, bins=60)\n sns.histplot(y=scores_true, ax=s.ax_marg_y, color=\"blueviolet\", alpha=0.4, bins=60)\n \n scores_compare_bin_means = np.array(scores_compare_bin_means)\n scores_true_bin_means = np.array(scores_true_bin_means)\n\n ax = s.ax_joint\n ax.bar(scores_compare_bin_means, scores_true_bin_means, color='dodgerblue', alpha=0.6, width=0.05)\n ax.plot([min(scores_compare), max(scores_compare) ], [0, 1], 'deeppink', linestyle='--', linewidth=2, alpha=0.7)\n ax.grid(True)\n s.ax_marg_y.grid(False)\n\n ax.set_ylabel(\"Accuracy\", fontsize=16)\n ax.set_xlabel(\"Confidence\", fontsize=16)\n \n ax.set_ylim([0, 1])\n ax.set_xlim([0, 1])\n ax.tick_params(direction=\"in\", labelsize=14)\n ax.set_yticklabels([])\n ax.grid(True)\n s.ax_marg_y.grid(False)\n\n output_dir = os.path.join(output_dir,'figs')\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir, exist_ok=True)\n\n if phase != -1 :\n fig_path = os.path.join(output_dir,\"reliability_plot_\"+str(phase)+\".png\")\n else:\n fig_path = 
os.path.join(output_dir, \"reliability_plot_final_test.png\")\n fig = ax.get_figure()\n fig.set_size_inches(1.3,2)\n fig.savefig(fig_path)\n\n return ece_score, acc" }, { "identifier": "embedding_plot", "path": "utils.py", "snippet": "def embedding_plot(args, label_map, selected_indices,total_train_embeds, phase=0):\n \"\"\"\n Visualization of PCA (2 components) of the data points in the embedding space (e.g., SBERT)\n\n Args:\n args\n label_map (dict): label mapping\n selected_indices (list): selected data for annotation\n total_train_embeds (npy): embedding space\n phase (int, optional): selection phase. Defaults to 0.\n \"\"\"\n \n from sklearn.decomposition import PCA\n pca = PCA(n_components=2)\n pca_result = pca.fit_transform(total_train_embeds)\n\n output_dir = os.path.join(args.output_dir,'figs')\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir, exist_ok=True)\n\n label_to_digit = {}\n for k, v in label_map.items():\n label_to_digit[v] = k\n\n x1 = []\n x2 = []\n y_col = []\n\n if phase != -1:\n candidate_results_files = os.listdir(os.path.join(args.output_dir,f'results_iteration_{phase}'))\n else:\n candidate_results_files = os.listdir(os.path.join(args.output_dir,'results_final_test'))\n #print(candidate_prompt_files)\n result_files = [f for f in candidate_results_files if f.endswith('.json')]\n\n if phase != -1:\n output_dir = os.path.join(args.output_dir,f'results_iteration_{phase}')\n else:\n output_dir = os.path.join(args.output_dir,'results_final_test')\n\n for file in result_files:\n with open(f\"{output_dir}/{file}\", 'r') as f:\n example_pred = json.load(f)\n idx = int(file[:-5])\n y_col.append(-example_pred[1])\n x1.append(pca_result[idx, 0])\n x2.append(pca_result[idx, 1])\n\n ymax = max(y_col)\n ymin = min(y_col)\n \n y_scaled = [ (yi - ymin) / (ymax - ymin) for yi in y_col ]\n\n for idx in selected_indices:\n x1.append(pca_result[idx, 0])\n x2.append(pca_result[idx, 1])\n y_scaled.append(1)\n\n cmap = sns.diverging_palette(10, 133, as_cmap=True)\n plt.figure()\n sns_sctter = sns.scatterplot(\n x=x1, y=x2,\n hue=y_scaled,\n palette=cmap,\n legend=False\n )\n\n for idx in selected_indices:\n x1 = pca_result[idx, 0]\n x2 = pca_result[idx, 1]\n plt.text(x = x1, y = x2, s = \"x\", color = \"blue\", fontsize=\"large\") # set colour of line\n\n output_dir = os.path.join(args.output_dir,'figs')\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir, exist_ok=True)\n\n if phase != -1 :\n fig_path = os.path.join(output_dir,\"embedding_plot_\"+str(phase)+\".png\")\n else:\n fig_path = os.path.join(output_dir, \"embedding_plot_final_test.png\")\n\n fig = sns_sctter.get_figure()\n fig.savefig(fig_path)" }, { "identifier": "cluster", "path": "algorithms.py", "snippet": "def cluster(embeddings,select_num, examples, flag_idx = None, thres=False, reverse=False, clustering_model=None,seed=0):\n\n \"\"\"\n Clustering with K-Means utilities. 
\n \"\"\"\n if thres:\n len_list = []\n n = len(examples)\n\n for ex in examples:\n if \"content\" in ex:\n sent = ex[\"content\"]\n elif \"sentence1\" in ex:\n sent = ex[\"sentence1\"]\n elif \"sentence\" in ex:\n sent = ex[\"sentence\"]\n elif \"text\" in ex:\n sent = ex[\"text\"]\n elif \"premise\" in ex:\n sent = ex[\"premise\"]\n elif \"ctx\" in ex:\n sent = ex[\"ctx\"]\n elif \"question\" in ex:\n sent = ex[\"question\"]\n sent_len = len(sent.strip().split())\n len_list.append(sent_len)\n assert len(len_list) == n\n\n len_list = sorted(len_list)\n\n \n thres_min = 0 \n thres_max = max(len_list[int(0.75*n)], 400)\n else:\n thres_min = 0 \n thres_max = 20000 \n\n\n corpus_embeddings = embeddings\n num_clusters = select_num\n\n # Perform kmean clustering if no model is given\n if clustering_model is None:\n num_clusters = select_num\n clustering_model = KMeans(n_clusters=num_clusters, random_state=seed)\n clustering_model.fit(corpus_embeddings)\n cluster_assignment = clustering_model.labels_\n else:\n num_clusters = len(clustering_model.cluster_centers_.tolist())\n cluster_assignment = clustering_model.predict(corpus_embeddings)\n \n\n clustered_sentences = [[] for i in range(num_clusters)]\n\n\n #distance matrix for each datapoint and cluster centroid\n dist = clustering_model.transform(corpus_embeddings)\n clustered_dists = [[] for i in range(num_clusters)]\n clustered_idx = [[] for i in range(num_clusters)]\n\n for cluster_id in range(num_clusters):\n for sentence_id, _ in enumerate(cluster_assignment):\n clustered_dists[cluster_id].append(dist[sentence_id][cluster_id])\n clustered_idx[cluster_id].append(sentence_id)\n \n demos = []\n\n #Return closest points. Flag_idx flags the candidate points. Thres is a threshold on the length.\n for i in range(len(clustered_dists)):\n tmp = list(map(list, zip(range(len(clustered_dists[i])), clustered_dists[i])))\n top_min_dist = sorted(tmp, key=lambda x: x[1], reverse=reverse)\n\n ok = 0\n for element in top_min_dist:\n min_idx = element[0]\n idx = clustered_idx[i][min_idx]\n\n if idx in demos:\n continue\n if flag_idx is not None:\n if idx not in flag_idx:\n continue\n\n if thres:\n if \"content\" in examples[idx]:\n sent = examples[idx][\"content\"]\n elif \"sentence1\" in examples[idx]:\n sent = examples[idx][\"sentence1\"]\n elif \"sentence\" in examples[idx]:\n sent = examples[idx][\"sentence\"]\n elif \"text\" in examples[idx]:\n sent = examples[idx][\"text\"]\n elif \"premise\" in examples[idx]:\n sent = examples[idx][\"premise\"]\n elif \"ctx\" in examples[idx]:\n sent = examples[idx][\"ctx\"]\n elif \"question\" in examples[idx]:\n sent = examples[idx][\"question\"]\n if len(sent.strip().split()) >= thres_min and len(sent.strip().split()) <= thres_max:\n demos.append(idx)\n ok = 1\n break\n else:\n demos.append(idx)\n ok = 1\n break\n if ok == 0: #recheck\n for element in top_min_dist:\n min_idx = element[0]\n idx = clustered_idx[i][min_idx]\n if idx in demos:\n continue\n else:\n demos.append(idx)\n break\n return demos, clustering_model" }, { "identifier": "fast_votek_mod", "path": "algorithms.py", "snippet": "def fast_votek_mod(embeddings,selected_indices,select_num,k,vote_file=None):\n \"\"\"\n Fast votek method -- similar to kmeans, but uses a graph.\n\n Args:\n embeddings\n selected_indices: already selected indices (to be excluded)\n select_num: new budget\n k: graph hyperparameter\n vote_file: for saving results. 
Defaults to None.\n\n Reference: https://arxiv.org/abs/2209.01975\n\n Returns:\n list: New selected indices\n \"\"\"\n \n n = len(embeddings)\n bar = tqdm(range(n),desc=f'voting')\n vote_stat = defaultdict(list)\n for i in range(n):\n cur_emb = embeddings[i].reshape(1, -1)\n cur_scores = np.sum(cosine_similarity(embeddings, cur_emb), axis=1)\n sorted_indices = np.argsort(cur_scores).tolist()[-k-1:-1]\n for idx in sorted_indices:\n if idx!=i:\n vote_stat[idx].append(i)\n bar.update(1)\n if vote_file is not None:\n with open(vote_file,'w') as f:\n json.dump(vote_stat,f)\n votes = sorted(vote_stat.items(),key=lambda x:len(x[1]),reverse=True)\n new_selected_indices = []\n selected_times = defaultdict(int)\n while len(new_selected_indices)<select_num:\n cur_scores = defaultdict(int)\n for idx,candidates in votes:\n if idx in selected_indices+new_selected_indices:\n cur_scores[idx] = -100\n continue\n for one_support in candidates:\n if not one_support in selected_indices:\n cur_scores[idx] += 10 ** (-selected_times[one_support])\n cur_selected_idx = max(cur_scores.items(),key=lambda x:x[1])[0]\n new_selected_indices.append(int(cur_selected_idx))\n for idx_support in vote_stat[cur_selected_idx]:\n selected_times[idx_support] += 1\n return new_selected_indices" }, { "identifier": "uncertainty_ranking", "path": "algorithms.py", "snippet": "def uncertainty_ranking(selected_indices_first, selected_indices_second, train_embs,test_embs,train_examples,test_examples,return_string,format_example,maximum_input_len,\n label_map,single_context_example_len,inference_model,inference_data_module,tokenizer_gpt,args, step=0, return_sorted_dict=True):\n \"\"\"\n Ranks points based on their uncertaintly (from highest to lowest)\n \"\"\"\n if not args.task_name in ['hellaswag', 'xsum','nq']:\n all_labels = []\n label_to_digit = {}\n for k, v in label_map.items():\n all_labels.append(v)\n label_to_digit[v] = k\n batch_count = step\n \n cur_annotated_examples = [train_examples[idx] for idx in selected_indices_first]\n eval_examples = [test_examples[idx] for idx in selected_indices_second]\n\n #Retrieval\n prompt_retrieval(train_embs=train_embs[selected_indices_first],\n test_embs=test_embs[selected_indices_second],\n train_examples=cur_annotated_examples,\n eval_examples=eval_examples,\n return_string=return_string,\n format_example=format_example,\n maximum_input_len=maximum_input_len,\n args=args,label_map=label_map,\n prompt_identifier=f'prompts_{batch_count}',\n single_context_example_len=single_context_example_len\n )\n\n candidate_prompt_files = os.listdir(os.path.join(args.output_dir,f'prompts_{batch_count}'))\n prompt_files = [f for f in candidate_prompt_files if f.endswith('.json')]\n\n\n output_dir = os.path.join(args.output_dir,f'results_iteration_{batch_count}')\n prompt_dir = os.path.join(args.output_dir,f'prompts_{batch_count}')\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir, exist_ok=True)\n count = step\n \n count += 1\n bar = tqdm(range(len(prompt_files)), desc=f\" prediction iteration {batch_count}\")\n\n #Ranking based on model's loss (see inference_model.do_predict)\n for file in prompt_files:\n bar.update(1)\n \n with open(os.path.join(prompt_dir, file)) as f:\n one_test_example = json.load(f)\n cur_train_data = one_test_example[1]\n for idx in range(len(cur_train_data)):\n cur_train_data[idx]['options'] = all_labels\n\n cur_input = format_example(one_test_example[2],label_map=label_map,args=args)[0]\n inference_data_module.k = len(cur_train_data)\n 
inference_data_module.tensorize(cur_train_data, [cur_input], options=all_labels)\n prediction = inference_model.do_predict(inference_data_module, require_loss=True)[0]\n with open(f\"{output_dir}/{file}\", 'w') as f:\n json.dump(prediction, f)\n\n\n #Save results and return sorted dictionary: \"id\": [label_prediction, uncertainty_score]\n idx_scores = {}\n idx_preds = {}\n n = len(test_examples)\n for idx in selected_indices_second:\n if idx in selected_indices_first:\n # if args.task_name in ['xsum','nq']:\n # idx_scores[idx] = float('inf')\n # else:\n # idx_scores[idx] = float('inf')\n continue\n \n with open(f\"{output_dir}/{idx}.json\") as f:\n one_pred = json.load(f)\n if args.task_name in ['nq']:\n idx_scores[idx] = sum(one_pred['choices'][0][\"logprobs\"][\"token_logprobs\"]) / len(\n one_pred['choices'][0][\"logprobs\"][\"token_logprobs\"])\n else:\n idx_scores[idx] = (one_pred[0], one_pred[1])\n if args.task_name in ['xsum','nq']:\n sorted_scores = sorted(idx_scores.items(), key=lambda x: x[1][1])\n else:\n sorted_scores = sorted(idx_scores.items(), key=lambda x:x[1][1],reverse=True)\n\n\n sorted_scores_len = len(sorted_scores)\n\n sorted_scores_dict = {}\n selected_indices = []\n for (idx, score) in sorted_scores:\n if score[1] > -10000:\n selected_indices.append(idx)\n sorted_scores_dict[idx] = score\n\n if not return_sorted_dict:\n return selected_indices, sorted_scores\n\n return selected_indices, sorted_scores_dict" }, { "identifier": "votek_mod", "path": "algorithms.py", "snippet": "def votek_mod(selected_indices, pool_idx, train_embs,test_embs,train_examples,test_examples,return_string,format_example,maximum_input_len,\n label_map,single_context_example_len,inference_model,inference_data_module,tokenizer_gpt,args, k=20, step=0):\n \n \"\"\"\n Vote-k method, which uniformly (wrt uncertainty) samples diverse datapoints. 
\n Reference: https://arxiv.org/abs/2209.01975\n\n \"\"\"\n\n n = len(train_embs)\n bar = tqdm(range(n),desc=f'voting')\n vote_stat = defaultdict(list)\n for i in range(n):\n cur_emb = train_embs[i].reshape(1, -1)\n cur_scores = np.sum(cosine_similarity(train_embs, cur_emb), axis=1)\n sorted_indices = np.argsort(cur_scores).tolist()[-k-1:-1]\n for idx in sorted_indices:\n if idx!=i:\n vote_stat[idx].append(i)\n bar.update(1)\n \n votes = sorted(vote_stat.items(),key=lambda x:len(x[1]),reverse=True)\n\n \n uncertainty_indices, sorted_scores = uncertainty_ranking(selected_indices, \n pool_idx, \n train_embs=train_embs,\n test_embs=test_embs,\n train_examples=train_examples,\n test_examples=test_examples,\n return_string=return_string,\n format_example=format_example,\n maximum_input_len=maximum_input_len,\n label_map=label_map,\n single_context_example_len=single_context_example_len,\n inference_model=inference_model,\n inference_data_module=inference_data_module,\n tokenizer_gpt=tokenizer_gpt,\n args=args,\n step=step,\n return_sorted_dict=False)\n\n\n # if args.evaluate_calibration:\n # ece_score, acc = reliability_plot(args, label_map, train_examples,phase=step)\n # #embedding_plot(args,label_map,selected_indices,train_embs,phase=step)\n\n # with open(os.path.join(args.output_dir,'result_summary.txt'), 'a') as f:\n # f.write(f\"{len(pool_idx)} train examples, accuracy is {acc}, ece is {ece_score}\\n\") \n \n sorted_scores_len = len(sorted_scores)\n\n\n new_selected_indices = []\n selected_times = defaultdict(int)\n select_num_1 = args.annotation_size #+ init_size - len(selected_indices)\n inter = int(len(pool_idx) * 0.9 / select_num_1)\n for prev_idx in selected_indices:\n for idx_support in vote_stat[str(prev_idx)]:\n selected_times[idx_support] += 1\n count_t = 0\n while len(new_selected_indices) < args.annotation_size and count_t * inter < sorted_scores_len:\n cur_scores = defaultdict(int)\n for idx, _ in sorted_scores[count_t * inter:(count_t + 1) * inter]:\n if not str(idx) in vote_stat:\n cur_scores[idx] = 0\n continue\n candidates = vote_stat[str(idx)]\n if idx in selected_indices or idx in new_selected_indices:\n cur_scores[idx] = -100\n continue\n for one_support in candidates:\n if not one_support in selected_indices:\n cur_scores[idx] += 10 ** (-selected_times[one_support])\n cur_selected_idx = max(cur_scores.items(), key=lambda x: x[1])[0]\n new_selected_indices.append(cur_selected_idx)\n if cur_selected_idx in vote_stat:\n for idx_support in vote_stat[cur_selected_idx]:\n selected_times[idx_support] += 1\n count_t += 1\n if len(new_selected_indices) < args.annotation_size :\n unselected_indices = []\n for unselected_i in pool_idx:\n if not unselected_i in selected_indices and not not unselected_i in new_selected_indices:\n unselected_indices.append(unselected_i)\n new_selected_indices += random.sample(unselected_indices, args.annotation_size - len(new_selected_indices))\n print(f\"{args.annotation_size - len(new_selected_indices)} examples are randomly selected\")\n return new_selected_indices" }, { "identifier": "density_max_coverage", "path": "algorithms.py", "snippet": "def density_max_coverage(embeddings,hard_idx, easy_idx, selected_indices,select_num,k,vote_file=None, weighted=False, two_hop = True, thres_graph=False, mc_selection=\"hard\"):\n \"\"\"\n MaxCover porblem formulation and solution.\n\n Args:\n embeddings \n hard_idx: indices the model is uncertain about\n easy_idx: indices the model is confident about\n selected_indices: already annotated indices\n 
select_num: new budget\n k: graph hyperparameter for k-NN\n vote_file (optional): for saving results. Defaults to None.\n weighted (bool, optional): AdaICL or AdaICL+. Defaults to False.\n two_hop (bool, optional): one-hop or two-hop graph. Defaults to True.\n thres_graph (bool, optional): kNN or threshold graph. Defaults to False.\n mc_selection (str, optional): selecting hard (vs. easy vs. both) examples. Defaults to \"hard\".\n\n Returns:\n list: New annotated data\n \"\"\"\n \n if mc_selection==\"hard\":\n selected = easy_idx.copy() + selected_indices.copy()\n elif mc_selection==\"hard_easy\":\n selected = selected_indices.copy()\n elif mc_selection==\"easy\":\n selected = hard_idx.copy() + selected_indices.copy()\n #selected_indices = easy_idx.copy() + selected_indices.copy()\n n = len(embeddings)\n print(\"2hop graph: \", two_hop)\n \n bar = tqdm(range(n),desc=f'voting')\n vote_stat = defaultdict(list)\n if not thres_graph:\n for i in range(n):\n cur_emb = embeddings[i].reshape(1, -1)\n cur_scores = np.sum(cosine_similarity(embeddings, cur_emb), axis=1)\n sorted_indices = np.argsort(cur_scores).tolist()[-k-1:-1]\n for idx in sorted_indices:\n if idx!=i:\n vote_stat[idx].append(i)\n bar.update(1)\n \n else:\n print(\"Threshold graph\")\n thresholds = []\n for i in range(n):\n cur_emb = embeddings[i].reshape(1, -1)\n cur_scores = np.sum(cosine_similarity(embeddings, cur_emb), axis=1)\n thres_idx = np.argsort(cur_scores).tolist()[-k-1]\n thresholds.append(cur_scores[thres_idx])\n thresholds.sort()\n mean_thres = statistics.median(thresholds) #sum(thresholds) / len(thresholds)\n\n for i in range(n):\n cur_emb = embeddings[i].reshape(1, -1)\n cur_scores = np.sum(cosine_similarity(embeddings, cur_emb), axis=1)\n sorted_indices = np.argsort(cur_scores).tolist()\n for idx in sorted_indices:\n if idx!=i and cur_scores[idx] > mean_thres: # and idx in hard_idx:\n vote_stat[idx].append(i)\n bar.update(1)\n\n if vote_file is not None:\n with open(vote_file,'w') as f:\n json.dump(vote_stat,f)\n\n votes = sorted(vote_stat.items(),key=lambda x:len(x[1]),reverse=True)\n new_selected_indices = []\n \n selected_times = defaultdict(int)\n egonet = defaultdict(list)\n\n #Create egonets\n for idx,candidates in votes:\n for idx_support in candidates:\n if (idx_support in hard_idx) and (idx_support not in egonet[idx]):\n egonet[idx].append(idx_support)\n selected_times[idx] += 1\n if two_hop:\n neigh_2hop = vote_stat[idx_support]\n for node in neigh_2hop:\n if (node in hard_idx) and (node != idx) and (node not in egonet[idx]):\n egonet[idx].append(node)\n selected_times[idx] += 1\n\n \n\n print(\"Distribution of Sets: \", selected_times)\n print(\"Weighted sum:\", weighted)\n\n egonet_greedy = sorted(egonet.items(),key=lambda x:len(x[1]),reverse=True)\n\n selected_weight = defaultdict(int)\n\n #print(\"Egonets:\", egonet_greedy)\n while len(new_selected_indices)<select_num:\n cur_scores = defaultdict(int)\n for idx,candidates in egonet_greedy:\n if idx in selected+new_selected_indices:\n cur_scores[idx] = -100 #sanity check\n continue\n for idx_support in candidates:\n if idx_support in hard_idx: #sanity check\n if weighted:\n cur_scores[idx] += 10 ** (-selected_weight[idx_support])\n else:\n cur_scores[idx] += 1\n\n cur_selected_idx = max(cur_scores.items(),key=lambda x:x[1])[0]\n new_selected_indices.append(int(cur_selected_idx))\n\n for idx_support in egonet[cur_selected_idx]:\n selected_weight[idx_support] += 1\n if (not weighted) and (idx_support in hard_idx):\n hard_idx.remove(idx_support)\n \n \n if 
len(hard_idx) == 0: #only true for weighted=False\n print(\"All hard examples covered, annotation size:\", len(new_selected_indices) )\n break\n\n return new_selected_indices" } ]
import random
import os
import json
from utils import reliability_plot, embedding_plot
from algorithms import cluster, fast_votek_mod, uncertainty_ranking, votek_mod
from algorithms import density_max_coverage
6,899
def selective_annotation_single_phase(args, **kwargs):
    """
    Single-step annotation methods: random, fast-votek, votek, hardest, adaicl-base

    Args:
        args

    Returns:
        list: selected data points for annotation
    """
    random.seed(args.seed)
    init_size = args.init_size
    print("init: ", args.init)
    print("init size: ", args.init_size)

    ### Initial annotated pool $L_0$ (random, clustering, or none)
    if args.init == 'random':
        init_ind = random.sample(range(len(kwargs['train_examples'])), init_size)
        pool_idx = list(range(len(kwargs['embeddings'])))
        for i in init_ind:
            pool_idx.remove(i)

        # naive clustering -- assign cluster centroids on random points
        cur_examples = [kwargs["train_examples"][idx] for idx in init_ind]
def selective_annotation_single_phase(args, **kwargs):
    """
    Single-step annotation methods: random, fast-votek, votek, hardest, adaicl-base

    Args:
        args

    Returns:
        list: selected data points for annotation
    """
    random.seed(args.seed)
    init_size = args.init_size
    print("init: ", args.init)
    print("init size: ", args.init_size)

    ### Initial annotated pool $L_0$ (random, clustering, or none)
    if args.init == 'random':
        init_ind = random.sample(range(len(kwargs['train_examples'])), init_size)
        pool_idx = list(range(len(kwargs['embeddings'])))
        for i in init_ind:
            pool_idx.remove(i)

        # naive clustering -- assign cluster centroids on random points
        cur_examples = [kwargs["train_examples"][idx] for idx in init_ind]
_, clustering_model = cluster(embeddings=kwargs['embeddings'][init_ind],select_num=init_size, examples=cur_examples, thres=False, reverse=False)
2
2023-10-30 16:34:21+00:00
8k
endo-yuki-t/MAG
ldm/modules/diffusionmodules/openaimodel.py
[ { "identifier": "checkpoint", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)" }, { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "linear", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)" }, { "identifier": "avg_pool_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "zero_module", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "normalization", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)" }, { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "SpatialTransformer", "path": "ldm/modules/attention.py", "snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard 
transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n\n def forward(self, x, context=None, mask1=None, mask2=None):\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i], mask1=mask1, mask2=mask2)\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" } ]
from abc import abstractmethod
from ldm.modules.diffusionmodules.util import (
    checkpoint,
    conv_nd,
    linear,
    avg_pool_nd,
    zero_module,
    normalization,
    timestep_embedding,
)
from ldm.modules.attention import SpatialTransformer
from ldm.util import exists
from omegaconf.listconfig import ListConfig
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
4,068
                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
                disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
                use_checkpoint=use_checkpoint
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
            ),
        )
        self._feature_size += ch

        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(self.num_res_blocks[level] + 1):
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=model_channels * mult,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    if exists(disable_self_attentions):
                        disabled_sa = disable_self_attentions[level]
                    else:
                        disabled_sa = False

                    if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
                        layers.append(
                            AttentionBlock(
                                ch,
                                use_checkpoint=use_checkpoint,
                                num_heads=num_heads_upsample,
                                num_head_channels=dim_head,
                                use_new_attention_order=use_new_attention_order,
                            ) if not use_spatial_transformer else SpatialTransformer(
                                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
                                disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
                                use_checkpoint=use_checkpoint
                            )
                        )
                if level and i == self.num_res_blocks[level]:
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch

        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
        )
        if self.predict_codebook_ids:
            self.id_predictor = nn.Sequential(
                normalization(ch),
                conv_nd(dims, model_channels, n_embed, 1),
                #nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
            )

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)
        self.output_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)
        self.output_blocks.apply(convert_module_to_f32)

    def forward(self, x, timesteps=None, context=None, y=None, att_mask=None, **kwargs):
        """
        Apply the model to an input batch.
        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param context: conditioning plugged in via crossattn
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        hs = []
# dummy replace def convert_module_to_f16(x): pass def convert_module_to_f32(x): pass ## go class AttentionPool2d(nn.Module): """ Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py """ def __init__( self, spacial_dim: int, embed_dim: int, num_heads_channels: int, output_dim: int = None, ): super().__init__() self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) self.num_heads = embed_dim // num_heads_channels self.attention = QKVAttention(self.num_heads) def forward(self, x): b, c, *_spatial = x.shape x = x.reshape(b, c, -1) # NC(HW) x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) x = self.qkv_proj(x) x = self.attention(x) x = self.c_proj(x) return x[:, :, 0] class TimestepBlock(nn.Module): """ Any module where forward() takes timestep embeddings as a second argument. """ @abstractmethod def forward(self, x, emb): """ Apply the module to `x` given `emb` timestep embeddings. """ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ def forward(self, x, emb, context=None, att_mask=None): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb) elif isinstance(layer, SpatialTransformer): x = layer(x, context, mask2=att_mask) else: x = layer(x) return x class Upsample(nn.Module): """ An upsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then upsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims if use_conv: self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) def forward(self, x): assert x.shape[1] == self.channels if self.dims == 3: x = F.interpolate( x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" ) else: x = F.interpolate(x, scale_factor=2, mode="nearest") if self.use_conv: x = self.conv(x) return x class TransposedUpsample(nn.Module): 'Learned 2x upsampling without padding' def __init__(self, channels, out_channels=None, ks=5): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) def forward(self,x): return self.up(x) class Downsample(nn.Module): """ A downsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two dimensions. 
""" def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims stride = 2 if dims != 3 else (1, 2, 2) if use_conv: self.op = conv_nd( dims, self.channels, self.out_channels, 3, stride=stride, padding=padding ) else: assert self.channels == self.out_channels self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) def forward(self, x): assert x.shape[1] == self.channels return self.op(x) class ResBlock(TimestepBlock): """ A residual block that can optionally change the number of channels. :param channels: the number of input channels. :param emb_channels: the number of timestep embedding channels. :param dropout: the rate of dropout. :param out_channels: if specified, the number of out channels. :param use_conv: if True and out_channels is specified, use a spatial convolution instead of a smaller 1x1 convolution to change the channels in the skip connection. :param dims: determines if the signal is 1D, 2D, or 3D. :param use_checkpoint: if True, use gradient checkpointing on this module. :param up: if True, use this block for upsampling. :param down: if True, use this block for downsampling. """ def __init__( self, channels, emb_channels, dropout, out_channels=None, use_conv=False, use_scale_shift_norm=False, dims=2, use_checkpoint=False, up=False, down=False, ): super().__init__() self.channels = channels self.emb_channels = emb_channels self.dropout = dropout self.out_channels = out_channels or channels self.use_conv = use_conv self.use_checkpoint = use_checkpoint self.use_scale_shift_norm = use_scale_shift_norm self.in_layers = nn.Sequential( normalization(channels), nn.SiLU(), conv_nd(dims, channels, self.out_channels, 3, padding=1), ) self.updown = up or down if up: self.h_upd = Upsample(channels, False, dims) self.x_upd = Upsample(channels, False, dims) elif down: self.h_upd = Downsample(channels, False, dims) self.x_upd = Downsample(channels, False, dims) else: self.h_upd = self.x_upd = nn.Identity() self.emb_layers = nn.Sequential( nn.SiLU(), linear( emb_channels, 2 * self.out_channels if use_scale_shift_norm else self.out_channels, ), ) self.out_layers = nn.Sequential( normalization(self.out_channels), nn.SiLU(), nn.Dropout(p=dropout), zero_module( conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) ), ) if self.out_channels == channels: self.skip_connection = nn.Identity() elif use_conv: self.skip_connection = conv_nd( dims, channels, self.out_channels, 3, padding=1 ) else: self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) def forward(self, x, emb): """ Apply the block to a Tensor, conditioned on a timestep embedding. :param x: an [N x C x ...] Tensor of features. :param emb: an [N x emb_channels] Tensor of timestep embeddings. :return: an [N x C x ...] Tensor of outputs. 
""" return checkpoint( self._forward, (x, emb), self.parameters(), self.use_checkpoint ) def _forward(self, x, emb): if self.updown: in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] h = in_rest(x) h = self.h_upd(h) x = self.x_upd(x) h = in_conv(h) else: h = self.in_layers(x) emb_out = self.emb_layers(emb).type(h.dtype) while len(emb_out.shape) < len(h.shape): emb_out = emb_out[..., None] if self.use_scale_shift_norm: out_norm, out_rest = self.out_layers[0], self.out_layers[1:] scale, shift = th.chunk(emb_out, 2, dim=1) h = out_norm(h) * (1 + scale) + shift h = out_rest(h) else: h = h + emb_out h = self.out_layers(h) return self.skip_connection(x) + h class AttentionBlock(nn.Module): """ An attention block that allows spatial positions to attend to each other. Originally ported from here, but adapted to the N-d case. https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. """ def __init__( self, channels, num_heads=1, num_head_channels=-1, use_checkpoint=False, use_new_attention_order=False, ): super().__init__() self.channels = channels if num_head_channels == -1: self.num_heads = num_heads else: assert ( channels % num_head_channels == 0 ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" self.num_heads = channels // num_head_channels self.use_checkpoint = use_checkpoint self.norm = normalization(channels) self.qkv = conv_nd(1, channels, channels * 3, 1) if use_new_attention_order: # split qkv before split heads self.attention = QKVAttention(self.num_heads) else: # split heads before split qkv self.attention = QKVAttentionLegacy(self.num_heads) self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) def forward(self, x): return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! #return pt_checkpoint(self._forward, x) # pytorch def _forward(self, x): b, c, *spatial = x.shape x = x.reshape(b, c, -1) qkv = self.qkv(self.norm(x)) h = self.attention(qkv) h = self.proj_out(h) return (x + h).reshape(b, c, *spatial) def count_flops_attn(model, _x, y): """ A counter for the `thop` package to count the operations in an attention operation. Meant to be used like: macs, params = thop.profile( model, inputs=(inputs, timestamps), custom_ops={QKVAttention: QKVAttention.count_flops}, ) """ b, c, *spatial = y[0].shape num_spatial = int(np.prod(spatial)) # We perform two matmuls with the same number of ops. # The first computes the weight matrix, the second computes # the combination of the value vectors. matmul_ops = 2 * b * (num_spatial ** 2) * c model.total_ops += th.DoubleTensor([matmul_ops]) class QKVAttentionLegacy(nn.Module): """ A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping """ def __init__(self, n_heads): super().__init__() self.n_heads = n_heads def forward(self, qkv): """ Apply QKV attention. :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. :return: an [N x (H * C) x T] tensor after attention. 
""" bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum( "bct,bcs->bts", q * scale, k * scale ) # More stable with f16 than dividing afterwards weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) a = th.einsum("bts,bcs->bct", weight, v) return a.reshape(bs, -1, length) @staticmethod def count_flops(model, _x, y): return count_flops_attn(model, _x, y) class QKVAttention(nn.Module): """ A module which performs QKV attention and splits in a different order. """ def __init__(self, n_heads): super().__init__() self.n_heads = n_heads def forward(self, qkv): """ Apply QKV attention. :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. :return: an [N x (H * C) x T] tensor after attention. """ bs, width, length = qkv.shape assert width % (3 * self.n_heads) == 0 ch = width // (3 * self.n_heads) q, k, v = qkv.chunk(3, dim=1) scale = 1 / math.sqrt(math.sqrt(ch)) weight = th.einsum( "bct,bcs->bts", (q * scale).view(bs * self.n_heads, ch, length), (k * scale).view(bs * self.n_heads, ch, length), ) # More stable with f16 than dividing afterwards weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) return a.reshape(bs, -1, length) @staticmethod def count_flops(model, _x, y): return count_flops_attn(model, _x, y) class UNetModel(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. 
""" def __init__( self, image_size, in_channels, model_channels, out_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) if self.num_classes is not None: if isinstance(self.num_classes, int): self.label_emb = nn.Embedding(num_classes, time_embed_dim) elif self.num_classes == "continuous": print("setting up linear c_adm embedding layer") self.label_emb = nn.Linear(1, time_embed_dim) else: raise ValueError() self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not 
use_spatial_transformer else SpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ), ResBlock( ch, time_embed_dim, dropout, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch self.out = nn.Sequential( normalization(ch), nn.SiLU(), zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), ) if self.predict_codebook_ids: self.id_predictor = nn.Sequential( normalization(ch), conv_nd(dims, model_channels, n_embed, 1), #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits ) def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) self.output_blocks.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. """ self.input_blocks.apply(convert_module_to_f32) self.middle_block.apply(convert_module_to_f32) self.output_blocks.apply(convert_module_to_f32) def forward(self, x, timesteps=None, context=None, y=None, att_mask=None, **kwargs): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param context: conditioning plugged in via crossattn :param y: an [N] Tensor of labels, if class-conditional. :return: an [N x C x ...] Tensor of outputs. """ assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" hs = []
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
6
2023-10-27 06:56:37+00:00
8k
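Note: the gold continuation for the record above calls timestep_embedding(timesteps, self.model_channels, repeat_only=False) at the top of UNetModel.forward. For reference, here is a minimal sketch of a standard sinusoidal timestep embedding (an assumption about what that ldm.modules.diffusionmodules.util helper computes, not its actual implementation); the repeat_only flag is not modeled:

import math
import torch as th

def sinusoidal_timestep_embedding(timesteps, dim, max_period=10000):
    # timesteps: 1-D tensor with one index per batch element; returns an [N x dim] tensor.
    half = dim // 2
    freqs = th.exp(-math.log(max_period) * th.arange(half, dtype=th.float32) / half)
    args = timesteps[:, None].float() * freqs[None]
    embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
    if dim % 2:  # zero-pad so the output width is exactly dim when dim is odd
        embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
    return embedding

In the model above, that embedding is presumably projected by self.time_embed and then threaded to every ResBlock through TimestepEmbedSequential.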
alexeichhorn/typegpt
tests/test_openai.py
[ { "identifier": "BaseLLMResponse", "path": "typegpt/base.py", "snippet": "class BaseLLMResponse(_InternalBaseLLMResponse, metaclass=LLMBaseMeta):\n if TYPE_CHECKING:\n # populated by the metaclass (ClassPlaceholder used to prevent showing up as type suggestion)\n __raw_completion__: str = ClassPlaceholder(init=False, value=\"\")\n\n def _set_raw_completion(self, completion: str):\n self.__raw_completion__ = completion\n\n # - Parsing\n\n _Self = TypeVar(\"_Self\", bound=\"BaseLLMResponse\") # backward compatibility for pre-Python 3.12\n\n @classmethod\n def parse_response(cls: type[_Self], response: str) -> _Self:\n return Parser(cls).parse(response)" }, { "identifier": "LLMArrayOutput", "path": "typegpt/fields.py", "snippet": "def LLMArrayOutput(\n expected_count: int | None | tuple[int | None, int | None],\n instruction: Callable[[ExamplePosition], str],\n multiline: bool = False,\n) -> Any:\n min_count, max_count = 0, None\n if isinstance(expected_count, tuple):\n min_count, max_count = expected_count\n min_count = min_count or 0\n elif expected_count is not None:\n min_count = expected_count\n max_count = expected_count\n\n return LLMArrayOutputInfo(instruction=instruction, min_count=min_count, max_count=max_count, multiline=multiline)" }, { "identifier": "LLMOutput", "path": "typegpt/fields.py", "snippet": "def LLMOutput(\n instruction: str,\n default: SupportedBaseTypes | None | _NoDefaultType = _NoDefault,\n # required: bool = True,\n multiline: bool = False,\n) -> Any:\n return LLMOutputInfo(instruction=instruction, default=default, required=(default is _NoDefault), multiline=multiline)" }, { "identifier": "PromptTemplate", "path": "typegpt/prompt_definition/prompt_template.py", "snippet": "class PromptTemplate(Protocol): # , Generic[_Output]):\n def system_prompt(self) -> str:\n ...\n\n def user_prompt(self) -> str:\n ...\n\n def reduce_if_possible(self) -> bool:\n \"\"\"\n Override this method to reduce the parameters of the prompt, which gets called when the token limit is exceeded\n @returns: whether the parameters could be further reduced\n \"\"\"\n return False\n\n Output: type[BaseLLMResponse] # type[_Output]\n\n def generate_messages(self, token_limit: int, token_counter: Callable[[list[EncodedMessage]], int]):\n \"\"\"\n Generates messages dictionary that can be sent to any OpenAI equivalent API, ensuring that the total number of tokens is below the specified limit\n Messages that do not fit in are removed inside the object permanently\n \"\"\"\n return MessageCollectionFactory(self, token_counter=token_counter).generate_messages(token_limit=token_limit)" }, { "identifier": "LLMTokenLimitExceeded", "path": "typegpt/exceptions.py", "snippet": "class LLMTokenLimitExceeded(LLMException):\n ..." }, { "identifier": "AsyncTypeAzureOpenAI", "path": "typegpt/openai/_async/client.py", "snippet": "class AsyncTypeAzureOpenAI(AsyncAzureOpenAI, AsyncTypeOpenAI):\n ..." }, { "identifier": "AsyncTypeOpenAI", "path": "typegpt/openai/_async/client.py", "snippet": "class AsyncTypeOpenAI(AsyncOpenAI):\n chat: AsyncTypeChat\n\n def __init__(\n self,\n *,\n api_key: str | None = None,\n organization: str | None = None,\n base_url: str | httpx.URL | None = None,\n timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,\n max_retries: int = DEFAULT_MAX_RETRIES,\n default_headers: Mapping[str, str] | None = None,\n default_query: Mapping[str, object] | None = None,\n # Configure a custom httpx client. 
See the [httpx documentation](https://www.python-httpx.org/api/#asyncclient) for more details.\n http_client: httpx.AsyncClient | None = None,\n # only needed to have same subclass capabilities (i.e. for Azure)\n _strict_response_validation: bool = False,\n ) -> None:\n super().__init__(\n api_key=api_key,\n organization=organization,\n base_url=base_url,\n timeout=timeout,\n max_retries=max_retries,\n default_headers=default_headers,\n default_query=default_query,\n http_client=http_client,\n )\n self.chat = AsyncTypeChat(self)" }, { "identifier": "TypeAzureOpenAI", "path": "typegpt/openai/_sync/client.py", "snippet": "class TypeAzureOpenAI(AzureOpenAI, TypeOpenAI):\n ..." }, { "identifier": "TypeOpenAI", "path": "typegpt/openai/_sync/client.py", "snippet": "class TypeOpenAI(OpenAI):\n chat: TypeChat\n\n def __init__(\n self,\n *,\n api_key: str | None = None,\n organization: str | None = None,\n base_url: str | httpx.URL | None = None,\n timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,\n max_retries: int = DEFAULT_MAX_RETRIES,\n default_headers: Mapping[str, str] | None = None,\n default_query: Mapping[str, object] | None = None,\n # Configure a custom httpx client. See the [httpx documentation](https://www.python-httpx.org/api/#asyncclient) for more details.\n http_client: httpx.Client | None = None,\n # only needed to have same subclass capabilities (i.e. for Azure)\n _strict_response_validation: bool = False,\n ) -> None:\n super().__init__(\n api_key=api_key,\n organization=organization,\n base_url=base_url,\n timeout=timeout,\n max_retries=max_retries,\n default_headers=default_headers,\n default_query=default_query,\n http_client=http_client,\n )\n self.chat = TypeChat(self)" }, { "identifier": "OpenAIChatModel", "path": "typegpt/openai/views.py", "snippet": "class AzureChatModel:\nclass AzureConfig:" } ]
import os
import sys
import pytest
from typing import List, Optional, Union
from unittest.mock import Mock
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletion
from openai.types.chat.chat_completion import Choice
from openai.types.chat.chat_completion_message import ChatCompletionMessage
from typegpt import BaseLLMResponse, LLMArrayOutput, LLMOutput, PromptTemplate
from typegpt.exceptions import LLMTokenLimitExceeded
from typegpt.openai import AsyncTypeAzureOpenAI, AsyncTypeOpenAI, OpenAIChatModel, TypeAzureOpenAI, TypeOpenAI
3,676
message=ChatCompletionMessage(role="assistant", content=content_res), ) ], ) mocker.patch("typegpt.openai._async.chat_completion.AsyncTypeChatCompletion.create", new=async_mock) @pytest.mark.asyncio async def test_mock_end_to_end_parse_retry(self, mock_openai_retry_completion): class FullExamplePrompt(PromptTemplate): def system_prompt(self) -> str: return "This is a random system prompt" def user_prompt(self) -> str: return "This is a random user prompt" class Output(BaseLLMResponse): title: str items: list[str] = LLMArrayOutput((1, 2), instruction=lambda _: "Put the items here") count: int client = AsyncTypeOpenAI(api_key="mock") result = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=FullExamplePrompt(), max_output_tokens=100, retry_on_parse_error=5 ) assert isinstance(result, FullExamplePrompt.Output) assert result.title == "Some title n" assert result.items == ["abc"] assert result.count == 42 @pytest.mark.asyncio async def test_mock_reduce_prompt(self, mock_openai_completion): class NonAutomaticReducingPrompt(PromptTemplate): def __init__(self, number: int): self.lines = [f"This is line {i}" for i in range(number)] def system_prompt(self) -> str: return "This is a random system prompt" def user_prompt(self) -> str: return "My lines:\n\n" + "\n".join(self.lines) class Output(BaseLLMResponse): lines: list[str] non_reducing_prompt_100 = NonAutomaticReducingPrompt(100) client = AsyncTypeOpenAI(api_key="mock") result = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=non_reducing_prompt_100, max_output_tokens=100, ) non_reducing_prompt_1000 = NonAutomaticReducingPrompt(1000) with pytest.raises(LLMTokenLimitExceeded): result = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=non_reducing_prompt_1000, max_output_tokens=100, ) class ReducingTestPrompt(PromptTemplate): def __init__(self, number: int): self.lines = [f"This is line {i}" for i in range(number)] def system_prompt(self) -> str: return "This is a random system prompt" def user_prompt(self) -> str: return "My lines:\n\n" + "\n".join(self.lines) class Output(BaseLLMResponse): lines: list[str] def reduce_if_possible(self) -> bool: if len(self.lines) > 10: # remove last 10 lines self.lines = self.lines[:-10] return True return False reducing_prompt_100 = ReducingTestPrompt(100) result = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=reducing_prompt_100, max_output_tokens=100, ) assert len(reducing_prompt_100.lines) == 100 reducing_prompt_1000 = ReducingTestPrompt(1000) result = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=reducing_prompt_1000, max_output_tokens=100, ) # - def test_dynamic_output_type(self, mock_openai_completion_sync): class FullExamplePrompt(PromptTemplate): def __init__(self, name: str): self.name = name def system_prompt(self) -> str: return "This is a random system prompt" def user_prompt(self) -> str: return "This is a random user prompt" @property def Output(self): class Output(BaseLLMResponse):
myPath = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, myPath + "/../") class TestOpenAIChatCompletion: def test_token_counter(self): test_messages = [ {"role": "system", "content": "This is a system message"}, {"role": "user", "content": "This is a user message 🧑🏾"}, ] # check if test covers all models (increase if new models are added) assert len(OpenAIChatModel.__args__) == 14 #  type: ignore client = AsyncTypeOpenAI(api_key="mock") assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-3.5-turbo") == 27 assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-3.5-turbo-0301") == 29 assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-3.5-turbo-0613") == 27 assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-3.5-turbo-1106") == 27 assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-3.5-turbo-16k") == 27 assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-3.5-turbo-16k-0613") == 27 assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4") == 27 assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4-0314") == 27 assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4-0613") == 27 assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4-32k") == 27 assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4-32k-0314") == 27 assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4-32k-0613") == 27 assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4-1106-preview") == 27 assert client.chat.completions.num_tokens_from_messages(test_messages, model="gpt-4-vision-preview") == 27 # - @pytest.fixture def mock_openai_completion(self, mocker): async def async_mock(*args, **kwargs): return ChatCompletion( id="test", model="gpt-3.5-turbo", object="chat.completion", created=123, choices=[ Choice( finish_reason="stop", index=1, message=ChatCompletionMessage(role="assistant", content="TITLE: This is a test completion\nCOUNT: 09"), ) ], ) mocker.patch("typegpt.openai._async.chat_completion.AsyncTypeChatCompletion.create", new=async_mock) @pytest.mark.asyncio async def test_mock_end_to_end(self, mock_openai_completion): class FullExamplePrompt(PromptTemplate): def system_prompt(self) -> str: return "This is a random system prompt" def user_prompt(self) -> str: return "This is a random user prompt" class Output(BaseLLMResponse): title: str count: int client = AsyncTypeOpenAI(api_key="mock") result = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=FullExamplePrompt(), output_type=FullExamplePrompt.Output, max_output_tokens=100, ) assert isinstance(result, FullExamplePrompt.Output) assert result.title == "This is a test completion" assert result.count == 9 result_base = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=FullExamplePrompt(), max_output_tokens=100, ) assert isinstance(result, FullExamplePrompt.Output) assert result.title == "This is a test completion" assert result.count == 9 # - class AlternativeOutput(BaseLLMResponse): count: int result_alt = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=FullExamplePrompt(), output_type=AlternativeOutput, max_output_tokens=100, ) assert isinstance(result_alt, AlternativeOutput) assert 
result_alt.count == 9 assert not hasattr(result_alt, "title") @pytest.mark.asyncio async def test_mock_end_to_end_azure(Self, mock_openai_completion): class FullExamplePrompt(PromptTemplate): def system_prompt(self) -> str: return "This is a random system prompt" def user_prompt(self) -> str: return "This is a random user prompt" class Output(BaseLLMResponse): title: str count: int client = AsyncTypeAzureOpenAI(api_key="mock", azure_endpoint="mock", api_version="mock") result = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=FullExamplePrompt(), output_type=FullExamplePrompt.Output, max_output_tokens=100, ) assert isinstance(result, FullExamplePrompt.Output) assert result.title == "This is a test completion" assert result.count == 9 result_base = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=FullExamplePrompt(), max_output_tokens=100, ) assert isinstance(result, FullExamplePrompt.Output) assert result.title == "This is a test completion" assert result.count == 9 # - class AlternativeOutput(BaseLLMResponse): count: int result_alt = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=FullExamplePrompt(), output_type=AlternativeOutput, max_output_tokens=100, ) assert isinstance(result_alt, AlternativeOutput) assert result_alt.count == 9 assert not hasattr(result_alt, "title") @pytest.fixture def mock_openai_completion_sync(self, mocker): def sync_mock(*args, **kwargs): return ChatCompletion( id="test", model="gpt-3.5-turbo", object="chat.completion", created=123, choices=[ Choice( finish_reason="stop", index=1, message=ChatCompletionMessage(role="assistant", content="TITLE: This is a test completion\nCOUNT: 09"), ) ], ) mocker.patch("typegpt.openai._sync.chat_completion.TypeChatCompletion.create", new=sync_mock) def test_mock_end_to_end_sync(self, mock_openai_completion_sync): class FullExamplePrompt(PromptTemplate): def system_prompt(self) -> str: return "This is a random system prompt" def user_prompt(self) -> str: return "This is a random user prompt" class Output(BaseLLMResponse): title: str count: int client = TypeOpenAI(api_key="mock") result = client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=FullExamplePrompt(), output_type=FullExamplePrompt.Output, max_output_tokens=100, ) assert isinstance(result, FullExamplePrompt.Output) assert result.title == "This is a test completion" assert result.count == 9 result_base = client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=FullExamplePrompt(), max_output_tokens=100, ) assert isinstance(result, FullExamplePrompt.Output) assert result.title == "This is a test completion" assert result.count == 9 # - class AlternativeOutput(BaseLLMResponse): count: int result_alt = client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=FullExamplePrompt(), output_type=AlternativeOutput, max_output_tokens=100, ) assert isinstance(result_alt, AlternativeOutput) assert result_alt.count == 9 assert not hasattr(result_alt, "title") def test_mock_end_to_end_sync_azure(self, mock_openai_completion_sync): class FullExamplePrompt(PromptTemplate): def system_prompt(self) -> str: return "This is a random system prompt" def user_prompt(self) -> str: return "This is a random user prompt" class Output(BaseLLMResponse): title: str count: int client = TypeAzureOpenAI(api_key="mock", azure_endpoint="mock", api_version="mock") result = client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=FullExamplePrompt(), 
output_type=FullExamplePrompt.Output, max_output_tokens=100, ) assert isinstance(result, FullExamplePrompt.Output) assert result.title == "This is a test completion" assert result.count == 9 result_base = client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=FullExamplePrompt(), max_output_tokens=100, ) assert isinstance(result, FullExamplePrompt.Output) assert result.title == "This is a test completion" assert result.count == 9 # - class AlternativeOutput(BaseLLMResponse): count: int result_alt = client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=FullExamplePrompt(), output_type=AlternativeOutput, max_output_tokens=100, ) assert isinstance(result_alt, AlternativeOutput) assert result_alt.count == 9 assert not hasattr(result_alt, "title") @pytest.fixture def mock_openai_retry_completion(self, mocker): call_count = 0 async def async_mock(*args, **kwargs): nonlocal call_count call_count += 1 if call_count == 6: content_res = "TITLE: Some title n\nCOUNT: 42\nITEM 1: abc" elif call_count == 5: content_res = "TITLE: Some title n\nCOUNT: 42" elif call_count == 4: content_res = "Random stuff" # no content elif call_count == 3: content_res = "TITLE: Some title\nCOUNT: 99999\nITEM 1: abc\nITEM 2: def\nITEM 3: ghi" # too many items elif call_count == 2: content_res = "TITLE: Some title\nCOUNT: random string\nITEM 1: abc" # wrong type else: content_res = "TITLE: Only title\nITEM 1: abc" return ChatCompletion( id="test", model="gpt-3.5-turbo", object="chat.completion", created=123, choices=[ Choice( finish_reason="stop", index=1, message=ChatCompletionMessage(role="assistant", content=content_res), ) ], ) mocker.patch("typegpt.openai._async.chat_completion.AsyncTypeChatCompletion.create", new=async_mock) @pytest.mark.asyncio async def test_mock_end_to_end_parse_retry(self, mock_openai_retry_completion): class FullExamplePrompt(PromptTemplate): def system_prompt(self) -> str: return "This is a random system prompt" def user_prompt(self) -> str: return "This is a random user prompt" class Output(BaseLLMResponse): title: str items: list[str] = LLMArrayOutput((1, 2), instruction=lambda _: "Put the items here") count: int client = AsyncTypeOpenAI(api_key="mock") result = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=FullExamplePrompt(), max_output_tokens=100, retry_on_parse_error=5 ) assert isinstance(result, FullExamplePrompt.Output) assert result.title == "Some title n" assert result.items == ["abc"] assert result.count == 42 @pytest.mark.asyncio async def test_mock_reduce_prompt(self, mock_openai_completion): class NonAutomaticReducingPrompt(PromptTemplate): def __init__(self, number: int): self.lines = [f"This is line {i}" for i in range(number)] def system_prompt(self) -> str: return "This is a random system prompt" def user_prompt(self) -> str: return "My lines:\n\n" + "\n".join(self.lines) class Output(BaseLLMResponse): lines: list[str] non_reducing_prompt_100 = NonAutomaticReducingPrompt(100) client = AsyncTypeOpenAI(api_key="mock") result = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=non_reducing_prompt_100, max_output_tokens=100, ) non_reducing_prompt_1000 = NonAutomaticReducingPrompt(1000) with pytest.raises(LLMTokenLimitExceeded): result = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=non_reducing_prompt_1000, max_output_tokens=100, ) class ReducingTestPrompt(PromptTemplate): def __init__(self, number: int): self.lines = [f"This is line {i}" for i in range(number)] 
def system_prompt(self) -> str: return "This is a random system prompt" def user_prompt(self) -> str: return "My lines:\n\n" + "\n".join(self.lines) class Output(BaseLLMResponse): lines: list[str] def reduce_if_possible(self) -> bool: if len(self.lines) > 10: # remove last 10 lines self.lines = self.lines[:-10] return True return False reducing_prompt_100 = ReducingTestPrompt(100) result = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=reducing_prompt_100, max_output_tokens=100, ) assert len(reducing_prompt_100.lines) == 100 reducing_prompt_1000 = ReducingTestPrompt(1000) result = await client.chat.completions.generate_output( model="gpt-3.5-turbo", prompt=reducing_prompt_1000, max_output_tokens=100, ) # - def test_dynamic_output_type(self, mock_openai_completion_sync): class FullExamplePrompt(PromptTemplate): def __init__(self, name: str): self.name = name def system_prompt(self) -> str: return "This is a random system prompt" def user_prompt(self) -> str: return "This is a random user prompt" @property def Output(self): class Output(BaseLLMResponse):
title: str = LLMOutput(f"The title of {self.name}")
2
2023-10-25 22:17:27+00:00
8k
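Note: the mocked completions in the record above follow a simple KEY: value line format ("TITLE: ...", "COUNT: 09", "ITEM 1: abc"). As an illustration only, not the typegpt parser itself, a minimal sketch of that field-per-line parsing idea (type coercion such as COUNT to int is left out):

import re

def parse_fields(completion: str) -> dict:
    # Collect "KEY: value" lines; numbered keys such as "ITEM 1" are gathered into lists.
    scalars, arrays = {}, {}
    for line in completion.splitlines():
        match = re.match(r"^([A-Z]+)(?: (\d+))?: (.*)$", line)
        if not match:
            continue  # skip lines that do not follow the KEY: value shape
        key, index, value = match.groups()
        if index is None:
            scalars[key.lower()] = value
        else:
            arrays.setdefault(key.lower(), []).append(value)
    return {**scalars, **arrays}

parsed = parse_fields("TITLE: Some title n\nCOUNT: 42\nITEM 1: abc")
assert parsed == {"title": "Some title n", "count": "42", "item": ["abc"]}

The retry test in that record relies on exactly this kind of failure: responses with missing keys, wrong types, or too many array items are rejected and the request is retried up to retry_on_parse_error times.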
andriioreshk1118/python-storage-main
tests/unit/test_bucket.py
[ { "identifier": "DEFAULT_RETRY", "path": "google/cloud/storage/retry.py", "snippet": "DEFAULT_RETRY = retry.Retry(predicate=_should_retry)" }, { "identifier": "DEFAULT_RETRY_IF_ETAG_IN_JSON", "path": "google/cloud/storage/retry.py", "snippet": "DEFAULT_RETRY_IF_ETAG_IN_JSON = ConditionalRetryPolicy(\n DEFAULT_RETRY, is_etag_in_json, [\"data\"]\n)" }, { "identifier": "DEFAULT_RETRY_IF_GENERATION_SPECIFIED", "path": "google/cloud/storage/retry.py", "snippet": "DEFAULT_RETRY_IF_GENERATION_SPECIFIED = ConditionalRetryPolicy(\n DEFAULT_RETRY, is_generation_specified, [\"query_params\"]\n)" }, { "identifier": "DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED", "path": "google/cloud/storage/retry.py", "snippet": "DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED = ConditionalRetryPolicy(\n DEFAULT_RETRY, is_metageneration_specified, [\"query_params\"]\n)" }, { "identifier": "PUBLIC_ACCESS_PREVENTION_ENFORCED", "path": "google/cloud/storage/constants.py", "snippet": "PUBLIC_ACCESS_PREVENTION_ENFORCED = \"enforced\"" }, { "identifier": "PUBLIC_ACCESS_PREVENTION_INHERITED", "path": "google/cloud/storage/constants.py", "snippet": "PUBLIC_ACCESS_PREVENTION_INHERITED = \"inherited\"" }, { "identifier": "PUBLIC_ACCESS_PREVENTION_UNSPECIFIED", "path": "google/cloud/storage/constants.py", "snippet": "PUBLIC_ACCESS_PREVENTION_UNSPECIFIED = \"unspecified\"" }, { "identifier": "RPO_DEFAULT", "path": "google/cloud/storage/constants.py", "snippet": "RPO_DEFAULT = \"DEFAULT\"" }, { "identifier": "RPO_ASYNC_TURBO", "path": "google/cloud/storage/constants.py", "snippet": "RPO_ASYNC_TURBO = \"ASYNC_TURBO\"" } ]
import datetime import unittest import mock import pytest import google.auth.credentials import datetime import datetime import datetime import datetime import datetime import datetime import datetime import datetime import datetime import datetime import datetime import operator import operator import base64 import json from google.cloud.storage.retry import DEFAULT_RETRY from google.cloud.storage.retry import DEFAULT_RETRY_IF_ETAG_IN_JSON from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED from google.cloud.storage.retry import DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_ENFORCED from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_INHERITED from google.cloud.storage.constants import PUBLIC_ACCESS_PREVENTION_UNSPECIFIED from google.cloud.storage.constants import RPO_DEFAULT from google.cloud.storage.constants import RPO_ASYNC_TURBO from google.cloud.storage.bucket import _blobs_page_start from google.cloud.storage.bucket import _item_to_blob from google.cloud.storage.blob import Blob from google.cloud.storage.blob import Blob from google.cloud.storage.bucket import LifecycleRuleConditions from google.cloud.storage.bucket import LifecycleRuleDelete from google.cloud.storage.bucket import LifecycleRuleSetStorageClass from google.cloud.storage.bucket import ( LifecycleRuleAbortIncompleteMultipartUpload, ) from google.cloud.storage.bucket import IAMConfiguration from google.cloud.storage.bucket import Bucket from google.cloud._helpers import UTC from google.cloud._helpers import UTC from google.cloud._helpers import UTC from google.cloud._helpers import UTC from google.cloud._helpers import _datetime_to_rfc3339 from google.cloud.storage.bucket import Bucket from google.cloud.storage.constants import _DEFAULT_TIMEOUT from google.cloud.storage.client import Client from google.cloud.storage.blob import Blob from google.cloud.storage.blob import Blob from google.cloud.storage.blob import Blob from google.cloud.storage.blob import Blob from google.cloud.storage.notification import BucketNotification from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT from google.cloud.storage.notification import ( BucketNotification, OBJECT_FINALIZE_EVENT_TYPE, OBJECT_DELETE_EVENT_TYPE, JSON_API_V1_PAYLOAD_FORMAT, ) from google.cloud.exceptions import NotFound from google.cloud.storage.acl import BucketACL from google.cloud.storage.acl import DefaultObjectACL from google.cloud.exceptions import NotFound from google.cloud.storage.blob import Blob from google.cloud.storage.blob import Blob from google.cloud.storage.blob import Blob from google.cloud.storage.blob import Blob from google.cloud.storage.blob import Blob from google.cloud.storage.blob import Blob from google.cloud.storage.blob import _get_encryption_headers from google.cloud.storage.bucket import _item_to_notification from google.cloud.storage.bucket import _item_to_notification from google.cloud.exceptions import NotFound from google.cloud.storage.notification import BucketNotification from google.cloud.storage.notification import _TOPIC_REF_FMT from google.cloud.storage.notification import JSON_API_V1_PAYLOAD_FORMAT from google.cloud.exceptions import NotFound from google.cloud.exceptions import NotFound from google.cloud.exceptions import NotFound from google.cloud.exceptions import NotFound from google.cloud.exceptions import NotFound from google.cloud.storage.blob import Blob from google.cloud.storage.acl import ObjectACL from 
google.cloud.storage import bucket as bucket_module from google.cloud.storage.bucket import IAMConfiguration from google.cloud._helpers import UTC from google.cloud._helpers import _datetime_to_rfc3339 from google.cloud.storage.bucket import IAMConfiguration from google.cloud.storage.bucket import ( LifecycleRuleDelete, LifecycleRuleSetStorageClass, LifecycleRuleAbortIncompleteMultipartUpload, ) from google.cloud.storage.bucket import ( LifecycleRuleDelete, LifecycleRuleSetStorageClass, ) from google.cloud.storage.constants import REGION_LOCATION_TYPE from google.cloud._helpers import _datetime_to_rfc3339 from google.cloud._helpers import UTC from google.cloud.storage import constants from google.cloud._helpers import _datetime_to_rfc3339 from google.cloud._helpers import UTC from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS from google.cloud.storage.constants import STANDARD_STORAGE_CLASS from google.cloud.storage.constants import NEARLINE_STORAGE_CLASS from google.cloud.storage.constants import COLDLINE_STORAGE_CLASS from google.cloud.storage.constants import ARCHIVE_STORAGE_CLASS from google.cloud.storage.constants import MULTI_REGIONAL_LEGACY_STORAGE_CLASS from google.cloud.storage.constants import REGIONAL_LEGACY_STORAGE_CLASS from google.cloud.storage.constants import ( DURABLE_REDUCED_AVAILABILITY_LEGACY_STORAGE_CLASS, ) from google.cloud._helpers import _RFC3339_MICROS from google.cloud._helpers import UTC from google.cloud.storage.iam import STORAGE_OWNER_ROLE from google.cloud.storage.iam import STORAGE_EDITOR_ROLE from google.cloud.storage.iam import STORAGE_VIEWER_ROLE from google.api_core.iam import Policy from google.api_core.iam import Policy from google.cloud.storage.iam import STORAGE_OWNER_ROLE from google.cloud.storage.iam import STORAGE_OWNER_ROLE from google.cloud.storage.iam import STORAGE_EDITOR_ROLE from google.cloud.storage.iam import STORAGE_VIEWER_ROLE from google.api_core.iam import Policy from google.cloud.storage.iam import STORAGE_OWNER_ROLE from google.cloud.storage.iam import STORAGE_EDITOR_ROLE from google.cloud.storage.iam import STORAGE_VIEWER_ROLE from google.api_core.iam import Policy from google.cloud.storage.iam import STORAGE_OBJECTS_LIST from google.cloud.storage.iam import STORAGE_BUCKETS_GET from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE from google.cloud.storage.iam import STORAGE_OBJECTS_LIST from google.cloud.storage.iam import STORAGE_BUCKETS_GET from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE from google.cloud.storage.acl import _ACLEntity from google.cloud.storage.acl import _ACLEntity from google.cloud.storage.acl import _ACLEntity from google.cloud.storage.acl import _ACLEntity from google.cloud.storage.acl import _ACLEntity from google.cloud._helpers import _datetime_to_rfc3339 from google.cloud._helpers import _datetime_to_rfc3339 from urllib import parse from google.cloud._helpers import UTC from google.cloud.storage._helpers import _bucket_bound_hostname_url from google.cloud.storage.blob import _API_ACCESS_ENDPOINT from google.cloud.storage.bucket import Bucket from google.cloud.storage.bucket import Bucket from google.cloud.storage.bucket import Bucket from google.cloud._helpers import UTC from google.cloud.storage.bucket import _item_to_notification from google.cloud.storage.notification import BucketNotification from google.cloud.storage.notification import _TOPIC_REF_FMT from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT
4,158
rule = self._make_one(age=10, matches_storage_class=["COLDLINE"]) expected = { "action": {"type": "Delete"}, "condition": {"age": 10, "matchesStorageClass": ["COLDLINE"]}, } self.assertEqual(dict(rule), expected) def test_from_api_repr(self): klass = self._get_target_class() conditions = { "age": 10, "createdBefore": "2018-08-01", "isLive": True, "matchesStorageClass": ["COLDLINE"], "numNewerVersions": 3, } resource = {"action": {"type": "Delete"}, "condition": conditions} rule = klass.from_api_repr(resource) self.assertEqual(dict(rule), resource) class Test_LifecycleRuleSetStorageClass(unittest.TestCase): @staticmethod def _get_target_class(): return LifecycleRuleSetStorageClass def _make_one(self, **kw): return self._get_target_class()(**kw) def test_ctor_wo_conditions(self): with self.assertRaises(ValueError): self._make_one(storage_class="COLDLINE") def test_ctor_w_condition(self): rule = self._make_one( storage_class="COLDLINE", age=10, matches_storage_class=["NEARLINE"] ) expected = { "action": {"type": "SetStorageClass", "storageClass": "COLDLINE"}, "condition": {"age": 10, "matchesStorageClass": ["NEARLINE"]}, } self.assertEqual(dict(rule), expected) def test_from_api_repr(self): klass = self._get_target_class() conditions = { "age": 10, "createdBefore": "2018-08-01", "isLive": True, "matchesStorageClass": ["NEARLINE"], "numNewerVersions": 3, } resource = { "action": {"type": "SetStorageClass", "storageClass": "COLDLINE"}, "condition": conditions, } rule = klass.from_api_repr(resource) self.assertEqual(dict(rule), resource) class Test_LifecycleRuleAbortIncompleteMultipartUpload(unittest.TestCase): @staticmethod def _get_target_class(): return LifecycleRuleAbortIncompleteMultipartUpload def _make_one(self, **kw): return self._get_target_class()(**kw) def test_ctor_wo_conditions(self): with self.assertRaises(ValueError): self._make_one() def test_ctor_w_condition(self): rule = self._make_one(age=10) expected = { "action": {"type": "AbortIncompleteMultipartUpload"}, "condition": {"age": 10}, } self.assertEqual(dict(rule), expected) def test_from_api_repr(self): klass = self._get_target_class() conditions = { "age": 10, } resource = { "action": {"type": "AbortIncompleteMultipartUpload"}, "condition": conditions, } rule = klass.from_api_repr(resource) self.assertEqual(dict(rule), resource) class Test_IAMConfiguration(unittest.TestCase): @staticmethod def _get_target_class(): return IAMConfiguration def _make_one(self, bucket, **kw): return self._get_target_class()(bucket, **kw) @staticmethod def _make_bucket(): return mock.create_autospec(Bucket, instance=True) def test_ctor_defaults(self): bucket = self._make_bucket() config = self._make_one(bucket) self.assertIs(config.bucket, bucket) self.assertFalse(config.uniform_bucket_level_access_enabled) self.assertIsNone(config.uniform_bucket_level_access_locked_time) # TODO: Remove unspecified after changeover is complete self.assertIn( config.public_access_prevention,
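Note: the lifecycle-rule tests in this record pin down how rule objects serialize to and from their JSON API representation (snake_case keyword arguments on the Python side, camelCase keys on the API side). A condensed sketch of that round trip, based only on what the surrounding tests assert:

from google.cloud.storage.bucket import LifecycleRuleDelete, LifecycleRuleSetStorageClass

# Rule object to API dict, mirroring Test_LifecycleRuleDelete.test_ctor_w_condition.
delete_rule = LifecycleRuleDelete(age=10, matches_storage_class=["COLDLINE"])
assert dict(delete_rule) == {
    "action": {"type": "Delete"},
    "condition": {"age": 10, "matchesStorageClass": ["COLDLINE"]},
}

# API dict back to a rule object, mirroring Test_LifecycleRuleSetStorageClass.test_from_api_repr.
resource = {
    "action": {"type": "SetStorageClass", "storageClass": "COLDLINE"},
    "condition": {"age": 10, "matchesStorageClass": ["NEARLINE"]},
}
set_rule = LifecycleRuleSetStorageClass.from_api_repr(resource)
assert dict(set_rule) == resource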
# Copyright 2014 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def _create_signing_credentials(): class _SigningCredentials( google.auth.credentials.Credentials, google.auth.credentials.Signing ): pass credentials = mock.Mock(spec=_SigningCredentials) return credentials class Test__blobs_page_start(unittest.TestCase): @staticmethod def _call_fut(iterator, page, response): return _blobs_page_start(iterator, page, response) def test_wo_any_prefixes(self): iterator = mock.Mock(spec=["prefixes"], prefixes=set()) page = mock.Mock(spec=["prefixes"]) response = {} self._call_fut(iterator, page, response) self.assertEqual(page.prefixes, ()) self.assertEqual(iterator.prefixes, set()) def test_w_prefixes(self): iterator_prefixes = set(["foo/", "qux/"]) iterator = mock.Mock(spec=["prefixes"], prefixes=iterator_prefixes) page = mock.Mock(spec=["prefixes"]) page_prefixes = ["foo/", "bar/", "baz/"] response = {"prefixes": page_prefixes} self._call_fut(iterator, page, response) self.assertEqual(page.prefixes, tuple(page_prefixes)) self.assertEqual(iterator.prefixes, iterator_prefixes.union(page_prefixes)) class Test__item_to_blob(unittest.TestCase): @staticmethod def _call_fut(iterator, item): return _item_to_blob(iterator, item) def test_wo_extra_properties(self): blob_name = "blob-name" bucket = mock.Mock(spec=[]) iterator = mock.Mock(spec=["bucket"], bucket=bucket) item = {"name": blob_name} blob = self._call_fut(iterator, item) self.assertIsInstance(blob, Blob) self.assertIs(blob.bucket, bucket) self.assertEqual(blob.name, blob_name) self.assertEqual(blob._properties, item) def test_w_extra_properties(self): blob_name = "blob-name" bucket = mock.Mock(spec=[]) iterator = mock.Mock(spec=["bucket"], bucket=bucket) item = { "name": blob_name, "generation": 123, "contentType": "text/plain", "contentLanguage": "en-US", } blob = self._call_fut(iterator, item) self.assertIsInstance(blob, Blob) self.assertIs(blob.bucket, bucket) self.assertEqual(blob.name, blob_name) self.assertEqual(blob._properties, item) class Test_LifecycleRuleConditions(unittest.TestCase): @staticmethod def _get_target_class(): return LifecycleRuleConditions def _make_one(self, **kw): return self._get_target_class()(**kw) def test_ctor_wo_conditions(self): with self.assertRaises(ValueError): self._make_one() def test_ctor_w_age_and_matches_storage_class(self): conditions = self._make_one(age=10, matches_storage_class=["COLDLINE"]) expected = {"age": 10, "matchesStorageClass": ["COLDLINE"]} self.assertEqual(dict(conditions), expected) self.assertEqual(conditions.age, 10) self.assertIsNone(conditions.created_before) self.assertIsNone(conditions.is_live) self.assertEqual(conditions.matches_storage_class, ["COLDLINE"]) self.assertIsNone(conditions.number_of_newer_versions) def test_ctor_w_created_before_and_is_live(self): before = datetime.date(2018, 8, 1) conditions = self._make_one(created_before=before, is_live=False) expected = {"createdBefore": "2018-08-01", "isLive": False} self.assertEqual(dict(conditions), expected) 
self.assertIsNone(conditions.age) self.assertEqual(conditions.created_before, before) self.assertEqual(conditions.is_live, False) self.assertIsNone(conditions.matches_storage_class) self.assertIsNone(conditions.number_of_newer_versions) self.assertIsNone(conditions.days_since_custom_time) self.assertIsNone(conditions.custom_time_before) self.assertIsNone(conditions.noncurrent_time_before) def test_ctor_w_number_of_newer_versions(self): conditions = self._make_one(number_of_newer_versions=3) expected = {"numNewerVersions": 3} self.assertEqual(dict(conditions), expected) self.assertIsNone(conditions.age) self.assertIsNone(conditions.created_before) self.assertIsNone(conditions.is_live) self.assertIsNone(conditions.matches_storage_class) self.assertEqual(conditions.number_of_newer_versions, 3) def test_ctor_w_days_since_custom_time(self): conditions = self._make_one( number_of_newer_versions=3, days_since_custom_time=2 ) expected = {"numNewerVersions": 3, "daysSinceCustomTime": 2} self.assertEqual(dict(conditions), expected) self.assertIsNone(conditions.age) self.assertIsNone(conditions.created_before) self.assertIsNone(conditions.is_live) self.assertIsNone(conditions.matches_storage_class) self.assertEqual(conditions.number_of_newer_versions, 3) self.assertEqual(conditions.days_since_custom_time, 2) def test_ctor_w_days_since_noncurrent_time(self): conditions = self._make_one( number_of_newer_versions=3, days_since_noncurrent_time=2 ) expected = {"numNewerVersions": 3, "daysSinceNoncurrentTime": 2} self.assertEqual(dict(conditions), expected) self.assertIsNone(conditions.age) self.assertIsNone(conditions.created_before) self.assertIsNone(conditions.is_live) self.assertIsNone(conditions.matches_storage_class) self.assertEqual(conditions.number_of_newer_versions, 3) self.assertEqual(conditions.days_since_noncurrent_time, 2) def test_ctor_w_custom_time_before(self): custom_time_before = datetime.date(2018, 8, 1) conditions = self._make_one( number_of_newer_versions=3, custom_time_before=custom_time_before ) expected = { "numNewerVersions": 3, "customTimeBefore": custom_time_before.isoformat(), } self.assertEqual(dict(conditions), expected) self.assertIsNone(conditions.age) self.assertIsNone(conditions.created_before) self.assertIsNone(conditions.is_live) self.assertIsNone(conditions.matches_storage_class) self.assertEqual(conditions.number_of_newer_versions, 3) self.assertEqual(conditions.custom_time_before, custom_time_before) def test_ctor_w_noncurrent_time_before(self): noncurrent_before = datetime.date(2018, 8, 1) conditions = self._make_one( number_of_newer_versions=3, noncurrent_time_before=noncurrent_before ) expected = { "numNewerVersions": 3, "noncurrentTimeBefore": noncurrent_before.isoformat(), } self.assertEqual(dict(conditions), expected) self.assertIsNone(conditions.age) self.assertIsNone(conditions.created_before) self.assertIsNone(conditions.is_live) self.assertIsNone(conditions.matches_storage_class) self.assertEqual(conditions.number_of_newer_versions, 3) self.assertEqual(conditions.noncurrent_time_before, noncurrent_before) def test_ctor_w_matches_prefix(self): conditions = self._make_one(matches_prefix=["test-prefix"]) expected = {"matchesPrefix": ["test-prefix"]} self.assertEqual(dict(conditions), expected) self.assertIsNone(conditions.age) self.assertIsNone(conditions.created_before) self.assertIsNone(conditions.is_live) self.assertIsNone(conditions.matches_storage_class) self.assertIsNone(conditions.matches_suffix) self.assertEqual(conditions.matches_prefix, 
["test-prefix"]) def test_ctor_w_matches_suffix(self): conditions = self._make_one(matches_suffix=["test-suffix"]) expected = {"matchesSuffix": ["test-suffix"]} self.assertEqual(dict(conditions), expected) self.assertIsNone(conditions.age) self.assertIsNone(conditions.created_before) self.assertIsNone(conditions.is_live) self.assertIsNone(conditions.matches_storage_class) self.assertIsNone(conditions.matches_prefix) self.assertEqual(conditions.matches_suffix, ["test-suffix"]) def test_from_api_repr(self): custom_time_before = datetime.date(2018, 8, 1) noncurrent_before = datetime.date(2018, 8, 1) before = datetime.date(2018, 8, 1) klass = self._get_target_class() resource = { "age": 10, "createdBefore": "2018-08-01", "isLive": True, "matchesStorageClass": ["COLDLINE"], "numNewerVersions": 3, "daysSinceCustomTime": 2, "customTimeBefore": custom_time_before.isoformat(), "daysSinceNoncurrentTime": 2, "noncurrentTimeBefore": noncurrent_before.isoformat(), } conditions = klass.from_api_repr(resource) self.assertEqual(conditions.age, 10) self.assertEqual(conditions.created_before, before) self.assertEqual(conditions.is_live, True) self.assertEqual(conditions.matches_storage_class, ["COLDLINE"]) self.assertEqual(conditions.number_of_newer_versions, 3) self.assertEqual(conditions.days_since_custom_time, 2) self.assertEqual(conditions.custom_time_before, custom_time_before) self.assertEqual(conditions.days_since_noncurrent_time, 2) self.assertEqual(conditions.noncurrent_time_before, noncurrent_before) class Test_LifecycleRuleDelete(unittest.TestCase): @staticmethod def _get_target_class(): return LifecycleRuleDelete def _make_one(self, **kw): return self._get_target_class()(**kw) def test_ctor_wo_conditions(self): with self.assertRaises(ValueError): self._make_one() def test_ctor_w_condition(self): rule = self._make_one(age=10, matches_storage_class=["COLDLINE"]) expected = { "action": {"type": "Delete"}, "condition": {"age": 10, "matchesStorageClass": ["COLDLINE"]}, } self.assertEqual(dict(rule), expected) def test_from_api_repr(self): klass = self._get_target_class() conditions = { "age": 10, "createdBefore": "2018-08-01", "isLive": True, "matchesStorageClass": ["COLDLINE"], "numNewerVersions": 3, } resource = {"action": {"type": "Delete"}, "condition": conditions} rule = klass.from_api_repr(resource) self.assertEqual(dict(rule), resource) class Test_LifecycleRuleSetStorageClass(unittest.TestCase): @staticmethod def _get_target_class(): return LifecycleRuleSetStorageClass def _make_one(self, **kw): return self._get_target_class()(**kw) def test_ctor_wo_conditions(self): with self.assertRaises(ValueError): self._make_one(storage_class="COLDLINE") def test_ctor_w_condition(self): rule = self._make_one( storage_class="COLDLINE", age=10, matches_storage_class=["NEARLINE"] ) expected = { "action": {"type": "SetStorageClass", "storageClass": "COLDLINE"}, "condition": {"age": 10, "matchesStorageClass": ["NEARLINE"]}, } self.assertEqual(dict(rule), expected) def test_from_api_repr(self): klass = self._get_target_class() conditions = { "age": 10, "createdBefore": "2018-08-01", "isLive": True, "matchesStorageClass": ["NEARLINE"], "numNewerVersions": 3, } resource = { "action": {"type": "SetStorageClass", "storageClass": "COLDLINE"}, "condition": conditions, } rule = klass.from_api_repr(resource) self.assertEqual(dict(rule), resource) class Test_LifecycleRuleAbortIncompleteMultipartUpload(unittest.TestCase): @staticmethod def _get_target_class(): return LifecycleRuleAbortIncompleteMultipartUpload def 
_make_one(self, **kw): return self._get_target_class()(**kw) def test_ctor_wo_conditions(self): with self.assertRaises(ValueError): self._make_one() def test_ctor_w_condition(self): rule = self._make_one(age=10) expected = { "action": {"type": "AbortIncompleteMultipartUpload"}, "condition": {"age": 10}, } self.assertEqual(dict(rule), expected) def test_from_api_repr(self): klass = self._get_target_class() conditions = { "age": 10, } resource = { "action": {"type": "AbortIncompleteMultipartUpload"}, "condition": conditions, } rule = klass.from_api_repr(resource) self.assertEqual(dict(rule), resource) class Test_IAMConfiguration(unittest.TestCase): @staticmethod def _get_target_class(): return IAMConfiguration def _make_one(self, bucket, **kw): return self._get_target_class()(bucket, **kw) @staticmethod def _make_bucket(): return mock.create_autospec(Bucket, instance=True) def test_ctor_defaults(self): bucket = self._make_bucket() config = self._make_one(bucket) self.assertIs(config.bucket, bucket) self.assertFalse(config.uniform_bucket_level_access_enabled) self.assertIsNone(config.uniform_bucket_level_access_locked_time) # TODO: Remove unspecified after changeover is complete self.assertIn( config.public_access_prevention,
[PUBLIC_ACCESS_PREVENTION_UNSPECIFIED, PUBLIC_ACCESS_PREVENTION_INHERITED],
6
2023-10-31 10:36:21+00:00
8k
worldbank/blackmarblepy
src/blackmarble/raster.py
[ { "identifier": "BlackMarbleDownloader", "path": "src/blackmarble/download.py", "snippet": "class BlackMarbleDownloader(BaseModel):\n \"\"\"A downloader to retrieve `NASA Black Marble <https://blackmarble.gsfc.nasa.gov>`_ data.\n\n Attributes\n ----------\n bearer: str\n NASA EarthData bearer token\n\n directory: Path\n Local directory to which download\n \"\"\"\n\n bearer: str\n directory: Path\n\n TILES: ClassVar[geopandas.GeoDataFrame] = geopandas.read_file(\n files(\"blackmarble.data\").joinpath(\"blackmarbletiles.geojson\")\n )\n URL: ClassVar[str] = \"https://ladsweb.modaps.eosdis.nasa.gov\"\n\n def __init__(self, bearer: str, directory: Path):\n nest_asyncio.apply()\n super().__init__(bearer=bearer, directory=directory)\n\n async def get_manifest(\n self,\n gdf: geopandas.GeoDataFrame,\n product_id: Product,\n date_range: datetime.date | List[datetime.date],\n ) -> pd.DataFrame:\n \"\"\"Retrieve NASA Black Marble data manifest. i.d., download links.\n\n Parameters\n ----------\n product_id: Product\n NASA Black Marble product suite (VNP46) identifier\n\n date_range: datetime.date | List[datetime.date]\n Date range for which to retrieve NASA Black Marble data manifest\n\n Returns\n -------\n pandas.DataFrame\n NASA Black Marble data manifest (i.e., downloads links)\n \"\"\"\n if isinstance(date_range, datetime.date):\n date_range = [date_range]\n if isinstance(product_id, str):\n product_id = Product(product_id)\n\n # Create bounding box\n gdf = pd.concat([gdf, gdf.bounds], axis=\"columns\").round(2)\n gdf[\"bbox\"] = gdf.round(2).apply(\n lambda row: f\"x{row.minx}y{row.miny},x{row.maxx}y{row.maxy}\", axis=1\n )\n\n async with httpx.AsyncClient(verify=False) as client:\n tasks = []\n for chunk in chunks(date_range, 250):\n for _, row in gdf.iterrows():\n url = f\"{self.URL}/api/v1/files\"\n params = {\n \"product\": product_id.value,\n \"collection\": \"5000\",\n \"dateRanges\": f\"{min(chunk)}..{max(chunk)}\",\n \"areaOfInterest\": row[\"bbox\"],\n }\n tasks.append(asyncio.ensure_future(get_url(client, url, params)))\n\n responses = [\n await f\n for f in tqdm(\n asyncio.as_completed(tasks),\n total=len(tasks),\n desc=\"GETTING MANIFEST...\",\n )\n ]\n\n rs = []\n for r in responses:\n try:\n rs.append(pd.DataFrame(r.json()).T)\n except json.decoder.JSONDecodeError:\n continue\n\n return pd.concat(rs)\n\n @backoff.on_exception(\n backoff.expo,\n HTTPError,\n )\n def _download_file(\n self,\n name: str,\n ):\n \"\"\"Download NASA Black Marble file\n\n Parameters\n ----------\n names: str\n NASA Black Marble filename\n\n Returns\n -------\n filename: pathlib.Path\n Filename of downloaded data file\n \"\"\"\n url = f\"{self.URL}{name}\"\n name = name.split(\"/\")[-1]\n\n with open(filename := Path(self.directory, name), \"wb+\") as f:\n with httpx.stream(\n \"GET\",\n url,\n headers={\"Authorization\": f\"Bearer {self.bearer}\"},\n ) as response:\n total = int(response.headers[\"Content-Length\"])\n with tqdm(\n total=total,\n unit=\"B\",\n unit_scale=True,\n leave=None,\n ) as pbar:\n pbar.set_description(f\"Retrieving {name}...\")\n for chunk in response.iter_raw():\n f.write(chunk)\n pbar.update(len(chunk))\n\n return filename\n\n def download(\n self,\n gdf: geopandas.GeoDataFrame,\n product_id: Product,\n date_range: List[datetime.date],\n skip_if_exists: bool = True,\n ):\n \"\"\"Download (in parallel) from NASA Black Marble archive\n\n Parameters\n ----------\n gdf: geopandas.GeoDataFrame\n Region of Interest\n\n product: Product\n Nasa Black Marble Product Id (e.g, 
VNP46A1)\n\n date_range: List[datetime.date]\n Date range for which to download NASA Black Marble data.\n\n skip_if_exists: bool, default=True\n Whether to skip downloading data if file already exists\n \"\"\"\n gdf = geopandas.overlay(\n gdf.to_crs(\"EPSG:4326\").dissolve(), self.TILES, how=\"intersection\"\n )\n\n bm_files_df = asyncio.run(self.get_manifest(gdf, product_id, date_range))\n bm_files_df = bm_files_df[\n bm_files_df[\"name\"].str.contains(\"|\".join(gdf[\"TileID\"]))\n ]\n names = bm_files_df[\"fileURL\"].tolist()\n\n args = [(name,) for name in names]\n return pqdm(\n args,\n self._download_file,\n n_jobs=16,\n argument_type=\"args\",\n desc=\"Downloading...\",\n )" }, { "identifier": "Product", "path": "src/blackmarble/types.py", "snippet": "class Product(Enum):\n \"\"\"NASA Black Marble product suite (VNP46)\"\"\"\n\n VNP46A1 = \"VNP46A1\"\n VNP46A2 = \"VNP46A2\"\n VNP46A3 = \"VNP46A3\"\n VNP46A4 = \"VNP46A4\"" } ]
import datetime import re import tempfile import geopandas import h5py import numpy as np import pandas as pd import rasterio import rioxarray import xarray as xr from pathlib import Path from typing import List, Optional from pydantic import ConfigDict, validate_call from rasterio.transform import from_origin from rioxarray.merge import merge_arrays from shapely.geometry import mapping from tqdm.auto import tqdm from .download import BlackMarbleDownloader from .types import Product
3957
height, width = da.shape return from_origin( left, top, (right - left) / width, (top - bottom) / height, ) def _pivot_paths_by_date(paths: List[Path]): """Return dictionary of paths by date Returns ------- dict """ results = {} for p in paths: key = datetime.datetime.strptime(p.stem.split(".")[1], "A%Y%j").date() if key not in results: results[key] = [] results[key].append(p) return results @validate_call(config=ConfigDict(arbitrary_types_allowed=True)) def bm_raster( gdf: geopandas.GeoDataFrame, product_id: Product, date_range: datetime.date | List[datetime.date], bearer: str, variable: Optional[str] = None, quality_flag_rm: List[int] = [255], check_all_tiles_exist: bool = True, file_directory: Optional[Path] = None, file_prefix: Optional[str] = None, file_skip_if_exists: bool = True, ): """Create a stack of nighttime lights rasters by retrieiving from `NASA Black Marble <https://blackmarble.gsfc.nasa.gov>`_ data. Parameters ---------- roi: geopandas.GeoDataFrame Region of interest product_id: Product NASA Black Marble product suite (VNP46) identifier. The available products are shown in following list: - ``VNP46A1``: Daily (raw) - ``VNP46A2``: Daily (corrected) - ``VNP46A3``: Monthly - ``VNP46A4``: Annual date_range: datetime.date | List[datetime.date] Date range (single date or list of dates) for which to retrieve NASA Black Marble data. bearer: str NASA Earthdata Bearer token. Please refer to the `documentation <https://worldbank.github.io/blackmarblepy/examples/blackmarblepy.html#nasa-earthdata-bearer-token>`_. variable: str, default = None Variable to create GeoTIFF raster. Further information, pleae see the `NASA Black Marble User Guide <https://ladsweb.modaps.eosdis.nasa.gov/api/v2/content/archives/Document%20Archive/Science%20Data%20Product%20Documentation/VIIRS_Black_Marble_UG_v1.2_April_2021.pdf>`_ for `VNP46A1`, see Table 3; for `VNP46A2` see Table 6; for `VNP46A3` and `VNP46A4`, see Table 9. By default, it uses the following default variables: - For ``VNP46A1``, uses ``DNB_At_Sensor_Radiance_500m`` - For ``VNP46A2``, uses ``Gap_Filled_DNB_BRDF-Corrected_NTL`` - For ``VNP46A3``, uses ``NearNadir_Composite_Snow_Free``. - For ``VNP46A4``, uses ``NearNadir_Composite_Snow_Free``. quality_flag: List[int], default = [255] Quality flag values to use to set values to ``NA``. Each pixel has a quality flag value, where low quality values can be removed. Values are set to ``NA`` for each value in ther ``quality_flag_rm`` vector. For ``VNP46A1`` and ``VNP46A2`` (daily data): - ``0``: High-quality, Persistent nighttime lights - ``1``: High-quality, Ephemeral nighttime Lights - ``2``: Poor-quality, Outlier, potential cloud contamination, or other issues - ``255``: No retrieval, Fill value (masked out on ingestion) For ``VNP46A3`` and ``VNP46A4`` (monthly and annual data): - ``0``: Good-quality, The number of observations used for the composite is larger than 3 - ``1``: Poor-quality, The number of observations used for the composite is less than or equal to 3 - ``2``: Gap filled NTL based on historical data - ``255``: Fill value check_all_tiles_exist: bool, default=True Check whether all Black Marble nighttime light tiles exist for the region of interest. Sometimes not all tiles are available, so the full region of interest may not be covered. By default (True), it skips cases where not all tiles are available. file_directory: pathlib.Path, optional Where to produce output. By default, the output will be procuded onto a temporary directory. 
file_prefix: str, optional Prefix file_skip_if_exists: bool, default=True Whether to skip downloading or extracting data if the data file for that date already exists. Returns ------- xarray.Dataset A Xarray dataset contaning a stack of nighttime lights rasters """ # Validate and fix args if not isinstance(quality_flag_rm, list): quality_flag_rm = [quality_flag_rm] if not isinstance(date_range, list): date_range = [date_range] if variable is None: variable = VARIABLE_DEFAULT.get(product_id) match product_id: case Product.VNP46A3: date_range = sorted(set([d.replace(day=1) for d in date_range])) case Product.VNP46A4: date_range = sorted(set([d.replace(day=1, month=1) for d in date_range])) # Download and construct Dataset with file_directory if file_directory else tempfile.TemporaryDirectory() as d:
VARIABLE_DEFAULT = { Product.VNP46A1: "DNB_At_Sensor_Radiance_500m", Product.VNP46A2: "Gap_Filled_DNB_BRDF-Corrected_NTL", Product.VNP46A3: "NearNadir_Composite_Snow_Free", Product.VNP46A4: "NearNadir_Composite_Snow_Free", } def h5_to_geotiff( f: Path, /, variable: str = None, quality_flag_rm=[255], output_directory: Path = None, output_prefix: str = None, ): """ Convert HDF5 file to GeoTIFF for a selected (or default) variable from NASA Black Marble data Parameters ---------- f: Path H5DF filename variable: str, default = None Variable to create GeoTIFF raster. Further information, pleae see the `NASA Black Marble User Guide <https://ladsweb.modaps.eosdis.nasa.gov/api/v2/content/archives/Document%20Archive/Science%20Data%20Product%20Documentation/VIIRS_Black_Marble_UG_v1.2_April_2021.pdf>`_ for `VNP46A1`, see Table 3; for `VNP46A2` see Table 6; for `VNP46A3` and `VNP46A4`, see Table 9. By default, it uses the following default variables: - For ``VNP46A1``, uses ``DNB_At_Sensor_Radiance_500m`` - For ``VNP46A2``, uses ``Gap_Filled_DNB_BRDF-Corrected_NTL`` - For ``VNP46A3``, uses ``NearNadir_Composite_Snow_Free``. - For ``VNP46A4``, uses ``NearNadir_Composite_Snow_Free``. Returns ------ output_path: Path Path to which export GeoTIFF file """ output_path = Path(output_directory, f.name).with_suffix(".tif") product_id = Product(f.stem.split(".")[0]) if variable is None: variable = VARIABLE_DEFAULT.get(product_id) with h5py.File(f, "r") as h5_data: attrs = h5_data.attrs if product_id in [Product.VNP46A1, Product.VNP46A2]: dataset = h5_data["HDFEOS"]["GRIDS"]["VNP_Grid_DNB"]["Data Fields"][ variable ] left, bottom, right, top = ( attrs.get("WestBoundingCoord"), attrs.get("SouthBoundingCoord"), attrs.get("EastBoundingCoord"), attrs.get("NorthBoundingCoord"), ) qf = h5_data["HDFEOS"]["GRIDS"]["VNP_Grid_DNB"]["Data Fields"][ "Mandatory_Quality_Flag" ] else: dataset = h5_data["HDFEOS"]["GRIDS"]["VIIRS_Grid_DNB_2d"]["Data Fields"][ variable ] lat = h5_data["HDFEOS"]["GRIDS"]["VIIRS_Grid_DNB_2d"]["Data Fields"]["lat"] lon = h5_data["HDFEOS"]["GRIDS"]["VIIRS_Grid_DNB_2d"]["Data Fields"]["lon"] left, bottom, right, top = min(lon), min(lat), max(lon), max(lat) if len(quality_flag_rm) > 0: variable_short = variable variable_short = re.sub("_Num", "", variable_short) variable_short = re.sub("_Std", "", variable_short) h5_names = list( h5_data["HDFEOS"]["GRIDS"]["VIIRS_Grid_DNB_2d"][ "Data Fields" ].keys() ) if (qf_name := f"{variable_short}_Quality") in h5_names: qf = h5_data["HDFEOS"]["GRIDS"]["VIIRS_Grid_DNB_2d"]["Data Fields"][ qf_name ] if variable in h5_names: qf = h5_data["HDFEOS"]["GRIDS"]["VIIRS_Grid_DNB_2d"]["Data Fields"][ variable ] # Extract data and attributes scale_factor = dataset.attrs.get("scale_factor", 1) offset = dataset.attrs.get("offset", 0) data = scale_factor * dataset[:] + offset qf = qf[:] for val in quality_flag_rm: data = np.where(qf == val, np.nan, data) # Get geospatial metadata (coordinates and attributes) height, width = data.shape transform = from_origin( left, top, (right - left) / width, (top - bottom) / height, ) with rasterio.open( output_path, "w", driver="GTiff", height=height, width=width, count=1, dtype=data.dtype, crs="EPSG:4326", transform=transform, ) as dst: dst.write(data, 1) dst.update_tags(**attrs) return output_path def transform(da: xr.DataArray): """Return Affice transformation""" left, bottom, right, top = ( da["x"].min(), da["y"].min(), da["x"].max(), da["y"].max(), ) height, width = da.shape return from_origin( left, top, (right - left) / width, (top - 
bottom) / height, ) def _pivot_paths_by_date(paths: List[Path]): """Return dictionary of paths by date Returns ------- dict """ results = {} for p in paths: key = datetime.datetime.strptime(p.stem.split(".")[1], "A%Y%j").date() if key not in results: results[key] = [] results[key].append(p) return results @validate_call(config=ConfigDict(arbitrary_types_allowed=True)) def bm_raster( gdf: geopandas.GeoDataFrame, product_id: Product, date_range: datetime.date | List[datetime.date], bearer: str, variable: Optional[str] = None, quality_flag_rm: List[int] = [255], check_all_tiles_exist: bool = True, file_directory: Optional[Path] = None, file_prefix: Optional[str] = None, file_skip_if_exists: bool = True, ): """Create a stack of nighttime lights rasters by retrieiving from `NASA Black Marble <https://blackmarble.gsfc.nasa.gov>`_ data. Parameters ---------- roi: geopandas.GeoDataFrame Region of interest product_id: Product NASA Black Marble product suite (VNP46) identifier. The available products are shown in following list: - ``VNP46A1``: Daily (raw) - ``VNP46A2``: Daily (corrected) - ``VNP46A3``: Monthly - ``VNP46A4``: Annual date_range: datetime.date | List[datetime.date] Date range (single date or list of dates) for which to retrieve NASA Black Marble data. bearer: str NASA Earthdata Bearer token. Please refer to the `documentation <https://worldbank.github.io/blackmarblepy/examples/blackmarblepy.html#nasa-earthdata-bearer-token>`_. variable: str, default = None Variable to create GeoTIFF raster. Further information, pleae see the `NASA Black Marble User Guide <https://ladsweb.modaps.eosdis.nasa.gov/api/v2/content/archives/Document%20Archive/Science%20Data%20Product%20Documentation/VIIRS_Black_Marble_UG_v1.2_April_2021.pdf>`_ for `VNP46A1`, see Table 3; for `VNP46A2` see Table 6; for `VNP46A3` and `VNP46A4`, see Table 9. By default, it uses the following default variables: - For ``VNP46A1``, uses ``DNB_At_Sensor_Radiance_500m`` - For ``VNP46A2``, uses ``Gap_Filled_DNB_BRDF-Corrected_NTL`` - For ``VNP46A3``, uses ``NearNadir_Composite_Snow_Free``. - For ``VNP46A4``, uses ``NearNadir_Composite_Snow_Free``. quality_flag: List[int], default = [255] Quality flag values to use to set values to ``NA``. Each pixel has a quality flag value, where low quality values can be removed. Values are set to ``NA`` for each value in ther ``quality_flag_rm`` vector. For ``VNP46A1`` and ``VNP46A2`` (daily data): - ``0``: High-quality, Persistent nighttime lights - ``1``: High-quality, Ephemeral nighttime Lights - ``2``: Poor-quality, Outlier, potential cloud contamination, or other issues - ``255``: No retrieval, Fill value (masked out on ingestion) For ``VNP46A3`` and ``VNP46A4`` (monthly and annual data): - ``0``: Good-quality, The number of observations used for the composite is larger than 3 - ``1``: Poor-quality, The number of observations used for the composite is less than or equal to 3 - ``2``: Gap filled NTL based on historical data - ``255``: Fill value check_all_tiles_exist: bool, default=True Check whether all Black Marble nighttime light tiles exist for the region of interest. Sometimes not all tiles are available, so the full region of interest may not be covered. By default (True), it skips cases where not all tiles are available. file_directory: pathlib.Path, optional Where to produce output. By default, the output will be procuded onto a temporary directory. 
file_prefix: str, optional Prefix file_skip_if_exists: bool, default=True Whether to skip downloading or extracting data if the data file for that date already exists. Returns ------- xarray.Dataset A Xarray dataset contaning a stack of nighttime lights rasters """ # Validate and fix args if not isinstance(quality_flag_rm, list): quality_flag_rm = [quality_flag_rm] if not isinstance(date_range, list): date_range = [date_range] if variable is None: variable = VARIABLE_DEFAULT.get(product_id) match product_id: case Product.VNP46A3: date_range = sorted(set([d.replace(day=1) for d in date_range])) case Product.VNP46A4: date_range = sorted(set([d.replace(day=1, month=1) for d in date_range])) # Download and construct Dataset with file_directory if file_directory else tempfile.TemporaryDirectory() as d:
downloader = BlackMarbleDownloader(bearer, d)
0
2023-10-26 09:17:26+00:00
8k
TopGuru777/badsecrets
tests/all_modules_test.py
[ { "identifier": "check_all_modules", "path": "badsecrets/base.py", "snippet": "def check_all_modules(*args, **kwargs):\n for m in BadsecretsBase.__subclasses__():\n x = m(custom_resource=kwargs.get(\"custom_resource\", None))\n r = x.check_secret(*args[0 : x.check_secret_args])\n if r:\n r[\"detecting_module\"] = m.__name__\n r[\"description\"] = x.get_description()\n\n # allow the module to provide an amended product, if needed\n if \"product\" not in r.keys():\n r[\"product\"] = args[0]\n r[\"location\"] = \"manual\"\n return r\n return None" }, { "identifier": "carve_all_modules", "path": "badsecrets/base.py", "snippet": "def carve_all_modules(**kwargs):\n results = []\n for m in BadsecretsBase.__subclasses__():\n x = m(custom_resource=kwargs.get(\"custom_resource\", None))\n r_list = x.carve(**kwargs)\n if len(r_list) > 0:\n for r in r_list:\n r[\"detecting_module\"] = m.__name__\n results.append(r)\n if results:\n return results" } ]
import requests import requests_mock from badsecrets.base import check_all_modules, carve_all_modules
7123
tests = [ "yJrdyJV6tkmHLII2uDq1Sl509UeDg9xGI4u3tb6dm9BQS4wD08KTkyXKST4PeQs00giqSA==", "eyJoZWxsbyI6IndvcmxkIn0.XDtqeQ.1qsBdjyRJLokwRzJdzXMVCSyRTA", "vpwClvnLODIx9te2vO%2F4e06KzbKkjtwmNnMx09D1Dmau0dPliYzgpqB9MnEqhPNe3fWemQyH25eLULJi8KiYHXeHvjfS1TZAL2o5Gku1gJbLuqusRXZQYTNlU2Aq4twXO0o0CgVUTfknU89iw0ceyaKjSteOhxGvaE3VEDfiKDd8%2B9j9vD3qso0mLMqn%2Btxirc%2FkIq5oBbzOCgMrJjkaPMa2SJpc5QI2amffBJ%2BsAN25VH%2BwabEJXrjRy%2B8NlYCoUQQKrI%2BEzRSdBsiMOxQTD4vz2TCjSKrK5JEeFMTyE7J39MhXFG38Bq%2FZMDO%2FETHHdsBtTTkqzJ2odVArcOzrce3Kt2%2FqgTUPW%2BCjFtkSNmh%2FzlB9BhbxB1kJt1NkNsjywvP9j7PvNoOBJsa8OwpEyrPTT3Gm%2BfhDwtjvwpvN7l7oIfbcERGExAFrAMENOOt4WGlYhF%2F8c9NcDv0Bv3YJrJoGq0rRurXSh9kcwum9nB%2FGWcjPikqTDm6p3Z48hEnQCVuJNkwJwIKEsYxJqCL95IEdX3PzR81zf36uXPlEa3YdeAgM1RD8YGlwlIXnrLhvMbRvQW0W9eoPzE%2FjP68JGUIZc1TwTQusIWjnuVubFTEUMDLfDNk12tMwM9mfnwT8lWFTMjv9pF70W5OtO7gVN%2BOmCxqAuQmScRVExNds%2FF%2FPli4oxRKfgI7FhAaC%2Fu1DopZ6vvBdUq1pBQE66fQ9SnxRTmIClCpULUhNO90ULTpUi9ga2UtBCTzI8z6Sb6qyQ52NopNZMFdrn9orzdP8oqFeyYpF%2BQEtbp%2F5AMENkFkWUxHZn8NoSlO8P6G6ubSyDdY4QJPaFS4FxNhhm85WlZC9xfEZ1AGSSBOu9JJVYiKxXnL1yYLqrlWp5mfBHZeUBwEa%2FMjGxZEVYDhXo4PiU0jxN7fYmjaobp3DSgA5H3BcFuNG5d8CUnOlQcEie5b%2BUHOpI9zAk7qcuEUXbaZ5Mvh0t2jXCRALRKYDyBdbHlWAFo10dTIM6L3aSTM5uEz9%2FalXLXoWlMo7dTDpuO5bBfTq7YkoPExL3g3JJX47UhuLq85i3%2Bzxfvd7r%2Fmid69kbD3PnX%2Bj0QxaiShhyOZg6jl1HMeRRXvZap3FPCIfxbCf7j2TRqB5gYefBIIdGYjrdiL6HS8SbjXcROMwh2Fxnt505X4jmkmDcGmneU3z%2B84TSSFewcSpxGEGvHVkkU4OaT6vyFwsxCmdrR187tQZ7gn3ZkAiTps%2FfOPcL5QWXja06Z%2FHT3zboq6Hj9v9NBHzpC1eAK0YN8r4V2UMI3P0%2FsIPQYXhovoeLjJwq6snKZTX37ulE1mbS1uOY%2BZrvFYbLN5DdNL%2B%2Bl%2F%2BcWIpc0RSYBLo19xHpKeoeLjU2sxaYzK%2B92D4zKANdPPvsHPqJD1Y%2FBwCL%2FfZKaJfRK9Bj09ez1Z1ixTEKjIRCwuxijnJGq33faZchbwpMPpTfv43jEriGwXwoqOo9Mbj9ggPAil7O81XZxNT4vv4RoxXTN93V100rt3ClXauL%2BlNID%2BseN2CEZZqnygpTDf2an%2FVsmJGJJcc0goW3l43mhx2U79zeuT94cFPGpvITEbMtjmuNsUbOBuw6nqm5rAs%2FxjIsDRqfQxGQWfS0kuwuU6RRmiME2Ps0NrBENIbZzcbgw6%2BRIwClWkvEG%2BK%2FPdcAdfmRkAPWUNadxnhjeU2jNnzI1yYNIOhziUBPxgFEcAT45E7rWvf8ghT08HZvphzytPmD%2FxuvJaDdRgb6a30TjSpa7i%2BEHkIMxM5eH1kiwhN6xkTcBsJ87epGdFRWKhTGKYwCbaYid1nRs7%2BvQEU7MRYghok8KMTueELipohm3otuKo8V4a7w4TgTSBvPE%2BLPLJRwhM8KcjGlcpzF1NowRo6zeJJhbdPpouUH2NJzDcp7P4uUuUB9Cxt9B986My6zDnz1eyBvRMzj7TABfmfPFPoY3RfzBUzDm%2FA9lOGsM6d9WZj2CH0WxqiLDGmP1Ts9DWX%2FsYyqEGK5R1Xpnp7kRIarPtYliecp50ZIH6nqSkoCBllMCCE6JN%2BdoXobTpulALdmQV0%2Bppv%2FAjzIJrTHgX7jwRGEAeRgAxTomtemmIaH5NtV7xt8XS%2BqwghdJl1D06%2FWhpMtJ1%2FoQGoJ0%2F7ChYyefyAfsiQNWsO66UNVyl71RVPwATnbRO5K5mtxn0M2wuXXpAARNh6pQTcVX%2FTJ4jmosyKwhI6I870NEOsSaWlKVyOdb97C3Bt0pvzq8BagV5FMsNtJKmqIIM0HRkMkalIyfow9iS%2B5xGN5eKM8NE4E6hO4CvmpG%2BH2xFHTSNzloV0FjLdDmj5UfMjhUuEb3rkKK1bGAVaaherp6Ai6N4YJQzh%2FDdpo6al95EZN2OYolzxitgDgsWVGhMvddyQTwnRqRY04hdVJTwdhi4TiCPbLJ1Wcty2ozy6VDs4w77EOAQ5JnxUmDVPA3vXmADJZR0hIJEsuxXfYg%2BRIdV4fzGunV4%2B9jpiyM9G11iiesURK82o%2BdcG7FaCkkun2K2bvD6qGcL61uhoxNeLVpAxjrRjaEBrXsexZ9rExpMlFD8e3NM%2B0K0LQJvdEvpWYS5UTG9cAbNAzBs%3DpDsPXFGf2lEMcyGaK1ouARHUfqU0fzkeVwjXU9ORI%2Fs%3D", "qAAAAAQDAgEBAAAAvAIAAAAAAAAsAAAABABTaGRyAk4AdQg4AC4AMQAwABRhZGwcBykRPNQv++kTK0KePPqVVGgAAAAFAFNkYXRhXHicHYc7DkBQAATnIUqVa3jxLRzApxJBrxA18bmdw1l2k9nZG/Bcxxjt4/An3NnYOVlZOMRL7ld0NAQ9IzUTMy0DeUpMqkYkso+ZGFNiKbRW//Pyb0Guzwtozw4Q", ".eJxVjLsOAiEURP-F2hAuL8HSfr-BAPciq4ZNlt3K-O9KsoU2U8w5My8W4r7VsHdaw4zswoCdfrsU84PaAHiP7bbwvLRtnRMfCj9o59OC9Lwe7t9Bjb2OtbMkAEGQtQjekykmJy9JZIW-6CgUaCGsA6eSyV65s1Qya_xGKZrY-wPVYjdw:1ojOrE:bfOktjgLlUykwCIRIpvaTZRQMM3-UypscEN57ECtXis", 
"dUEvRldLekFNcklGZ3ZSbU1XaHJ0ZGxsLzhYTHlNTW43T3BVN05kZXE3WUhQOVVKbVA3Rm5WaSs5eG5QQ1VIRVBzeDFNTnNpZ0xCM1FKbzFZTEJISzhaNzFmVGYzME0waDFURVpCYm5TQlJFRmRFclYzNUZhR3VuN29PMmlkVHBrRi8wb3AwZWgvWmxObkFOYnpkeHR1YWpWZ3lnN0Y4ZW9xSk9LNVlQd0U4MmFsbWtLZUI5VzkzRkM4YXBFWXBWLS15L00xME1nVFp2ZTlmUWcxZVlpelpnPT0=--7efe7919a5210cfd1ac4c6228e3ff82c0600d841", "eyJhbGciOiJIUzI1NiJ9.eyJJc3N1ZXIiOiJJc3N1ZXIiLCJVc2VybmFtZSI6IkJhZFNlY3JldHMiLCJleHAiOjE1OTMxMzM0ODMsImlhdCI6MTQ2NjkwMzA4M30.ovqRikAo_0kKJ0GVrAwQlezymxrLGjcEiW_s3UJMMCo", "owOnMokk%2F4N7IMo6gznRP56OYIT34dZ1Bh0KBbXlFgztgiNNEBYrgWRYDBkDlX8BIFYBcBztC3NMwoT%2FtNF%2Ff2nCsA37ORIgfBem1foENqumZvmcTpQuoiXXbMWW8oDjs270y6LDAmHhCRsl4Itox4NSBwDgMIOsoMhNrMigV7o7jlgU16L3ezISSmVqFektKmu9qATIXme63u4IKk9UL%2BGP%2Fk3NPv9MsTEVH1wMEf4MApH5KfWBX96TRIc9nlp3IE5BEWNMvI1Gd%2BWXbY5cSY%2Buey2mXQ%2BAFuXAernruJDm%2BxK8ZZ09TNsn5UREutvNtFRrePA8tz3r7p14yG756E0vrU7uBz5TQlTPNUeN3shdxlMK5Qzw1EqxRZmjhaRpMN0YZgmjIpzFgrTnT0%2Bo0f6keaL8Z9TY8vJN8%2BEUPoq%2F7AJiHKm1C8GNc3woVzs5mJKZxMUP398HwGTDv9KSwwkSpHeXFsZofbaWyG0WuNldHNzM%2FgyWMsnGxY6S086%2F477xEQkWdWG5UE%2FowesockebyTTEn3%2B%2FqiVy%2FIOxXvMpvrLel5nVY%2FSouHp5n2URRyRsfo%2B%2BOXJZo7yxKQoYBSSkmxdehJqKJmbgxNp5Ew8m89xAS5g99Hzzg382%2BxFp8yoDVZMOiTEuw0J%2B4G6KizqRW9cis%2FELd0aDE1V7TUuJnFrX%2BlCLOiv100tKpeJ0ePMOYrmvSn0wx7JhswNuj%2BgdKqvCnMSLakGWiOHxu5m9Qqdm3s5sk7nsaxMkh8IqV%2BSzB9A2K1kYEUlY40II1Wun67OSdLlYfdCFQk4ED0N%2BV4kES%2F1xpGiaPhxjboFiiV%2BkvCyJfkuotYuN%2B42CqFyAyepXPA%2BR5jVSThT6OIN2n1UahUnrD%2BwKKGMA9QpVPTSiGLen2KSnJtXISbrl2%2BA2AnQNH%2BMEwYVNjseM0%2BAosbgVfNde2ukMyugo%2FRfrRM27cbdVlE0ms0uXhlgKAYJ2ZN54w1tPWhpGxvZtB0keWpZan0YPh8CBgzsAIMa04HMYLCtgUTqxKqANoKXSy7VIJUzg3fl%2F2WUELjpXK9gRcgexNWDNB1E0rHd9PUo0PvpB4fxSrRpb1LRryipqsuoJ8mrpOVrVMvjracBvtoykK3GrN%2FDUlXkSG%2FAeBQN7HwDJ9QPi3AtEOohp78Op3nmbItXo7IJUSjzBNzUYR8YPj6Ud7Fje9LZSwMBngvgx%2BOKy6HsV4ofOAU2%2FK1%2BfxI0KkCeoSso9NJHWgBD7ijfXUa1Hrc%2FuNU3mTlSSVp3VStQrJbQCkr4paaHYWeeO4pRZCDSBNUzs9qq3TDePwpEQc4QROrw5htdniRk26lFIFm%2Fzk2nC77Pg%2BrkRC1W%2BlRv0lyXsmXVBCe8F1szpWXHCxHNAJwKH%2FBb%2BV1k6AXFXVWPW5vADbXUvRu0s6KLaqu6a0KCB7dt3K2Ni%2FI6O%2FmISYXzknbMrwwakNfajbRF2ibodgR9R9xvoCoCXa3ka7%2Fejr%2BmsZ2HvPKUAffd2fNIWCQrejfpuIoOWiYx6ufN8E41HetCbYfvsI6JQfPOEdOYWI2px%2BLdfO3Nybq99%2BRSQOhjNZakBP54ozlCUfwgpLOmTBwsswZexv1RK5MIi8%2FWtjlJ%2FKjkYxdkFUlwggGS2xDwzcyl2%2FakNCQ5YmxjU8cRY7jZQRMo%2F8uTw5qa2MNZPaQGI18uRgr0i%2FTX3t57fJYCpMLXSaUKIdO7O%2FCQhIyGTS6KrPN%2B3%2FgUb%2BPQ1viGhpnWfGEYF9vhIlK57z8G8G82UQ3DpttD7M8mQ0KsmCOq75ECx9CWrWGk51vADlm%2BLEZ5oWjVMs%2FThki40B7tL7gzFrBuQksWXYeubMzZfFo4ZQ49di4wupHG5kRsyL2fJUzgpaLDP%2BSe6%2FjCnc52C7lZ3Ls0cHJVf9HRwDNXWM%2B4h8donNy5637QWK%2BV7mlH%2FL4xBZCfU9l6sIz%2FWHMtRaQprEem6a%2FRwPRDBiP65I2EwZLKGY8I%2F1uXJncwC8egLu82JY9maweI0VmJSmRcTf0evxqqe7vc9MqpsUlpSVNh4bFnxVIo5E4PGX70kVaTFe0vu1YdGKmFX5PLvkmWIf%2FnwfgPMqYsa0%2F09trboJ5LGDEQRXSBb7ldG%2FwLdOiqocYKAb91SMpn1fXVPBgkPM27QZxHnSAmWVbJR2%2FIhO%2BIVNzkgFAJlptiEPPPTxuBh%2BTT7CaIQE3oZbbJeQKvRkrt4bawTCOzciU%2F1zFGxubTJTSyInjQ8%2F1tVo7KjnxPKqGSfwZQN%2FeWL6R%2FpvCb%2BE6D4pdyczoJRUWsSNXNnA7QrdjgGNWhyOMiKvkDf3RD4mrXbul18WYVTsLyp0hvQsbdwBWOh7VlwfrWdy%2BklsttFi%2B%2BadKR7DbwjLTcxvdNpTx1WJhXROR8jwW26VEYSXPVqWnYvfyZo4DojKHMSDMbAakbuSJdkGP1d5w0AYbKlAcVQOqp9hbAvfwwLy4ErdIsOg0YEeCcnQVRAXwaCI9JvWWmM%2FzYJzE3X45A6lU9Pe7TAbft810MYh7lmV6Keb5HI6qXFiD%2B8khBZqi%2FsK6485k0a86aWLxOb4Eqnoc41x%2BYPv5CWfvP6cebsENo%3D%2BIUg0f64C4y77N4FZ6C82m5wMpvDQIHqx0ZFIHLhwMg%3D", "8H61sylBH/Ad3thZCGDVLyaso2g499GnjAuqpNapesoJgoo5Zk3nxDqXoWfRDwzmKk6eDLTyWViTRTdnr8Su7+XzW6MMAcZo+Fa7UwdfE4pKJ2+z6OYK58l+/93LHZmgVUF5dqI3G8mLr3uI", 
"H4sIAAAAAAAAAAG4BEf7SqmRq5Y9DfCIR9QLZ9wfMXuwWMtbz4CYqd0%2FCCMNXbRgEOJmkCbpKBJXQ%2BAz78OO%2FufCpa1k1nqcEgNxRzRnKKNVBBPMov%2FE%2BXFqh%2Bb5KZLhJvXicwGSIuVshN1XYpSRzKrosUB0ykN8j9hA90IA5AulHsXIofHj07FlFC%2BTbQqVZ7jKeHDurUkVhf8WQ1up%2BVO9KZwQU6WZzsF5y6AkidThF411avCLTxGAtIC7uZBnzMLL4duUf7YtdIDHt4UWGsXCI7ItciWv4Dzk9w5bKeWRRLp1W1pbniEQY01lTulTZBYPuLtna6pB0I3EJ5bV4c3Gktdd1YAVQcBQ2Yy5TW92YEclM99vW9mwu6xD8ZRYJNIb622TjjFMvmR4u4sNh%2BdgL5MlagVpvQjIxUmP7TzelScfku0PrKnKve2zzG6m8czF2WgbQcSLk%2B6TJAijmezo0byTzBsc0FbiI16jm7OBn%2Bi4xCBJQ0AHtu%2Bj2kUE3SUp3wnwgvCR9EnQIw%2F8p2PIp1h6FG6QOIKamihDeY9r5RCW7yLds5vwmUgT9mPTfN%2B%2Fjpzp4U4axfZv5yrVyMSpsuDEhj0H0CjYQMssn%2BsXMYOJGLqv%2FF0SrGrtcAGYv12%2B17PybzbqrXGe8xYR%2B9wHaKX3CD5Ak3IE0CiILhEIZrDICPTifm8%2FygUDztVZmHwpM6HBpF2inkGbaX6Fa8BOrMJaEqZWAualYYBth37jWyqCKV01TWFfHtS7y7kvkWOPwYYORzx9IKO5yyFrftg4hCH7f5vtHsMoyP8CcWPh9c82O70CIlscfLURWeoAyXv1FYtgC6pBLVlgdHEjMzjKvK7DRtJliNPl0VGazg5jTAYHtuwdc23jIjwBfG0MXpPjkw%2BVR179clfwK4t1VfJTJF8F02EXZXaZzCA7cH%2B%2B3bQaXOpvZBTFGdD9JnwRp2vEhy8%2BWMXhd7C%2BcmliOvraOoK%2Fksa9PNarTZJTTJuZupvYwBWhx%2F2vVDEdCM81Z7bFgb0wGd9ViHIOz0MH8v%2FIgn6qd2ojjnkJ29MfSfhtRi%2BXAvmgFXoIhlIBXBwapozxsKcDXOc5JRWpK%2F7y4naW7Fuogp1oU1fHXOXnQh8FAsjgyqn3J0acyY7FDKtkAjxDTMThh1GrA4dLvvLjPx%2FKUMeCQSZ1Y01X%2BNVRbxXBLGLkDbcBHNmkTTaxbsctSBBMSyOYQfG5W9%2Bhw9D2AFSWwFAuz%2BCDvsPSze0CYDoG9lbuYnW2wseNiKYItaSQhUbnq3SGVcjy1JouogVK63TDGTwE8Cy3UoNrAz%2FzV7AaoVjytyuMBqOTYBS%2BSLif1R2qqeut0ID%2BCudcjrKJvcP1J8rHV%2F5h2lRNj7tW0wVQS4XtqpnPy90BhF%2BgcfCy7FtRJbH8i5HAl5FY1OpZQ68ig12imShpNI%2FgHuO2q3n5%2FVUFia7fwHqkkuZBRZHreEvEyPlUpgwJhpCBS3F8b1ViO2G5zsTNF9TR%2BzW8UJVG2lhMdcvZw92dg%2F74tndJ8LzhVrQrG5au9yu6fUExO5MNz6izVMFzOxG6FqxUcm8otgf6qqSBi23jrMceNzAT8LcREGoVvjmj8uINrJbJt9ZfXb%2BaIYsMGsc2uAQAAA%3D%3D", "https://localhost/_fragment?_path=_controller%3Dsystem%26command%3Did%26return_value%3Dnull&_hash=Xnsvx/yLVQaimEd1CfepgH0rEXr422JnRSn/uaCE3gs=", "s%3A8FnPwdeM9kdGTZlWvdaVtQ0S1BCOhY5G.qys7H2oGSLLdRsEq7sqh7btOohHsaRKqyjV4LiVnBvc", "eyJpdiI6IlhlNTZ2UjZUQWZKVHdIcG9nZFkwcGc9PSIsInZhbHVlIjoiRlUvY2grU1F1b01lSXdveXJ0T3N1WGJqeVVmZlNRQjNVOWxiSzljL1Z3RDhqYUdDbjZxMU9oSThWRzExT0YvUmthVzVKRE9kL0RvTEw1cFRhQkphOGw4S2loV1ZrMkkwTHd4am9sZkJQd2VCZ3R0VlFSeFo3ay9wTlBMb3lLSG8iLCJtYWMiOiJkMmU3M2ExNDc2NTc5YjAwMGMwMTdkYTQ1NThkMjRkNTY2YTE4OTg2MzY5MzE5NGZmOTM4YWVjOGZmMWU4NTk2IiwidGFnIjoiIn0%3D", ] negative_tests = [ "AAAAAAAA", "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkJhZFNpZ25hdHVyZSIsImlhdCI6MTUxNjIzOTAyMn0.S_8lg9Pzezv8JhXT3cppPZcz046cFM8H1o1GJYYAAAA", "AAAA℗", ] def test_check_all(): # Confirm each of the examples produced a positive result for test in tests:
tests = [ "yJrdyJV6tkmHLII2uDq1Sl509UeDg9xGI4u3tb6dm9BQS4wD08KTkyXKST4PeQs00giqSA==", "eyJoZWxsbyI6IndvcmxkIn0.XDtqeQ.1qsBdjyRJLokwRzJdzXMVCSyRTA", "vpwClvnLODIx9te2vO%2F4e06KzbKkjtwmNnMx09D1Dmau0dPliYzgpqB9MnEqhPNe3fWemQyH25eLULJi8KiYHXeHvjfS1TZAL2o5Gku1gJbLuqusRXZQYTNlU2Aq4twXO0o0CgVUTfknU89iw0ceyaKjSteOhxGvaE3VEDfiKDd8%2B9j9vD3qso0mLMqn%2Btxirc%2FkIq5oBbzOCgMrJjkaPMa2SJpc5QI2amffBJ%2BsAN25VH%2BwabEJXrjRy%2B8NlYCoUQQKrI%2BEzRSdBsiMOxQTD4vz2TCjSKrK5JEeFMTyE7J39MhXFG38Bq%2FZMDO%2FETHHdsBtTTkqzJ2odVArcOzrce3Kt2%2FqgTUPW%2BCjFtkSNmh%2FzlB9BhbxB1kJt1NkNsjywvP9j7PvNoOBJsa8OwpEyrPTT3Gm%2BfhDwtjvwpvN7l7oIfbcERGExAFrAMENOOt4WGlYhF%2F8c9NcDv0Bv3YJrJoGq0rRurXSh9kcwum9nB%2FGWcjPikqTDm6p3Z48hEnQCVuJNkwJwIKEsYxJqCL95IEdX3PzR81zf36uXPlEa3YdeAgM1RD8YGlwlIXnrLhvMbRvQW0W9eoPzE%2FjP68JGUIZc1TwTQusIWjnuVubFTEUMDLfDNk12tMwM9mfnwT8lWFTMjv9pF70W5OtO7gVN%2BOmCxqAuQmScRVExNds%2FF%2FPli4oxRKfgI7FhAaC%2Fu1DopZ6vvBdUq1pBQE66fQ9SnxRTmIClCpULUhNO90ULTpUi9ga2UtBCTzI8z6Sb6qyQ52NopNZMFdrn9orzdP8oqFeyYpF%2BQEtbp%2F5AMENkFkWUxHZn8NoSlO8P6G6ubSyDdY4QJPaFS4FxNhhm85WlZC9xfEZ1AGSSBOu9JJVYiKxXnL1yYLqrlWp5mfBHZeUBwEa%2FMjGxZEVYDhXo4PiU0jxN7fYmjaobp3DSgA5H3BcFuNG5d8CUnOlQcEie5b%2BUHOpI9zAk7qcuEUXbaZ5Mvh0t2jXCRALRKYDyBdbHlWAFo10dTIM6L3aSTM5uEz9%2FalXLXoWlMo7dTDpuO5bBfTq7YkoPExL3g3JJX47UhuLq85i3%2Bzxfvd7r%2Fmid69kbD3PnX%2Bj0QxaiShhyOZg6jl1HMeRRXvZap3FPCIfxbCf7j2TRqB5gYefBIIdGYjrdiL6HS8SbjXcROMwh2Fxnt505X4jmkmDcGmneU3z%2B84TSSFewcSpxGEGvHVkkU4OaT6vyFwsxCmdrR187tQZ7gn3ZkAiTps%2FfOPcL5QWXja06Z%2FHT3zboq6Hj9v9NBHzpC1eAK0YN8r4V2UMI3P0%2FsIPQYXhovoeLjJwq6snKZTX37ulE1mbS1uOY%2BZrvFYbLN5DdNL%2B%2Bl%2F%2BcWIpc0RSYBLo19xHpKeoeLjU2sxaYzK%2B92D4zKANdPPvsHPqJD1Y%2FBwCL%2FfZKaJfRK9Bj09ez1Z1ixTEKjIRCwuxijnJGq33faZchbwpMPpTfv43jEriGwXwoqOo9Mbj9ggPAil7O81XZxNT4vv4RoxXTN93V100rt3ClXauL%2BlNID%2BseN2CEZZqnygpTDf2an%2FVsmJGJJcc0goW3l43mhx2U79zeuT94cFPGpvITEbMtjmuNsUbOBuw6nqm5rAs%2FxjIsDRqfQxGQWfS0kuwuU6RRmiME2Ps0NrBENIbZzcbgw6%2BRIwClWkvEG%2BK%2FPdcAdfmRkAPWUNadxnhjeU2jNnzI1yYNIOhziUBPxgFEcAT45E7rWvf8ghT08HZvphzytPmD%2FxuvJaDdRgb6a30TjSpa7i%2BEHkIMxM5eH1kiwhN6xkTcBsJ87epGdFRWKhTGKYwCbaYid1nRs7%2BvQEU7MRYghok8KMTueELipohm3otuKo8V4a7w4TgTSBvPE%2BLPLJRwhM8KcjGlcpzF1NowRo6zeJJhbdPpouUH2NJzDcp7P4uUuUB9Cxt9B986My6zDnz1eyBvRMzj7TABfmfPFPoY3RfzBUzDm%2FA9lOGsM6d9WZj2CH0WxqiLDGmP1Ts9DWX%2FsYyqEGK5R1Xpnp7kRIarPtYliecp50ZIH6nqSkoCBllMCCE6JN%2BdoXobTpulALdmQV0%2Bppv%2FAjzIJrTHgX7jwRGEAeRgAxTomtemmIaH5NtV7xt8XS%2BqwghdJl1D06%2FWhpMtJ1%2FoQGoJ0%2F7ChYyefyAfsiQNWsO66UNVyl71RVPwATnbRO5K5mtxn0M2wuXXpAARNh6pQTcVX%2FTJ4jmosyKwhI6I870NEOsSaWlKVyOdb97C3Bt0pvzq8BagV5FMsNtJKmqIIM0HRkMkalIyfow9iS%2B5xGN5eKM8NE4E6hO4CvmpG%2BH2xFHTSNzloV0FjLdDmj5UfMjhUuEb3rkKK1bGAVaaherp6Ai6N4YJQzh%2FDdpo6al95EZN2OYolzxitgDgsWVGhMvddyQTwnRqRY04hdVJTwdhi4TiCPbLJ1Wcty2ozy6VDs4w77EOAQ5JnxUmDVPA3vXmADJZR0hIJEsuxXfYg%2BRIdV4fzGunV4%2B9jpiyM9G11iiesURK82o%2BdcG7FaCkkun2K2bvD6qGcL61uhoxNeLVpAxjrRjaEBrXsexZ9rExpMlFD8e3NM%2B0K0LQJvdEvpWYS5UTG9cAbNAzBs%3DpDsPXFGf2lEMcyGaK1ouARHUfqU0fzkeVwjXU9ORI%2Fs%3D", "qAAAAAQDAgEBAAAAvAIAAAAAAAAsAAAABABTaGRyAk4AdQg4AC4AMQAwABRhZGwcBykRPNQv++kTK0KePPqVVGgAAAAFAFNkYXRhXHicHYc7DkBQAATnIUqVa3jxLRzApxJBrxA18bmdw1l2k9nZG/Bcxxjt4/An3NnYOVlZOMRL7ld0NAQ9IzUTMy0DeUpMqkYkso+ZGFNiKbRW//Pyb0Guzwtozw4Q", ".eJxVjLsOAiEURP-F2hAuL8HSfr-BAPciq4ZNlt3K-O9KsoU2U8w5My8W4r7VsHdaw4zswoCdfrsU84PaAHiP7bbwvLRtnRMfCj9o59OC9Lwe7t9Bjb2OtbMkAEGQtQjekykmJy9JZIW-6CgUaCGsA6eSyV65s1Qya_xGKZrY-wPVYjdw:1ojOrE:bfOktjgLlUykwCIRIpvaTZRQMM3-UypscEN57ECtXis", 
"dUEvRldLekFNcklGZ3ZSbU1XaHJ0ZGxsLzhYTHlNTW43T3BVN05kZXE3WUhQOVVKbVA3Rm5WaSs5eG5QQ1VIRVBzeDFNTnNpZ0xCM1FKbzFZTEJISzhaNzFmVGYzME0waDFURVpCYm5TQlJFRmRFclYzNUZhR3VuN29PMmlkVHBrRi8wb3AwZWgvWmxObkFOYnpkeHR1YWpWZ3lnN0Y4ZW9xSk9LNVlQd0U4MmFsbWtLZUI5VzkzRkM4YXBFWXBWLS15L00xME1nVFp2ZTlmUWcxZVlpelpnPT0=--7efe7919a5210cfd1ac4c6228e3ff82c0600d841", "eyJhbGciOiJIUzI1NiJ9.eyJJc3N1ZXIiOiJJc3N1ZXIiLCJVc2VybmFtZSI6IkJhZFNlY3JldHMiLCJleHAiOjE1OTMxMzM0ODMsImlhdCI6MTQ2NjkwMzA4M30.ovqRikAo_0kKJ0GVrAwQlezymxrLGjcEiW_s3UJMMCo", "owOnMokk%2F4N7IMo6gznRP56OYIT34dZ1Bh0KBbXlFgztgiNNEBYrgWRYDBkDlX8BIFYBcBztC3NMwoT%2FtNF%2Ff2nCsA37ORIgfBem1foENqumZvmcTpQuoiXXbMWW8oDjs270y6LDAmHhCRsl4Itox4NSBwDgMIOsoMhNrMigV7o7jlgU16L3ezISSmVqFektKmu9qATIXme63u4IKk9UL%2BGP%2Fk3NPv9MsTEVH1wMEf4MApH5KfWBX96TRIc9nlp3IE5BEWNMvI1Gd%2BWXbY5cSY%2Buey2mXQ%2BAFuXAernruJDm%2BxK8ZZ09TNsn5UREutvNtFRrePA8tz3r7p14yG756E0vrU7uBz5TQlTPNUeN3shdxlMK5Qzw1EqxRZmjhaRpMN0YZgmjIpzFgrTnT0%2Bo0f6keaL8Z9TY8vJN8%2BEUPoq%2F7AJiHKm1C8GNc3woVzs5mJKZxMUP398HwGTDv9KSwwkSpHeXFsZofbaWyG0WuNldHNzM%2FgyWMsnGxY6S086%2F477xEQkWdWG5UE%2FowesockebyTTEn3%2B%2FqiVy%2FIOxXvMpvrLel5nVY%2FSouHp5n2URRyRsfo%2B%2BOXJZo7yxKQoYBSSkmxdehJqKJmbgxNp5Ew8m89xAS5g99Hzzg382%2BxFp8yoDVZMOiTEuw0J%2B4G6KizqRW9cis%2FELd0aDE1V7TUuJnFrX%2BlCLOiv100tKpeJ0ePMOYrmvSn0wx7JhswNuj%2BgdKqvCnMSLakGWiOHxu5m9Qqdm3s5sk7nsaxMkh8IqV%2BSzB9A2K1kYEUlY40II1Wun67OSdLlYfdCFQk4ED0N%2BV4kES%2F1xpGiaPhxjboFiiV%2BkvCyJfkuotYuN%2B42CqFyAyepXPA%2BR5jVSThT6OIN2n1UahUnrD%2BwKKGMA9QpVPTSiGLen2KSnJtXISbrl2%2BA2AnQNH%2BMEwYVNjseM0%2BAosbgVfNde2ukMyugo%2FRfrRM27cbdVlE0ms0uXhlgKAYJ2ZN54w1tPWhpGxvZtB0keWpZan0YPh8CBgzsAIMa04HMYLCtgUTqxKqANoKXSy7VIJUzg3fl%2F2WUELjpXK9gRcgexNWDNB1E0rHd9PUo0PvpB4fxSrRpb1LRryipqsuoJ8mrpOVrVMvjracBvtoykK3GrN%2FDUlXkSG%2FAeBQN7HwDJ9QPi3AtEOohp78Op3nmbItXo7IJUSjzBNzUYR8YPj6Ud7Fje9LZSwMBngvgx%2BOKy6HsV4ofOAU2%2FK1%2BfxI0KkCeoSso9NJHWgBD7ijfXUa1Hrc%2FuNU3mTlSSVp3VStQrJbQCkr4paaHYWeeO4pRZCDSBNUzs9qq3TDePwpEQc4QROrw5htdniRk26lFIFm%2Fzk2nC77Pg%2BrkRC1W%2BlRv0lyXsmXVBCe8F1szpWXHCxHNAJwKH%2FBb%2BV1k6AXFXVWPW5vADbXUvRu0s6KLaqu6a0KCB7dt3K2Ni%2FI6O%2FmISYXzknbMrwwakNfajbRF2ibodgR9R9xvoCoCXa3ka7%2Fejr%2BmsZ2HvPKUAffd2fNIWCQrejfpuIoOWiYx6ufN8E41HetCbYfvsI6JQfPOEdOYWI2px%2BLdfO3Nybq99%2BRSQOhjNZakBP54ozlCUfwgpLOmTBwsswZexv1RK5MIi8%2FWtjlJ%2FKjkYxdkFUlwggGS2xDwzcyl2%2FakNCQ5YmxjU8cRY7jZQRMo%2F8uTw5qa2MNZPaQGI18uRgr0i%2FTX3t57fJYCpMLXSaUKIdO7O%2FCQhIyGTS6KrPN%2B3%2FgUb%2BPQ1viGhpnWfGEYF9vhIlK57z8G8G82UQ3DpttD7M8mQ0KsmCOq75ECx9CWrWGk51vADlm%2BLEZ5oWjVMs%2FThki40B7tL7gzFrBuQksWXYeubMzZfFo4ZQ49di4wupHG5kRsyL2fJUzgpaLDP%2BSe6%2FjCnc52C7lZ3Ls0cHJVf9HRwDNXWM%2B4h8donNy5637QWK%2BV7mlH%2FL4xBZCfU9l6sIz%2FWHMtRaQprEem6a%2FRwPRDBiP65I2EwZLKGY8I%2F1uXJncwC8egLu82JY9maweI0VmJSmRcTf0evxqqe7vc9MqpsUlpSVNh4bFnxVIo5E4PGX70kVaTFe0vu1YdGKmFX5PLvkmWIf%2FnwfgPMqYsa0%2F09trboJ5LGDEQRXSBb7ldG%2FwLdOiqocYKAb91SMpn1fXVPBgkPM27QZxHnSAmWVbJR2%2FIhO%2BIVNzkgFAJlptiEPPPTxuBh%2BTT7CaIQE3oZbbJeQKvRkrt4bawTCOzciU%2F1zFGxubTJTSyInjQ8%2F1tVo7KjnxPKqGSfwZQN%2FeWL6R%2FpvCb%2BE6D4pdyczoJRUWsSNXNnA7QrdjgGNWhyOMiKvkDf3RD4mrXbul18WYVTsLyp0hvQsbdwBWOh7VlwfrWdy%2BklsttFi%2B%2BadKR7DbwjLTcxvdNpTx1WJhXROR8jwW26VEYSXPVqWnYvfyZo4DojKHMSDMbAakbuSJdkGP1d5w0AYbKlAcVQOqp9hbAvfwwLy4ErdIsOg0YEeCcnQVRAXwaCI9JvWWmM%2FzYJzE3X45A6lU9Pe7TAbft810MYh7lmV6Keb5HI6qXFiD%2B8khBZqi%2FsK6485k0a86aWLxOb4Eqnoc41x%2BYPv5CWfvP6cebsENo%3D%2BIUg0f64C4y77N4FZ6C82m5wMpvDQIHqx0ZFIHLhwMg%3D", "8H61sylBH/Ad3thZCGDVLyaso2g499GnjAuqpNapesoJgoo5Zk3nxDqXoWfRDwzmKk6eDLTyWViTRTdnr8Su7+XzW6MMAcZo+Fa7UwdfE4pKJ2+z6OYK58l+/93LHZmgVUF5dqI3G8mLr3uI", 
"H4sIAAAAAAAAAAG4BEf7SqmRq5Y9DfCIR9QLZ9wfMXuwWMtbz4CYqd0%2FCCMNXbRgEOJmkCbpKBJXQ%2BAz78OO%2FufCpa1k1nqcEgNxRzRnKKNVBBPMov%2FE%2BXFqh%2Bb5KZLhJvXicwGSIuVshN1XYpSRzKrosUB0ykN8j9hA90IA5AulHsXIofHj07FlFC%2BTbQqVZ7jKeHDurUkVhf8WQ1up%2BVO9KZwQU6WZzsF5y6AkidThF411avCLTxGAtIC7uZBnzMLL4duUf7YtdIDHt4UWGsXCI7ItciWv4Dzk9w5bKeWRRLp1W1pbniEQY01lTulTZBYPuLtna6pB0I3EJ5bV4c3Gktdd1YAVQcBQ2Yy5TW92YEclM99vW9mwu6xD8ZRYJNIb622TjjFMvmR4u4sNh%2BdgL5MlagVpvQjIxUmP7TzelScfku0PrKnKve2zzG6m8czF2WgbQcSLk%2B6TJAijmezo0byTzBsc0FbiI16jm7OBn%2Bi4xCBJQ0AHtu%2Bj2kUE3SUp3wnwgvCR9EnQIw%2F8p2PIp1h6FG6QOIKamihDeY9r5RCW7yLds5vwmUgT9mPTfN%2B%2Fjpzp4U4axfZv5yrVyMSpsuDEhj0H0CjYQMssn%2BsXMYOJGLqv%2FF0SrGrtcAGYv12%2B17PybzbqrXGe8xYR%2B9wHaKX3CD5Ak3IE0CiILhEIZrDICPTifm8%2FygUDztVZmHwpM6HBpF2inkGbaX6Fa8BOrMJaEqZWAualYYBth37jWyqCKV01TWFfHtS7y7kvkWOPwYYORzx9IKO5yyFrftg4hCH7f5vtHsMoyP8CcWPh9c82O70CIlscfLURWeoAyXv1FYtgC6pBLVlgdHEjMzjKvK7DRtJliNPl0VGazg5jTAYHtuwdc23jIjwBfG0MXpPjkw%2BVR179clfwK4t1VfJTJF8F02EXZXaZzCA7cH%2B%2B3bQaXOpvZBTFGdD9JnwRp2vEhy8%2BWMXhd7C%2BcmliOvraOoK%2Fksa9PNarTZJTTJuZupvYwBWhx%2F2vVDEdCM81Z7bFgb0wGd9ViHIOz0MH8v%2FIgn6qd2ojjnkJ29MfSfhtRi%2BXAvmgFXoIhlIBXBwapozxsKcDXOc5JRWpK%2F7y4naW7Fuogp1oU1fHXOXnQh8FAsjgyqn3J0acyY7FDKtkAjxDTMThh1GrA4dLvvLjPx%2FKUMeCQSZ1Y01X%2BNVRbxXBLGLkDbcBHNmkTTaxbsctSBBMSyOYQfG5W9%2Bhw9D2AFSWwFAuz%2BCDvsPSze0CYDoG9lbuYnW2wseNiKYItaSQhUbnq3SGVcjy1JouogVK63TDGTwE8Cy3UoNrAz%2FzV7AaoVjytyuMBqOTYBS%2BSLif1R2qqeut0ID%2BCudcjrKJvcP1J8rHV%2F5h2lRNj7tW0wVQS4XtqpnPy90BhF%2BgcfCy7FtRJbH8i5HAl5FY1OpZQ68ig12imShpNI%2FgHuO2q3n5%2FVUFia7fwHqkkuZBRZHreEvEyPlUpgwJhpCBS3F8b1ViO2G5zsTNF9TR%2BzW8UJVG2lhMdcvZw92dg%2F74tndJ8LzhVrQrG5au9yu6fUExO5MNz6izVMFzOxG6FqxUcm8otgf6qqSBi23jrMceNzAT8LcREGoVvjmj8uINrJbJt9ZfXb%2BaIYsMGsc2uAQAAA%3D%3D", "https://localhost/_fragment?_path=_controller%3Dsystem%26command%3Did%26return_value%3Dnull&_hash=Xnsvx/yLVQaimEd1CfepgH0rEXr422JnRSn/uaCE3gs=", "s%3A8FnPwdeM9kdGTZlWvdaVtQ0S1BCOhY5G.qys7H2oGSLLdRsEq7sqh7btOohHsaRKqyjV4LiVnBvc", "eyJpdiI6IlhlNTZ2UjZUQWZKVHdIcG9nZFkwcGc9PSIsInZhbHVlIjoiRlUvY2grU1F1b01lSXdveXJ0T3N1WGJqeVVmZlNRQjNVOWxiSzljL1Z3RDhqYUdDbjZxMU9oSThWRzExT0YvUmthVzVKRE9kL0RvTEw1cFRhQkphOGw4S2loV1ZrMkkwTHd4am9sZkJQd2VCZ3R0VlFSeFo3ay9wTlBMb3lLSG8iLCJtYWMiOiJkMmU3M2ExNDc2NTc5YjAwMGMwMTdkYTQ1NThkMjRkNTY2YTE4OTg2MzY5MzE5NGZmOTM4YWVjOGZmMWU4NTk2IiwidGFnIjoiIn0%3D", ] negative_tests = [ "AAAAAAAA", "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkJhZFNpZ25hdHVyZSIsImlhdCI6MTUxNjIzOTAyMn0.S_8lg9Pzezv8JhXT3cppPZcz046cFM8H1o1GJYYAAAA", "AAAA℗", ] def test_check_all(): # Confirm each of the examples produced a positive result for test in tests:
r = check_all_modules(test)
0
2023-10-30 12:52:39+00:00
8k
mlvlab/UP-NeRF
tto.py
[ { "identifier": "get_from_path", "path": "configs/config.py", "snippet": "def get_from_path(config_path):\n config = default()\n if config_path is not None:\n merge_from_file(config, config_path)\n\n return config" }, { "identifier": "NeRFSystemOptimize", "path": "models/nerf_system_optmize.py", "snippet": "class NeRFSystemOptimize(NeRFSystem):\n def __init__(self, hparams):\n super().__init__(hparams)\n self.best_psnr = 0\n self.best_ssim = 0\n self.best_lpips = 100\n self.scene = hparams[\"scene_name\"]\n exp_name = hparams[\"exp_name\"]\n self.save_root = os.path.join(\n self.hparams[\"out_dir\"], self.scene, exp_name, \"a_optimize\"\n )\n self.pose_save_dir = os.path.join(self.save_root, \"optimized_pose\")\n if self.hparams[\"pose_optimize\"]:\n self.save_dir = os.path.join(self.save_root, \"optimized_pose\")\n else:\n self.save_dir = os.path.join(self.save_root, \"optimized_emb_a\")\n self.psnr_path = os.path.join(self.save_root, \"psnr.pkl\")\n self.ssim_path = os.path.join(self.save_root, \"ssim.pkl\")\n self.lpips_path = os.path.join(self.save_root, \"lpips.pkl\")\n\n self.best_psnr_dict = {}\n self.best_ssim_dict = {}\n self.best_lpips_dict = {}\n os.makedirs(self.save_dir, exist_ok=True)\n\n def setup(self, stage):\n self.dataset_setup()\n self.model_setup()\n\n def configure_optimizers(self):\n optimizer = []\n if self.hparams[\"pose_optimize\"]:\n self.optimizer = get_optimizer(\n self.hparams[\"optimizer.type\"], 5e-3, self.models_to_train\n )\n optimizer += [self.optimizer]\n\n self.optimizer_pose = get_optimizer(\n self.hparams[\"optimizer_pose.type\"], 1e-4, self.se3_refine\n )\n optimizer += [self.optimizer_pose]\n else:\n self.optimizer = get_optimizer(\"adamw\", 1e-1, self.models_to_train)\n optimizer += [self.optimizer]\n\n return optimizer\n\n def train_dataloader(self):\n return DataLoader(\n self.train_dataset,\n shuffle=True,\n num_workers=8,\n batch_size=self.hparams[\"train.batch_size\"],\n pin_memory=True,\n )\n\n def val_dataloader(self):\n return DataLoader(\n self.val_dataset,\n shuffle=False,\n num_workers=4,\n batch_size=1, # validate one image (H*W rays) at a time\n pin_memory=True,\n )\n\n def forward(self, rays, img_idx, train=True):\n \"\"\"Do batched inference on rays using chunk.\"\"\"\n B = rays.shape[0]\n results = defaultdict(list)\n chunk = B if train else self.hparams[\"val.chunk_size\"]\n for i in range(0, B, chunk):\n rendered_ray_chunks = render_rays(\n models=self.models,\n embeddings=self.embeddings,\n rays=rays[i : i + chunk],\n img_idx=img_idx[i : i + chunk],\n sched_mult=1.0,\n sched_phase=2,\n N_samples=self.hparams[\"nerf.N_samples\"],\n use_disp=self.hparams[\"nerf.use_disp\"],\n perturb=self.hparams[\"nerf.perturb\"] if train else 0,\n N_importance=self.hparams[\"nerf.N_importance\"],\n white_back=self.train_dataset.white_back,\n encode_feat=True if self.hparams[\"nerf.feat_dim\"] > 0 else False,\n validation=False if train else True,\n )\n\n for k, v in rendered_ray_chunks.items():\n results[k] += [v]\n\n for k, v in results.items():\n results[k] = torch.cat(v, 0)\n return results\n\n def training_step(self, batch, batch_nb):\n ray_infos = batch[\"ray_infos\"]\n rgbs = batch[\"rgbs\"]\n img_idx = batch[\"img_idx\"]\n directions = batch[\"directions\"]\n pose = batch[\"c2w\"]\n\n if self.hparams[\"pose_optimize\"]:\n pose_refine = camera_utils.lie.se3_to_SE3(self.se3_refine(img_idx))\n refined_pose = camera_utils.pose.compose([pose_refine, pose])\n rays_o, rays_d = get_rays(directions, refined_pose) # both (h*w, 3)\n else:\n rays_o, 
rays_d = get_rays(directions, pose) # both (h*w, 3)\n rays = torch.cat([rays_o, rays_d, ray_infos], 1)\n\n results = self(rays, img_idx)\n loss = ((results[\"s_rgb_fine\"] - rgbs) ** 2).mean()\n\n if self.hparams[\"pose_optimize\"]:\n self.optimizers()[0].zero_grad()\n self.optimizers()[1].zero_grad()\n self.manual_backward(loss)\n self.optimizers()[0].step()\n self.optimizers()[1].step()\n else:\n self.optimizers().zero_grad()\n self.manual_backward(loss)\n self.optimizers().step()\n\n with torch.no_grad():\n psnr_ = metric_utils.psnr(results[f\"s_rgb_fine\"], rgbs)\n self.log(\"lr\", get_learning_rate(self.optimizer))\n if self.hparams[\"pose_optimize\"]:\n self.log(\"lr_pose\", get_learning_rate(self.optimizers()[1]))\n self.log(\"train/loss\", loss)\n self.log(\"train/psnr\", psnr_, prog_bar=True)\n\n return loss\n\n def validation_step(self, batch, batch_nb):\n ray_infos = batch[\"ray_infos\"][0]\n rgbs = batch[\"rgbs\"][0]\n img_idx = batch[\"img_idx\"][0]\n directions = batch[\"directions\"][0]\n pose = batch[\"c2w\"][0]\n\n # get refined pose\n if self.hparams[\"pose_optimize\"]:\n pose_refine = camera_utils.lie.se3_to_SE3(self.se3_refine(img_idx))\n refined_pose = camera_utils.pose.compose([pose_refine, pose])\n rays_o, rays_d = get_rays(directions, refined_pose) # both (h*w, 3)\n else:\n rays_o, rays_d = get_rays(directions, pose.squeeze()) # both (h*w, 3)\n rays = torch.cat([rays_o, rays_d, ray_infos], 1)\n\n # forward\n results = self(rays, img_idx, train=False)\n loss = ((results[\"s_rgb_fine\"] - rgbs) ** 2).mean()\n\n # log\n idx = self.val_dataset.optimize_num\n if self.hparams[\"dataset_name\"] == \"phototourism\":\n WH = batch[\"img_wh\"]\n W, H = WH[0].item(), WH[1].item()\n else:\n W, H = self.hparams[\"blender.img_wh\"]\n img = results[f\"s_rgb_fine\"].view(H, W, -1).permute(2, 0, 1).cpu() # (3, H, W)\n img_gt = rgbs.view(H, W, 3).permute(2, 0, 1).cpu() # (3, H, W)\n\n psnr_ = metric_utils.psnr(results[f\"s_rgb_fine\"], rgbs)\n ssim_ = metric_utils.ssim(img[None, ...], img_gt[None, ...])\n lpips_ = metric_utils.lpips_alex((img_gt[None, ...]), img[None, ...])\n self.log(\"val/loss\", loss)\n self.log(\"val/psnr\", psnr_)\n self.log(\"val/ssim\", ssim_)\n self.log(\"val/lpips\", lpips_)\n\n ### log image\n if self.logger is not None:\n self.logger.log_image(f\"val_{idx}/viz/GT\", [img_gt])\n self.logger.log_image(f\"val_{idx}/viz/rgb_fine\", [img])\n\n if psnr_ > self.best_psnr:\n self.best_psnr = psnr_\n self.best_ssim = ssim_\n self.best_lpips = lpips_\n if self.hparams[\"pose_optimize\"]:\n save_path = os.path.join(\n self.save_dir,\n \"best_pose_\" + str(self.hparams[\"optimize_num\"]).zfill(2) + \".npy\",\n )\n np.save(save_path, np.array(refined_pose[0].cpu()))\n else:\n save_path = os.path.join(\n self.save_dir,\n \"best_pose_\" + str(self.hparams[\"optimize_num\"]).zfill(2) + \".npy\",\n )\n np.save(save_path, np.array(self.embedding_fine_a(img_idx[0]).cpu()))\n if os.path.isfile(self.psnr_path):\n with open(self.psnr_path, \"rb\") as f:\n self.best_psnr_dict = pickle.load(f)\n with open(self.ssim_path, \"rb\") as f:\n self.best_ssim_dict = pickle.load(f)\n with open(self.lpips_path, \"rb\") as f:\n self.best_lpips_dict = pickle.load(f)\n self.best_psnr_dict[self.hparams[\"optimize_num\"]] = self.best_psnr.cpu()\n self.best_ssim_dict[self.hparams[\"optimize_num\"]] = self.best_ssim.cpu()\n self.best_lpips_dict[\n self.hparams[\"optimize_num\"]\n ] = self.best_lpips.cpu()\n with open(self.psnr_path, \"wb\") as f:\n pickle.dump(self.best_psnr_dict, f)\n with 
open(self.ssim_path, \"wb\") as f:\n pickle.dump(self.best_ssim_dict, f)\n with open(self.lpips_path, \"wb\") as f:\n pickle.dump(self.best_lpips_dict, f)\n\n def validation_epoch_end(self, ourputs):\n pass\n\n def dataset_setup(self):\n dataset = dataset_dict[self.hparams[\"dataset_name\"] + \"_optimize\"]\n kwargs = {\"root_dir\": self.hparams[\"root_dir\"]}\n if self.hparams[\"dataset_name\"] == \"phototourism\":\n kwargs[\"scene_name\"] = self.hparams[\"scene_name\"]\n kwargs[\"img_downscale\"] = self.hparams[\"phototourism.img_downscale\"]\n kwargs[\"use_cache\"] = self.hparams[\"phototourism.use_cache\"]\n kwargs[\"near\"] = self.hparams[\"nerf.near\"]\n kwargs[\"far\"] = self.hparams[\"nerf.far\"]\n kwargs[\"pose_optimize\"] = self.hparams[\"pose_optimize\"]\n kwargs[\"optimize_num\"] = self.hparams[\"optimize_num\"]\n else:\n raise NotImplementedError\n self.train_dataset = dataset(\n split=\"train\", camera_noise=self.hparams[\"pose.noise\"], **kwargs\n )\n self.val_dataset = dataset(\n split=\"val\", camera_noise=self.hparams[\"pose.noise\"], **kwargs\n )\n\n @torch.no_grad()\n def model_setup(self):\n super().model_setup()\n N_images = self.train_dataset.N_images_test\n checkpoint = torch.load(self.hparams[\"ckpt_path\"])\n self.embedding_fine_a = torch.nn.Embedding(\n N_images, self.hparams[\"nerf.appearance_dim\"]\n )\n self.embeddings[\"fine_a\"] = self.embedding_fine_a\n self.models_to_train = [self.embedding_fine_a]\n load_ckpt(self.nerf_coarse, self.hparams[\"ckpt_path\"], model_name=\"nerf_coarse\")\n load_ckpt(self.nerf_fine, self.hparams[\"ckpt_path\"], model_name=\"nerf_fine\")\n self.nerf_coarse.encode_candidate = False\n self.nerf_fine.encode_candidate = False\n # Approximate pose initialization using GT pose before pose optimizing.\n if self.hparams[\"pose_optimize\"]:\n train_se3_refine = torch.nn.Embedding(\n self.train_dataset.N_images_train, 6\n ).to(\"cuda\")\n train_se3_refine.weight[:] = checkpoint[\"state_dict\"][\"se3_refine.weight\"]\n\n gt_train_poses = [\n v\n for k, v in self.train_dataset.GT_poses_dict.items()\n if k in self.train_dataset.img_ids_train\n ]\n gt_train_poses = torch.tensor(np.stack(gt_train_poses, 0))\n noise_poses = torch.stack([torch.eye(3, 4)] * len(gt_train_poses))\n pose_refine_ = camera_utils.lie.se3_to_SE3(train_se3_refine.weight).cpu()\n refine_poses = camera_utils.pose.compose([pose_refine_, noise_poses])\n\n refine_poses = torch.stack(\n [metric_utils.parse_raw_camera(p) for p in refine_poses.float()], dim=0\n )\n gt_train_poses = torch.stack(\n [metric_utils.parse_raw_camera(p) for p in gt_train_poses.float()],\n dim=0,\n )\n aligned_pose, sim3 = metric_utils.prealign_cameras(\n refine_poses, gt_train_poses\n )\n\n gt_test_poses = [\n v\n for k, v in self.train_dataset.GT_poses_dict.items()\n if k in self.train_dataset.img_ids_test\n ]\n gt_test_poses = torch.tensor(np.stack(gt_test_poses, 0))\n gt_test_poses_ = torch.stack(\n [metric_utils.parse_raw_camera(p) for p in gt_test_poses.float()], dim=0\n )\n center = torch.zeros(1, 1, 3)\n center_GT = camera_utils.cam2world(center, gt_test_poses_)[:, 0] # [N,3]\n center_aligned = (\n center_GT - sim3.t0\n ) / sim3.s0 @ sim3.R * sim3.s1 + sim3.t1\n R_aligned = gt_test_poses_[..., :3] @ sim3.R\n t_aligned = (-R_aligned @ center_aligned[..., None])[..., 0]\n aligned_GT_pose_ = camera_utils.pose(R=R_aligned, t=t_aligned)\n aligned_GT_pose = torch.stack(\n [metric_utils.parse_raw_camera(p) for p in aligned_GT_pose_.float()],\n dim=0,\n )\n\n for i, (k, v) in 
enumerate(self.train_dataset.poses_dict.items()):\n self.train_dataset.poses_dict[k] = aligned_GT_pose[i]\n self.val_dataset.poses_dict[k] = aligned_GT_pose[i]\n else:\n optimized_pose_path = os.path.join(\n self.pose_save_dir,\n \"best_pose_\" + str(self.hparams[\"optimize_num\"]).zfill(2) + \".npy\",\n )\n\n id_ = self.train_dataset.img_ids_test[self.hparams[\"optimize_num\"]]\n self.train_dataset.poses_dict[id_] = torch.from_numpy(\n np.load(optimized_pose_path)\n )\n self.val_dataset.poses_dict[id_] = torch.from_numpy(\n np.load(optimized_pose_path)\n )" } ]
import argparse
import os
import random
import numpy as np
import pandas as pd
import torch
import wandb
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, TQDMProgressBar
from pytorch_lightning.loggers import WandbLogger
from tqdm import tqdm
from configs.config import get_from_path
from models.nerf_system_optmize import NeRFSystemOptimize
3,747
def setup_seed(seed): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) random.seed(seed) torch.backends.cudnn.deterministic = True def main(hparams): setup_seed(hparams["seed"])
def setup_seed(seed): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) random.seed(seed) torch.backends.cudnn.deterministic = True def main(hparams): setup_seed(hparams["seed"])
system = NeRFSystemOptimize(hparams)
1
2023-10-25 08:43:24+00:00
8k
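The row above pairs a flattened import block and training-script body with a single reference continuation (`system = NeRFSystemOptimize(hparams)`). As a hedged illustration of how such a row can be scored for next-line completion, the sketch below assembles a prompt from the import block and the cropped code and checks an exact match against the reference line; the `predict_next_line` callable and the toy row contents are assumptions for illustration, not part of the dataset.

```python
# Minimal sketch: score one row of this dataset for next-line completion.
# `predict_next_line` is a hypothetical model interface, not part of the dataset.
from typing import Callable, Dict


def build_prompt(row: Dict[str, str]) -> str:
    """Concatenate the import block and the cropped code into a single prompt."""
    return row["import_statement"].rstrip() + "\n\n" + row["cropped_code"].rstrip() + "\n"


def exact_match(row: Dict[str, str], predict_next_line: Callable[[str], str]) -> bool:
    """Return True when the predicted line equals the reference, ignoring surrounding whitespace."""
    prediction = predict_next_line(build_prompt(row))
    return prediction.strip() == row["next_line"].strip()


if __name__ == "__main__":
    # Toy predictor that simply returns the reference line, to exercise the helpers.
    row = {
        "import_statement": "import torch",
        "cropped_code": "def main(hparams):\n    setup_seed(hparams['seed'])",
        "next_line": "system = NeRFSystemOptimize(hparams)",
    }
    print(exact_match(row, lambda prompt: "system = NeRFSystemOptimize(hparams)"))
```

In practice the comparison is usually relaxed (for example edit similarity instead of strict equality), but the overall flow is the same.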
Redrrx/ProxyNest
API.py
[ { "identifier": "get_current_user", "path": "auth.py", "snippet": "class User(BaseModel):\nclass ResetPasswordRequest(BaseModel):\nclass DBCON(BaseModel):\n DB_URL: str\n DB_NAME: str\n DB_USER: str\n DB_PASSWORD: str\nasync def admincheck():\nasync def get_current_user(credentials: HTTPBasicCredentials = Depends(security)):" }, { "identifier": "ProxyManagement", "path": "proxynest.py", "snippet": "class ProxyManagement:\n\n def __init__(self, db_url: str, db_name: str, db_user: str, db_password: str):\n self.settings = None\n self.db_client = AsyncIOMotorClient(\n db_url,\n username=db_user,\n password=db_password,\n serverSelectionTimeoutMS=1000\n )\n self.db = self.db_client[db_name]\n self.proxy_check_urls = [\"https://google.com\", \"https://bing.com\", \"https://yahoo.com\"]\n\n async def load_settings(self):\n default_settings = {\n \"inactive_proxy_timeout\": 10,\n \"threshold_time_minutes\": 10,\n \"background_check_proxies_interval\": 60,\n \"max_instances_per_proxy\": 2,\n \"max_proxies_per_instance\": 1\n }\n\n settings = await self.db.proxy_manager_settings.find_one()\n if settings is None:\n await self.db.proxy_manager_settings.insert_one(default_settings)\n settings = default_settings\n self.settings = settings\n self.inactive_proxy_timeout = settings[\"inactive_proxy_timeout\"]\n self.threshold_time_minutes = settings[\"threshold_time_minutes\"]\n self.background_check_proxies_interval = settings[\"background_check_proxies_interval\"]\n self.max_instances_per_proxy = settings[\"max_instances_per_proxy\"]\n self.max_proxies_per_instance = settings[\"max_proxies_per_instance\"]\n\n return settings\n\n def __str__(self):\n return (\n f\"ProxyManagement Settings:\\n\"\n f\" - inactive_proxy_timeout: {self.inactive_proxy_timeout} \\n\"\n f\" - background_check_proxies_interval: {self.background_check_proxies_interval} \\n\"\n f\" - threshold_time_minutes: {self.threshold_time_minutes} \\n\"\n f\" - max_instances_per_proxy: {self.max_instances_per_proxy}\\n\"\n )\n\n async def reset_all_proxies(self):\n result = await self.db.proxies.update_many({}, {\"$set\": {\"instance_ids\": {}, \"last_used\": None}})\n\n if result.matched_count == 0:\n return {\n \"status\": \"info\",\n \"message\": \"No proxies were available to reset.\"\n }\n elif result.modified_count == 0:\n return {\n \"status\": \"info\",\n \"message\": \"No proxies needed resetting.\"\n }\n else:\n return {\n \"status\": \"success\",\n \"message\": f\"Successfully reset {result.modified_count} proxies.\"\n }\n\n async def update_settings(self, updated_settings: SettingsModel):\n try:\n update_dict = {k: v for k, v in updated_settings.model_dump(exclude_none=True).items()}\n\n if not update_dict:\n raise HTTPException(status_code=400, detail=\"No updates provided\")\n\n result = await self.db.proxy_manager_settings.update_one({}, {'$set': update_dict}, upsert=True)\n\n if result.matched_count < 1:\n raise HTTPException(status_code=404, detail=\"Settings not found\")\n\n await self.load_settings()\n\n return {\"status\": \"success\", \"detail\": \"Settings have been updated\", \"updated_settings\": update_dict}\n\n except HTTPException as http_exc:\n raise http_exc\n\n except Exception as e:\n raise HTTPException(status_code=500, detail=\"An error occurred while updating settings\") from e\n\n async def get_settings(self):\n settings = json.loads(json_util.dumps(self.settings))\n return settings\n\n async def clear_instance_reservation(self, instance_id: str):\n proxies = await self.get_proxies()\n cleared_proxies = 
[]\n\n for proxy in proxies:\n if instance_id in proxy.instance_ids:\n result = await self.clear_instance_id(proxy.id, instance_id)\n if result:\n cleared_proxies.append(proxy.id)\n else:\n return {\n \"status\": \"error\",\n \"message\": f\"Failed to clear instance {instance_id} reservation from proxy {proxy.id}\"\n }\n\n if cleared_proxies:\n str_cleared_proxies = [str(proxy) for proxy in cleared_proxies if\n proxy is not None]\n return {\n \"status\": \"success\",\n \"message\": f\"Instance {instance_id} reservation cleared from proxies {', '.join(str_cleared_proxies)}\"\n }\n\n else:\n return {\n \"status\": \"error\",\n \"message\": f\"Instance {instance_id} not found in any proxy\"\n }\n\n async def clear_instance_from_specific_proxy(self, proxy_id: str, instance_id: str):\n proxy_object_id = ObjectId(proxy_id)\n result = await self.clear_instance_id(proxy_object_id, instance_id)\n if result:\n logger.info(f\"Cleared instance {instance_id} from proxy {proxy_id}\")\n return {\"status\": \"success\", \"message\": f\"Instance {instance_id} cleared from proxy {proxy_id}\"}\n else:\n logger.error(f\"Failed to clear instance {instance_id} from proxy {proxy_id}\")\n return {\"status\": \"error\", \"message\": f\"Failed to clear instance {instance_id} from proxy {proxy_id}\"}\n\n async def clear_instance_id(self, proxy_id: ObjectId, instance_id: str):\n result = await self.db.proxies.update_one(\n {\"_id\": proxy_id, \"instance_ids\": {\"$type\": \"object\"}},\n {\"$unset\": {f\"instance_ids.{instance_id}\": \"\"}}\n )\n\n if result.modified_count == 1:\n return True\n\n result = await self.db.proxies.update_one(\n {\"_id\": proxy_id},\n {\"$set\": {\"instance_id\": None}}\n )\n return result.modified_count == 1\n\n async def assign_proxy_to_instance(self, instance_id: str, country_code: Optional[str] = None,\n tags: Optional[List[str]] = None):\n instance_proxies = await self.db.proxies.find({\"instance_ids\": instance_id}).to_list(None)\n\n if len(instance_proxies) >= self.max_proxies_per_instance:\n return {\n \"status\": \"error\",\n \"message\": f\"Instance {instance_id} is already assigned to the maximum allowed number of proxies ({self.max_proxies_per_instance}).\"\n }\n\n query = {\n \"status\": \"UP\",\n \"$where\": f\"this.instance_ids && Object.keys(this.instance_ids).length < {self.max_instances_per_proxy}\"\n }\n\n if tags:\n query[\"tags\"] = {\"$all\": tags}\n if country_code:\n query[\"country_code\"] = country_code.upper()\n\n proxy = await self.db.proxies.find_one(query)\n\n if not proxy:\n no_proxies_message = \"No available proxies found\"\n if country_code:\n no_proxies_message += f\" for country code {country_code}\"\n if tags:\n no_proxies_message += f\" and tags {tags}\" if country_code else f\" for tags {tags}\"\n\n return {\n \"status\": \"error\",\n \"message\": no_proxies_message\n }\n\n proxy_id = proxy[\"_id\"]\n current_time = datetime.now(pytz.utc)\n\n proxy['instance_ids'] = {\n k: v for k, v in proxy['instance_ids'].items()\n if v.replace(tzinfo=pytz.utc) > current_time - timedelta(minutes=self.inactive_proxy_timeout)\n }\n\n proxy['instance_ids'][instance_id] = current_time\n\n result = await self.db.proxies.update_one(\n {'_id': proxy['_id']},\n {'$set': {'instance_ids': proxy['instance_ids']}}\n )\n\n if result.modified_count == 1:\n await self.update_last_used(proxy_id)\n return {\n \"status\": \"success\",\n \"message\": f\"Proxy {proxy_id} assigned to instance {instance_id}\",\n \"proxy_id\": str(proxy_id),\n \"ip\": proxy['ip'],\n \"port\": 
proxy['port'],\n \"username\": proxy.get('username'),\n \"password\": proxy.get('password'),\n \"protocol\": proxy['protocol'],\n }\n else:\n return {\n \"status\": \"error\",\n \"message\": f\"Failed to assign proxy {proxy_id} to instance {instance_id}\"\n }\n\n async def clear_inactive_proxies(self):\n while True:\n current_time = datetime.now(pytz.utc)\n threshold_time = current_time - timedelta(minutes=self.threshold_time_minutes)\n proxies = await self.db.proxies.find({}).to_list(length=None)\n\n for proxy in proxies:\n instance_ids = proxy.get(\"instance_ids\", {})\n\n if not isinstance(instance_ids, dict):\n logger.error(\n f\"instance_ids in proxy {proxy['_id']} is not a dictionary. Actual value: {instance_ids}\")\n continue\n\n expired_instance_ids = [\n instance_id for instance_id, last_used in instance_ids.items()\n if last_used.replace(tzinfo=pytz.utc) < threshold_time\n ]\n\n if expired_instance_ids:\n logger.info(f\"Proxy {proxy['_id']} has expired instances: {expired_instance_ids}\")\n update_query = {\n \"$unset\": {f\"instance_ids.{instance_id}\": \"\" for instance_id in expired_instance_ids}\n }\n\n if len(expired_instance_ids) == len(instance_ids):\n update_query[\"$unset\"][\"last_used\"] = \"\"\n\n await self.db.proxies.update_one({'_id': proxy['_id']}, update_query)\n\n for instance_id in expired_instance_ids:\n logger.info(f\"Removed expired instance {instance_id} from proxy {proxy['_id']}\")\n\n await asyncio.sleep(self.background_check_proxies_interval)\n\n async def edit_proxy(self, proxy_id: str, updated_fields: Dict[str, Optional[Union[str, int, List[str]]]]):\n existing_proxy = await self.db.proxies.find_one({\"_id\": ObjectId(proxy_id)})\n if existing_proxy is None:\n raise HTTPException(status_code=404, detail=\"Proxy not found\")\n\n update_dict = {}\n allowed_fields = [\"ip\", \"port\", \"username\", \"password\", \"protocol\", \"country_code\", \"tags\"]\n fields_updated = []\n\n for field, value in updated_fields.items():\n if field in allowed_fields:\n if value is None:\n raise HTTPException(status_code=400, detail=f\"Value for field '{field}' cannot be None\")\n\n if field == \"tags\":\n if not isinstance(value, list):\n raise HTTPException(status_code=400, detail=f\"Value for field 'tags' must be a list\")\n\n fields_updated.append(field)\n update_dict[field] = value\n else:\n raise HTTPException(status_code=400, detail=f\"Field '{field}' is not editable\")\n\n if update_dict:\n result = await self.db.proxies.find_one_and_update(\n {\"_id\": ObjectId(proxy_id)},\n {\"$set\": update_dict},\n return_document=ReturnDocument.AFTER\n )\n\n if not result:\n raise HTTPException(status_code=500, detail=\"The update was not successful for an unknown reason\")\n\n updated_proxy_data = {**result, \"_id\": str(result[\"_id\"])}\n\n updated_proxy_model = ProxyModel(**updated_proxy_data)\n asyncio.create_task(self.check_proxy(updated_proxy_model))\n\n return {\n \"status\": \"success\",\n \"message\": \"Proxy updated and check scheduled\",\n \"updated_fields\": fields_updated,\n \"updated_proxy\": updated_proxy_data\n }\n\n return {\"status\": \"error\", \"message\": \"No valid fields were provided for update\"}\n\n async def get_proxy(self, proxy_id: str):\n proxy = await self.db.proxies.find_one({\"_id\": ObjectId(proxy_id)})\n if proxy:\n return ProxyModel(**proxy)\n else:\n return None\n\n async def get_all_proxies(self):\n proxies = await self.db.proxies.find({}).to_list(length=None)\n return [ProxyModel(**proxy) for proxy in proxies]\n\n async def 
add_proxy(self, proxy: ProxyModel):\n try:\n if proxy.id is None:\n proxy.id = ObjectId()\n\n proxy_data = proxy.model_dump(by_alias=True, exclude_none=True)\n\n existing_proxy = await self.db.proxies.find_one({\n 'ip': proxy_data['ip'],\n 'port': proxy_data['port'],\n 'protocol': proxy_data['protocol']\n })\n\n if existing_proxy:\n raise HTTPException(\n status_code=400,\n detail=\"A proxy with the same IP, port, and protocol already exists.\"\n )\n\n await self.db.proxies.insert_one(proxy_data)\n asyncio.create_task(self.check_proxy(proxy))\n return {\"_id\": str(proxy.id), \"status\": \"success\", \"message\": \"Proxy added, scheduled for checking\"}\n\n except HTTPException as http_exc:\n raise http_exc\n except Exception as e:\n raise HTTPException(status_code=500, detail=\"An error occurred while adding the proxy.\") from e\n\n async def check_proxy(self, proxy: ProxyModel):\n if proxy.id is None:\n logger.error(\"Received a proxy with no ID.\")\n return\n\n response_times = []\n proxy_type_mapping = {\n \"HTTP\": ProxyType.HTTP,\n \"SOCKS4\": ProxyType.SOCKS4,\n \"SOCKS5\": ProxyType.SOCKS5\n }\n proxy_type = proxy_type_mapping.get(proxy.protocol.upper())\n\n connector_kwargs = {\n \"host\": proxy.ip,\n \"port\": proxy.port,\n \"proxy_type\": proxy_type,\n }\n if proxy.username and proxy.password:\n connector_kwargs[\"username\"] = proxy.username\n connector_kwargs[\"password\"] = proxy.password\n\n connector = ProxyConnector(**connector_kwargs)\n\n async with ClientSession(connector=connector) as session:\n for url in self.proxy_check_urls:\n try:\n start_time = datetime.now()\n async with session.get(url) as response:\n response.raise_for_status()\n end_time = datetime.now()\n duration = end_time - start_time\n response_time = round(duration.seconds * 100)\n response_times.append(response_time)\n\n logger.info(\n f\"Success: Proxy {proxy.id} ({proxy.ip}:{proxy.port}), URL: {url}, Response time: {response_time} ms\")\n except Exception as e:\n logger.error(f\"Error checking proxy {proxy.id} ({proxy.ip}:{proxy.port}): {str(e)}\")\n response_times.append(float('inf'))\n\n valid_response_times = [t for t in response_times if t != float('inf')]\n avg_response_time = round(\n sum(valid_response_times) / len(valid_response_times)) if valid_response_times else float('inf')\n status = \"UP\" if valid_response_times else \"DOWN\"\n try:\n update_fields = {\n \"status\": status,\n \"response_time\": avg_response_time\n }\n\n result = await self.db.proxies.update_one(\n {\"_id\": proxy.id},\n {\"$set\": update_fields}\n )\n\n if result.modified_count == 0:\n logger.error(f\"No document was updated for Proxy ID: {proxy.id}. Does the document exist?\")\n else:\n logger.info(f\"Updated document for Proxy ID: {proxy.id}.\")\n except Exception as e:\n logger.error(f\"An error occurred during the database update for Proxy ID: {proxy.id}. 
Error: {str(e)}\")\n\n avg_response_time_display = f\"{avg_response_time} ms\" if avg_response_time != float('inf') else \"N/A\"\n logger.info(\n f\"Proxy: {proxy.id} ({proxy.ip}:{proxy.port}), Average response time: {avg_response_time_display}, Status: {status}\")\n\n async def background_update_country_codes(self):\n while True:\n proxies = await self.get_proxies()\n if proxies:\n for proxy in proxies:\n proxy_dict = proxy.model_dump()\n try:\n if proxy_dict[\"country_code\"] is None:\n country_code = await get_country_code(proxy_dict[\"ip\"])\n if country_code:\n await self.update_proxy_country_code(proxy_dict[\"id\"], country_code)\n except Exception as e:\n logger.error(f\"Error updating country code for proxy {proxy_dict['id']}: {e}\")\n pass\n await asyncio.sleep(20)\n\n async def update_proxy_country_code(self, proxy_id: str, country_code: str):\n try:\n result = await self.db.proxies.update_one(\n {\"_id\": ObjectId(proxy_id)},\n {\"$set\": {\"country_code\": country_code}}\n )\n if result.modified_count == 1:\n return {\n \"status\": \"success\",\n \"message\": f\"Updated country code for proxy with ID {proxy_id} to {country_code}\"\n }\n else:\n return {\n \"status\": \"error\",\n \"message\": f\"Failed to update country code for proxy with ID {proxy_id}\"\n }\n except Exception as e:\n print(f\"Error updating country code for proxy with ID {proxy_id}: {e}\")\n return {\n \"status\": \"error\",\n \"message\": f\"Error updating country code for proxy with ID {proxy_id}: {e}\"\n }\n\n async def refresh_proxy_usage(self, proxy_id: str, instance_id: Optional[str] = None):\n proxy = await self.get_proxy(proxy_id)\n if not proxy:\n return {\"status\": \"error\", \"message\": f\"Proxy {proxy_id} not found\"}\n\n if instance_id:\n instance_ids = [instance_id]\n else:\n standalone_instance_id = [proxy.instance_id] if proxy.instance_id else []\n instance_ids_in_list = list(proxy.instance_ids.keys())\n instance_ids = standalone_instance_id + instance_ids_in_list\n\n if not instance_ids:\n return {\"status\": \"error\", \"message\": f\"No instances associated with proxy {proxy_id}\"}\n\n refresh_results = []\n for inst_id in instance_ids:\n result = await self.update_last_used(proxy_id, inst_id)\n if result:\n refresh_results.append(\n {\"status\": \"success\", \"message\": f\"Proxy {proxy_id} usage refreshed for instance {inst_id}\"})\n else:\n refresh_results.append(\n {\"status\": \"error\", \"message\": f\"Failed to refresh proxy usage for instance {inst_id}\"})\n\n return refresh_results\n\n async def update_last_used(self, proxy_id: str, instance_id: Optional[str] = None):\n proxy_object_id = ObjectId(proxy_id)\n\n if instance_id:\n update_query = {\"$currentDate\": {f\"instance_ids.{instance_id}\": True}}\n else:\n update_query = {\"$currentDate\": {\"last_used\": True}}\n\n result = await self.db.proxies.update_one(\n {\"_id\": proxy_object_id},\n update_query\n )\n\n return result.modified_count > 0\n\n async def get_proxies(self, tags: Optional[List[str]] = None):\n query = {}\n if tags:\n query[\"tags\"] = {\"$in\": tags}\n\n proxies = await self.db.proxies.find(query).to_list(length=None)\n proxies_with_counts = []\n for proxy in proxies:\n proxy_data = dict(proxy)\n instance_ids = proxy_data.get(\"instance_ids\", {})\n\n if not isinstance(instance_ids, dict):\n print(f\"Warning: 'instance_ids' expected to be a dict, but got {type(instance_ids).__name__} instead.\")\n instance_ids = {}\n\n instances_count = len(instance_ids)\n if instances_count == 1:\n 
proxy_data[\"instance_id\"] = next(iter(instance_ids))\n else:\n proxy_data[\"instance_ids\"] = instance_ids\n\n try:\n proxies_with_counts.append(ProxyModel(**proxy_data))\n except ValidationError as e:\n print(f\"A validation error occurred: {e}\")\n\n return proxies_with_counts\n\n async def delete_proxy(self, proxy_id: str):\n result = await self.db.proxies.delete_one({\"_id\": ObjectId(proxy_id)})\n if result.deleted_count == 1:\n return {\"status\": \"success\", \"message\": \"Proxy deleted\"}\n else:\n return {\"status\": \"error\", \"message\": \"Failed to delete the proxy\"}\n\n async def assign_instance_id(self, proxy_id: str, instance_id: str):\n result = await self.db.proxies.update_one(\n {\"_id\": ObjectId(proxy_id)},\n {\"$addToSet\": {\"instance_ids\": instance_id}}\n )\n return result.modified_count == 1\n\n async def background_check_proxies(self):\n while True:\n cursor = self.db.proxies.find({})\n proxies = await cursor.to_list(length=None)\n proxies = [\n ProxyModel(\n **{\n **proxy,\n \"_id\": ObjectId(proxy[\"_id\"]),\n \"response_time\": float(proxy[\"response_time\"]) if \"response_time\" in proxy and isinstance(\n proxy[\"response_time\"], (int, float)) else None\n }\n )\n for proxy in proxies if \"_id\" in proxy\n ]\n await asyncio.gather(*(self.check_proxy(proxy) for proxy in proxies))\n await asyncio.sleep(self.background_check_proxies_interval)" }, { "identifier": "ProxyModel", "path": "proxynest.py", "snippet": "class ProxyModel(BaseModel):\n id: PydanticObjectId = Field(default_factory=PydanticObjectId, alias='_id')\n ip: str\n port: int\n username: Optional[str] = Field(default=None)\n password: Optional[str] = Field(default=None)\n protocol: str = Field(default=\"HTTP\")\n response_time: Optional[float] = Field(default=None)\n status: str = Field(default=\"UNKNOWN\")\n country_code: Optional[str] = Field(default=None)\n instance_id: Optional[str] = Field(default=None)\n instance_ids: Optional[Dict[str, datetime]] = Field(default_factory=dict)\n last_used: Optional[datetime] = Field(default=None)\n tags: Optional[List[str]] = Field(default_factory=list)\n\n class Config:\n arbitrary_types_allowed = True\n populate_by_name = True\n\n json_encoders = {\n ObjectId: str\n }" }, { "identifier": "SettingsModel", "path": "proxynest.py", "snippet": "class SettingsModel(BaseModel):\n max_proxies_per_instance: Optional[int]\n max_instances_per_proxy: Optional[int]\n inactive_proxy_timeout: Optional[int]\n background_check_proxies_interval: Optional[int]\n threshold_time_minutes: Optional[int]" } ]
import asyncio
import os
import bcrypt
import uvicorn
from typing import Optional, List, Dict, Union
from fastapi import FastAPI, Query, Depends, HTTPException
from starlette import status
from starlette.responses import JSONResponse
from auth import get_current_user, collection, ResetPasswordRequest, admincheck
from proxynest import ProxyManagement, ProxyModel, SettingsModel
6,143
proxy_management = ProxyManagement( db_url=os.getenv('DB_URL'), db_name=os.getenv('DB_NAME'), db_user=os.getenv('DB_USER'), db_password=os.getenv('DB_PASSWORD'), ) app = FastAPI(title="ProxyNest", description="ProxyNest is a proxy managment API", version="1.0.0", redoc_url="/redoc") @app.post("/add_proxies", dependencies=[Depends(get_current_user)]) async def add_proxy(proxy: ProxyModel): result = await proxy_management.add_proxy(proxy) return result @app.get("/proxies", dependencies=[Depends(get_current_user)]) async def get_proxies(tags: Optional[List[str]] = Query(None)): proxies = await proxy_management.get_proxies(tags=tags) return proxies @app.post("/assign_proxy", dependencies=[Depends(get_current_user)]) async def assign_proxy_to_instance(instance_id: str, country_code: Optional[str] = Query(None), tags: Optional[List[str]] = Query(None)): result = await proxy_management.assign_proxy_to_instance(instance_id, country_code, tags) return result @app.post("/update_proxy/{proxy_id}", dependencies=[Depends(get_current_user)]) async def update_proxy(proxy_id: str, proxy: Dict[str, Optional[Union[str, int, List[str]]]]): result = await proxy_management.edit_proxy(proxy_id, proxy) return result @app.post("/delete_proxy/{proxy_id}", dependencies=[Depends(get_current_user)]) async def delete_proxy(proxy_id: str): result = await proxy_management.delete_proxy(proxy_id) return result @app.post("/refresh_proxy_usage/{proxy_id}", dependencies=[Depends(get_current_user)]) async def refresh_proxy_usage(proxy_id: str, instance_id: Optional[str] = None): result = await proxy_management.update_last_used(proxy_id, instance_id) if result: if instance_id: return {"status": "success", "message": f"Proxy {proxy_id} usage refreshed for instance {instance_id}"} else: return {"status": "success", "message": f"Proxy {proxy_id} usage refreshed for all instances"} else: return {"status": "error", "message": "Failed to refresh proxy usage"} @app.post("/clear_instance_proxies/{instance_id}", dependencies=[Depends(get_current_user)]) async def clear_instance_reservation(instance_id: str): return await proxy_management.clear_instance_reservation(instance_id) @app.post("/clear_instance_from_specific_proxy/{proxy_id}/{instance_id}", dependencies=[Depends(get_current_user)]) async def clear_instance_from_specific_proxy(proxy_id: str, instance_id: str) -> JSONResponse: result = await proxy_management.clear_instance_from_specific_proxy(proxy_id, instance_id) return JSONResponse(content=result) @app.post("/reset_all_proxies", dependencies=[Depends(get_current_user)]) async def reset_all_proxies(): result = await proxy_management.reset_all_proxies() return result @app.post("/reset-password/") async def reset_password(
proxy_management = ProxyManagement( db_url=os.getenv('DB_URL'), db_name=os.getenv('DB_NAME'), db_user=os.getenv('DB_USER'), db_password=os.getenv('DB_PASSWORD'), ) app = FastAPI(title="ProxyNest", description="ProxyNest is a proxy managment API", version="1.0.0", redoc_url="/redoc") @app.post("/add_proxies", dependencies=[Depends(get_current_user)]) async def add_proxy(proxy: ProxyModel): result = await proxy_management.add_proxy(proxy) return result @app.get("/proxies", dependencies=[Depends(get_current_user)]) async def get_proxies(tags: Optional[List[str]] = Query(None)): proxies = await proxy_management.get_proxies(tags=tags) return proxies @app.post("/assign_proxy", dependencies=[Depends(get_current_user)]) async def assign_proxy_to_instance(instance_id: str, country_code: Optional[str] = Query(None), tags: Optional[List[str]] = Query(None)): result = await proxy_management.assign_proxy_to_instance(instance_id, country_code, tags) return result @app.post("/update_proxy/{proxy_id}", dependencies=[Depends(get_current_user)]) async def update_proxy(proxy_id: str, proxy: Dict[str, Optional[Union[str, int, List[str]]]]): result = await proxy_management.edit_proxy(proxy_id, proxy) return result @app.post("/delete_proxy/{proxy_id}", dependencies=[Depends(get_current_user)]) async def delete_proxy(proxy_id: str): result = await proxy_management.delete_proxy(proxy_id) return result @app.post("/refresh_proxy_usage/{proxy_id}", dependencies=[Depends(get_current_user)]) async def refresh_proxy_usage(proxy_id: str, instance_id: Optional[str] = None): result = await proxy_management.update_last_used(proxy_id, instance_id) if result: if instance_id: return {"status": "success", "message": f"Proxy {proxy_id} usage refreshed for instance {instance_id}"} else: return {"status": "success", "message": f"Proxy {proxy_id} usage refreshed for all instances"} else: return {"status": "error", "message": "Failed to refresh proxy usage"} @app.post("/clear_instance_proxies/{instance_id}", dependencies=[Depends(get_current_user)]) async def clear_instance_reservation(instance_id: str): return await proxy_management.clear_instance_reservation(instance_id) @app.post("/clear_instance_from_specific_proxy/{proxy_id}/{instance_id}", dependencies=[Depends(get_current_user)]) async def clear_instance_from_specific_proxy(proxy_id: str, instance_id: str) -> JSONResponse: result = await proxy_management.clear_instance_from_specific_proxy(proxy_id, instance_id) return JSONResponse(content=result) @app.post("/reset_all_proxies", dependencies=[Depends(get_current_user)]) async def reset_all_proxies(): result = await proxy_management.reset_all_proxies() return result @app.post("/reset-password/") async def reset_password(
reset_request: ResetPasswordRequest,
0
2023-10-27 15:45:30+00:00
8k
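The API row above exposes proxy assignment through `POST /assign_proxy`, guarded by HTTP basic authentication. Below is a minimal client-side sketch of calling that endpoint; the base URL and credentials are assumptions for illustration, while the route and its `instance_id`/`country_code` query parameters come from the handler shown in the row.

```python
# Minimal sketch of a client hitting the /assign_proxy endpoint shown above.
# The base URL and the basic-auth credentials are assumptions for illustration;
# the route, query parameters, and auth scheme come from the API code in this row.
from typing import Optional

import httpx

BASE_URL = "http://localhost:8000"  # assumed deployment address


def assign_proxy(instance_id: str, country_code: Optional[str] = None) -> dict:
    params = {"instance_id": instance_id}
    if country_code:
        params["country_code"] = country_code
    response = httpx.post(
        f"{BASE_URL}/assign_proxy",
        params=params,
        auth=("admin", "change-me"),  # assumed credentials
        timeout=10.0,
    )
    response.raise_for_status()
    return response.json()


if __name__ == "__main__":
    print(assign_proxy("worker-01", country_code="DE"))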
QutacQuantum/qugen
qugen/main/generator/discrete_qgan_model_handler.py
[ { "identifier": "compute_gradient_JAX", "path": "qugen/main/generator/quantum_circuits/discrete_generator_pennylane.py", "snippet": "@partial(jax.jit, static_argnames=[\"discriminator\"])\ndef compute_gradient_JAX(samples, discriminator, discriminator_weights):\n def criterion(outputs):\n return (-1.0 * jnp.log(outputs) ).mean()\n\n gradient = []\n for i in range(0, len(samples), 2):\n forward_fake = samples[i]\n backward_fake = samples[i + 1]\n\n forward_output = discriminator.apply(discriminator_weights, forward_fake).flatten()\n backward_output = discriminator.apply(discriminator_weights, backward_fake).flatten()\n\n forward_diff = criterion(forward_output)\n backward_diff = criterion(backward_output)\n gradient.append(1 / 2 * (forward_diff - backward_diff))\n\n return jnp.array(gradient)" }, { "identifier": "BaseModelHandler", "path": "qugen/main/generator/base_model_handler.py", "snippet": "class BaseModelHandler(ABC):\n \"\"\"\n It implements the interface for each of the models handlers (continuous QGAN/QCBM and discrete QGAN/QCBM),\n which includes building the models, training them, saving and reloading them, and generating samples from them.\n \"\"\"\n\n def __init__(self):\n \"\"\"\"\"\"\n self.device_configuration = None\n\n @abstractmethod\n def build(self, *args, **kwargs) -> \"BaseModelHandler\":\n \"\"\"\n Define the architecture of the model. Weights initialization is also typically performed here.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def save(self, file_path: Path, overwrite: bool = True) -> \"BaseModelHandler\":\n \"\"\"\n Saves the model weights to a file.\n\n Parameters:\n file_path (pathlib.Path): destination file for model weights\n overwrite (bool): Flag indicating if any existing file at the target location should be overwritten\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def reload(self, file_path: Path) -> \"BaseModelHandler\":\n \"\"\"\n Loads the model from a set of weights.\n\n Parameters:\n file_path (pathlib.Path): source file for the model weights\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def train(self, *args) -> \"BaseModelHandler\":\n \"\"\"\n Perform training of the model.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def predict(self, *args) -> np.array:\n \"\"\"\n Draw samples from the model.\n \"\"\"\n raise NotImplementedError\n\n def evaluate(\n self, train_dataset_original_space: np.ndarray, number_bins=16\n ) -> pd.DataFrame:\n parameters_all_training_iterations = glob.glob(\n f\"{self.path_to_models}/parameters_training_iteration=*\"\n )\n it_list = []\n kl_list_transformed_space = []\n kl_list_original_space = []\n train_dataset_transformed_space = self.normalizer.transform(\n train_dataset_original_space\n )\n dimension = train_dataset_original_space.shape[1]\n progress = tqdm(range(len(parameters_all_training_iterations)))\n progress.set_description(\"Evaluating\")\n best_kl_original_space = np.inf\n best_samples_original_space = None\n for it in progress:\n parameters_path = parameters_all_training_iterations[it]\n iteration = re.search(\n \"parameters_training_iteration=(.*).(pickle|npy)\",\n os.path.basename(parameters_path),\n ).group(1)\n it_list.append(iteration)\n self.reload(self.model_name, int(iteration))\n synthetic_transformed_space = self.predict_transform(\n n_samples=100000 #len(train_dataset_original_space)\n )\n synthetic_original_space = self.normalizer.inverse_transform(\n synthetic_transformed_space\n )\n kl_transformed_space = kl_divergence_from_data(\n 
train_dataset_transformed_space,\n synthetic_transformed_space,\n number_bins=number_bins,\n bin_range=[[0, 1] for _ in range(dimension)],\n dimension=dimension,\n )\n kl_list_transformed_space.append(kl_transformed_space)\n\n kl_original_space = kl_divergence_from_data(\n train_dataset_original_space,\n synthetic_original_space,\n number_bins=number_bins,\n bin_range=list(\n zip(\n train_dataset_original_space.min(axis=0),\n train_dataset_original_space.max(axis=0),\n )\n ),\n dimension=dimension,\n )\n kl_list_original_space.append(kl_original_space)\n\n if kl_original_space < best_kl_original_space:\n best_kl_original_space = kl_original_space\n best_samples_original_space = synthetic_original_space\n\n progress.set_postfix(\n kl_original_space=kl_original_space,\n kl_transformed_space=kl_transformed_space,\n refresh=False,\n )\n\n fig = plt.figure()\n fig.suptitle(f\"{self.path_to_models}, KL={best_kl_original_space}\")\n samples_idx = np.random.choice(len(best_samples_original_space), 1000)\n samples = best_samples_original_space[samples_idx]\n if dimension == 2:\n ax = fig.add_subplot()\n ax.scatter(samples[:, 0], samples[:, 1])\n elif dimension == 3:\n ax = fig.add_subplot(projection='3d')\n ax.scatter(samples[:, 0], samples[:, 1], samples[:, 2])\n ax.view_init(elev=10., azim=20)\n else:\n raise ValueError\n plt.savefig(f\"{self.path_to_models}/scatterplot_best_samples_original_space.png\")\n \n kl_results = pd.DataFrame(\n {\n \"iteration\": it_list,\n \"kl_transformed_space\": np.array(kl_list_transformed_space).astype(float),\n \"kl_original_space\": np.array(kl_list_original_space).astype(float),\n }\n )\n kl_results = kl_results.sort_values(by=[\"iteration\"])\n kl_results.to_csv(f\"{self.path_to_models}/kl_results.csv\", index=False)\n return kl_results" }, { "identifier": "CustomDataset", "path": "qugen/main/data/helper.py", "snippet": "class CustomDataset:\n def __init__(self,data):\n self._index_in_epoch = 0\n self._epochs_completed = 0\n self._data = data\n self._num_examples = data.shape[0]\n pass\n\n\n @property\n def data(self):\n return self._data\n\n def next_batch(self,batch_size,shuffle = True):\n start = self._index_in_epoch\n if start == 0 and self._epochs_completed == 0:\n idx = np.arange(0, self._num_examples) # get all possible indexes\n np.random.shuffle(idx) # shuffle indexe\n self._data = self.data[idx] # get list of `num` random samples\n\n # go to the next batch\n if start + batch_size > self._num_examples:\n self._epochs_completed += 1\n rest_num_examples = self._num_examples - start\n data_rest_part = self.data[start:self._num_examples]\n idx0 = np.arange(0, self._num_examples) # get all possible indexes\n np.random.shuffle(idx0) # shuffle indexes\n self._data = self.data[idx0] # get list of `num` random samples\n \n start = 0\n self._index_in_epoch = batch_size - rest_num_examples #avoid the case where the #sample != integar times of batch_size\n end = self._index_in_epoch \n data_new_part = self._data[start:end] \n return np.concatenate((data_rest_part, data_new_part), axis=0)\n else:\n self._index_in_epoch += batch_size\n end = self._index_in_epoch\n return self._data[start:end]" }, { "identifier": "PITNormalizer", "path": "qugen/main/data/data_handler.py", "snippet": "class PITNormalizer():\n def __init__(self, reverse_lookup = None, epsilon = 0):\n self.reverse_lookup = reverse_lookup\n self.epsilon = epsilon\n\n def fit_transform(self, data: np.ndarray) -> np.ndarray:\n df = pd.DataFrame(data)\n epit = df.copy(deep=True).transpose()\n 
reverse_epit_lookup = epit.copy(deep=True)\n\n epit.values[::] = [emp_integral_trans(row) for row in epit.values]\n epit = epit.transpose()\n reverse_epit_lookup.values[::] = [np.sort(row) for row in reverse_epit_lookup.values]\n\n df = epit.copy()\n self.reverse_lookup = reverse_epit_lookup.values\n self.reverse_lookup = jnp.array(self.reverse_lookup)\n return df.values / (1 + self.epsilon)\n\n def transform(self, data: np.ndarray) -> np.ndarray:\n df = pd.DataFrame(data)\n epit = df.copy(deep=True).transpose()\n reverse_epit_lookup = epit.copy(deep=True)\n\n epit.values[::] = [emp_integral_trans(row) for row in epit.values]\n epit = epit.transpose()\n reverse_epit_lookup.values[::] = [np.sort(row) for row in reverse_epit_lookup.values]\n\n df = epit.copy()\n return df.values / (1 + self.epsilon)\n\n def _reverse_emp_integral_trans_single(self, values: jnp.ndarray) -> List[float]:\n # assumes non ragged array\n values = values * (jnp.shape(self.reverse_lookup)[1] - 1)\n rows = jnp.shape(self.reverse_lookup)[0]\n # if we are an integer do not use linear interpolation\n valuesL = jnp.floor(values).astype(int)\n valuesH = jnp.ceil(values).astype(int)\n # if we are an integer then floor and ceiling are the same\n isIntMask = 1 - (valuesH - valuesL)\n rowIndexer = jnp.arange(rows)\n resultL = self.reverse_lookup[([rowIndexer], [valuesL])] # doing 2d lookup as [[index1.row, index2.row],[index1.column, index2.column]]\n resultH = self.reverse_lookup[([rowIndexer], [valuesH])] # where 2d index tuple would be (index1.row, index1.column)\n # lookup int or do linear interpolation\n return resultL * (isIntMask + values - valuesL) + resultH * (valuesH - values) \n\n @partial(jax.jit, static_argnums=(0,))\n def inverse_transform(self, data: np.ndarray) -> np.ndarray:\n data = data * (1 + self.epsilon)\n res = jax.vmap(self._reverse_emp_integral_trans_single)(data)\n # res = [self._reverse_emp_integral_trans_single(row) for row in data]\n return res[:, 0, :]" }, { "identifier": "MinMaxNormalizer", "path": "qugen/main/data/data_handler.py", "snippet": "class MinMaxNormalizer:\n def __init__(self, reverse_lookup = None, epsilon = 0):\n self.reverse_lookup = reverse_lookup\n self.epsilon = epsilon\n\n def fit_transform(self, data: np.ndarray) -> np.ndarray:\n self.min = data.min()\n self.max = data.max() - data.min()\n data = (data - self.min) / self.max\n self.reverse_lookup = (self.min, self.max)\n return data / (1 + self.epsilon)\n\n def transform(self, data: np.ndarray) -> np.ndarray:\n min = data.min()\n max = data.max() - data.min()\n data = (data - min) / max\n return data / (1 + self.epsilon)\n\n def inverse_transform(self, data: np.ndarray) -> np.ndarray:\n data = data * (1 + self.epsilon)\n self.min, self.max = self.reverse_lookup\n return data * self.max + self.min" }, { "identifier": "kl_divergence", "path": "qugen/main/data/helper.py", "snippet": "def kl_divergence(p, q):\n eps = 1e-6\n cost = jnp.sum( p * jnp.log((p+eps)/(q+eps)) )\n return cost" }, { "identifier": "Discriminator_JAX", "path": "qugen/main/discriminator/discriminator.py", "snippet": "class Discriminator_JAX(nn_jax.Module):\n @nn_jax.compact\n def __call__(self, x):\n x = nn_jax.Dense(\n 2 * x.shape[1],\n kernel_init=nn_jax.initializers.variance_scaling(\n scale=10, mode=\"fan_avg\", distribution=\"uniform\"\n ),\n )(x)\n x = nn_jax.leaky_relu(x)\n x = nn_jax.Dense(\n 1,\n kernel_init=nn_jax.initializers.variance_scaling(\n scale=10, mode=\"fan_avg\", distribution=\"uniform\"\n ),\n )(x)\n x = nn_jax.leaky_relu(x)\n return 
nn_jax.sigmoid(x)" }, { "identifier": "compute_discretization", "path": "qugen/main/data/discretization.py", "snippet": "def compute_discretization(n_qubits, n_registered):\n format_string = \"{:0\" + str(n_qubits) + \"b}\"\n n = 2 ** (n_qubits // n_registered)\n dict_bins = {}\n for k, coordinates in enumerate(product(range(n), repeat=n_registered)):\n dict_bins.update({\n format_string.format(k): [coordinates, center(coordinates, n)]\n })\n return dict_bins" } ]
from pathlib import Path
from itertools import chain
from typing import Optional
from tqdm import tqdm
from qugen.main.generator.quantum_circuits.discrete_generator_pennylane import compute_gradient_JAX
from qugen.main.generator.base_model_handler import BaseModelHandler
from qugen.main.data.helper import CustomDataset
from qugen.main.data.data_handler import PITNormalizer, MinMaxNormalizer
from qugen.main.data.helper import kl_divergence
from qugen.main.discriminator.discriminator import Discriminator_JAX
from qugen.main.data.discretization import compute_discretization
from jax.config import config
from qugen.main.generator.quantum_circuits.discrete_generator_pennylane \
    import discrete_copula_circuit_JAX as get_generator
from qugen.main.generator.quantum_circuits.discrete_generator_pennylane \
    import discrete_standard_circuit_JAX as get_generator
from qugen.main.generator.quantum_circuits.discrete_generator_pennylane import \
    discrete_copula_circuit_JAX as get_generator
from qugen.main.generator.quantum_circuits.discrete_generator_pennylane import \
    discrete_standard_circuit_JAX as get_generator
import json
import time
import hashlib
import os
import warnings
import jax
import jax.numpy as jnp
import numpy as np
import optax
import pickle
import matplotlib.pyplot as plt
import matplotlib as mpl
6,167
train_dataset = self.normalizer.fit_transform(train_dataset) self.reverse_lookup = self.normalizer.reverse_lookup if self.save_artifacts: with open(self.path_to_models + "/" + "meta.json", "w+") as file: json.dump(self.metadata, file) jnp.save(self.path_to_models + "/" + 'reverse_lookup.npy', self.reverse_lookup) self.dict_bins = compute_discretization(self.n_qubits, self.n_registers) n = 2 ** (self.n_qubits // self.n_registers) nns = tuple(n for _ in range(self.n_registers)) nns_nq = nns + tuple((self.n_qubits,)) inverse_bins = np.zeros(nns_nq) for key, value in self.dict_bins.items(): id_n = value[0] inverse_bins[id_n] = jnp.array([int(bit) for bit in key]) coordinates = np.floor(train_dataset * n).astype(int) train_dataset = [ inverse_bins[tuple([xy[ii] for ii in range(self.n_registers)])] for xy in coordinates ] train_dataset = jnp.array(train_dataset).astype(jnp.float32) distribution_pit = np.zeros(nns) for xy in coordinates: indices = tuple(xy[ii] for ii in range(self.n_registers)) distribution_pit[indices] += 1 distribution_pit /= np.sum(distribution_pit) distribution_pit = jnp.array(distribution_pit) optimizer_discriminator = optax.adam( learning_rate=initial_learning_rate_discriminator, b1=self.beta_1, b2=0.999, ) optimizer_state_d = optimizer_discriminator.init(self.discriminator_weights) optimizer_generator = optax.sgd(learning_rate=initial_learning_rate_generator) self.random_key, subkey = jax.random.split(self.random_key) optimizer_state_g = optimizer_generator.init(self.generator_weights) kl_list_transformed_space = [] it_list = [] # create shifts in advance, leads to less code at application elementary_shift = 1 shifts = [ [elementary_shift * e_i, -elementary_shift * e_i] for e_i in jnp.eye(self.generator_weights.size) ] shifts = list(chain(*shifts)) shifts = [shift.reshape(self.generator_weights.shape) for shift in shifts] parameters = [] epsilon = 1e-10 X_train = CustomDataset(train_dataset.astype("float32")) def cost_fn_discriminator(X, generator_weights, discriminator_weights): self.random_key, subkey = jax.random.split(self.random_key) G_samples = self.generator( subkey, generator_weights, n_shots=len(X), ) D_fake = self.D.apply(discriminator_weights, G_samples) D_real = self.D.apply(discriminator_weights, X) loss_1 = -jnp.mean(jnp.log(D_real + epsilon)) loss_2 = -jnp.mean(jnp.log(1.0 - D_fake + epsilon)) D_loss = loss_1 + loss_2 return D_loss def cost_fn_generator(X, generator_weights, discriminator_weights): self.random_key, subkey = jax.random.split(self.random_key) G_samples = self.generator( subkey, weights=generator_weights, n_shots=len(X), ) D_fake = self.D.apply(discriminator_weights, G_samples) G_loss = -jnp.mean(jnp.log(D_fake + epsilon)) # Vanilla GAN return G_loss progress = tqdm(range(n_epochs), mininterval=10 if self.slower_progress_update else None) for it in progress: if self.save_artifacts: self.save( f"{self.path_to_models}/parameters_training_iteration={it + self.previous_trained_epochs }.pickle", overwrite=False, ) data = X_train.next_batch(self.batch_size) discriminator_training_steps = 1 # How many times is the discriminator updates per generator update for _ in range(discriminator_training_steps): cost_discriminator, grad_d = jax.value_and_grad( lambda w: cost_fn_discriminator(data, self.generator_weights, w) )(self.discriminator_weights) updates, optimizer_state_d = optimizer_discriminator.update( grad_d, optimizer_state_d ) self.discriminator_weights = optax.apply_updates( self.discriminator_weights, updates ) # This is the method using the old 
manual gradient cost_generator = cost_fn_generator( data, self.generator_weights, self.discriminator_weights ) self.random_key, *subkeys = jax.random.split(self.random_key, num=len(shifts) + 1) G_samples = [ self.generator( subkey, self.generator_weights + parameter_shift, n_shots=self.batch_size, ) for subkey, parameter_shift in zip(subkeys, shifts) ]
# Copyright 2023 QUTAC, BASF Digital Solutions GmbH, BMW Group, # Lufthansa Industry Solutions AS GmbH, Merck KGaA (Darmstadt, Germany), # Munich Re, SAP SE. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at #     http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. config.update("jax_enable_x64", True) mpl.use("Agg") class DiscreteQGANModelHandler(BaseModelHandler): def __init__(self): """Initialize the parameters specific to this model handler by assigning defaults to all attributes which should immediately be available across all methods.""" super().__init__() self.n_qubits = None self.n_registers = None self.circuit_depth = None self.weights = None self.generator = None self.num_generator_params = None self.circuit = None self.n_epochs = None self.generator_weights = None self.discriminator_weights = None self.random_key = None self.reverse_lookup = None self.save_artifacts = None self.slower_progress_update = None self.normalizer = None def build( self, model_name: str, data_set_name: str, n_qubits=8, n_registers=2, circuit_depth=1, random_seed=42, transformation="pit", circuit_type="copula", save_artifacts=True, slower_progress_update=False, ) -> BaseModelHandler: """Build the discrete QGAN model. This defines the architecture of the model, including the circuit ansatz, data transformation and whether the artifacts are saved. Args: model_name (str): The name which will be used to save the data to disk. data_set_name (str): The name of the data set which is set as part of the model name n_qubits (int, optional): Number of qubits. Defaults to 8. n_registers (int): Number of dimensions of the data. circuit_depth (int, optional): Number of repetitions of qml.StronglyEntanglingLayers. Defaults to 1. random_seed (int, optional): Random seed for reproducibility. Defaults to 42. transformation (str, optional): Type of normalization, either "minmax" or "pit". Defaults to "pit". circuit_type (string, optional): name of the circuit anstaz to be used for the QGAN, either "copula" or "standard". Defaults to "copula" save_artifacts (bool, optional): Whether to save the artifacts to disk. Defaults to True. slower_progress_update (bool, optional): Controls how often the progress bar is updated. If set to True, update every 10 seconds at most, otherwise use tqdm defaults. Defaults to False. Returns: BaseModelHandler: Return the built model handler. It is not strictly necessary to overwrite the existing variable with this since all changes are made in place. """ self.slower_progress_update = slower_progress_update self.n_qubits = n_qubits self.n_registers = n_registers self.circuit_depth = circuit_depth self.data_set_name = data_set_name self.transformation = transformation self.circuit_type = circuit_type self.performed_trainings = 0 self.save_artifacts = save_artifacts time_str = str(time.time()).encode('utf-8') uniq = hashlib.md5(time_str).hexdigest()[:4] self.model_name = model_name + '_' + self.data_set_name + '_' + self.circuit_type + '_' + self.transformation+ '_' + 'qgan_' + uniq self.device = 'cpu' self.beta_1 = 0.5 self.real_label = 1. self.fake_label = 0. 
self.n_samples = 10000 self.path_to_models = "experiments/" + self.model_name self.metadata = dict({ 'model_name': self.model_name, 'n_qubits': self.n_qubits, 'n_registers': self.n_registers, 'circuit_type': self.circuit_type, 'circuit_depth': self.circuit_depth, 'transformation': self.transformation, 'data_set ': self.data_set_name, 'n_epochs': self.n_epochs, 'discriminator': 'digital', "training_data": {}, }) # save artifacts only when save_artifacts flag is true, used for testing if save_artifacts: # create experiments folder os.makedirs('experiments/' + self.model_name) print('model_name', self.model_name) with open( self.path_to_models + "/" + "meta.json", "w" ) as fp: json.dump(self.metadata, fp) # jax specific self.random_key = jax.random.PRNGKey(random_seed) self.D = Discriminator_JAX() self.D.apply = jax.jit(self.D.apply) self.random_key, subkey1, subkey2 = jax.random.split(self.random_key, num=3) self.discriminator_weights = self.D.init( subkey2, jax.random.uniform( subkey1, ( 1, self.n_qubits, ), ), ) # Use dummy input for init if self.transformation == 'minmax': self.normalizer = MinMaxNormalizer(epsilon=1e-6) elif self.transformation == 'pit': self.normalizer = PITNormalizer(epsilon=1e-6) else: raise ValueError("Transformation value must be either 'minmax' or 'pit'") if self.circuit_type == 'copula': elif self.circuit_type == 'standard': else: raise ValueError("Circuit value must be either 'standard' or 'copula'") self.generator, self.num_generator_params = get_generator(self.n_qubits, self.n_registers, self.circuit_depth) self.random_key, subkey = jax.random.split(self.random_key) # Draw from interval [0, pi) because that is how it was before self.generator_weights = jax.random.uniform(subkey, shape=(self.num_generator_params,)) * jnp.pi print(f"{self.num_generator_params=}") def save(self, file_path: Path, overwrite: bool = True) -> BaseModelHandler: """Save the generator and discriminator weights to disk. Args: file_path (Path): The paths where the pickled tuple of generator and discriminator weights will be placed. overwrite (bool, optional): Whether to overwrite the file if it already exists. Defaults to True. Returns: BaseModelHandler: The model, unchanged. """ if overwrite or not os.path.exists(file_path): with open(file_path, "wb") as file: pickle.dump((self.generator_weights, self.discriminator_weights), file) return self def reload( self, model_name: str, epoch: int, random_seed: Optional[int] = None ) -> BaseModelHandler: """Reload the model from the artifacts including the parameters for the generator and the discriminator, the metadata and the data transformation file (reverse lookup table or original min and max of the training data). Args: model_name (str): The name of the model to reload. epoch (int): The epoch to reload. random_seed (int, Optional): Specify a random seed for reproducibility. Returns: BaseModelHandler: The reloaded model, but changes have been made in place as well. 
""" self.model_name = model_name self.path_to_models = "experiments/" + self.model_name weights_file = "experiments/" + model_name + "/" + "parameters_training_iteration={0}.pickle".format(str(epoch)) meta_file = "experiments/"+ model_name + "/" + "meta.json" reverse_file = "experiments/" + model_name + "/" + 'reverse_lookup.npy' with open(weights_file, "rb") as file: self.generator_weights, self.discriminator_weights = pickle.load(file) with open(meta_file, 'r') as f: self.metadata = json.load(f) self.reverse_lookup = jnp.load(reverse_file) self.n_qubits = self.metadata["n_qubits"] self.transformation = self.metadata["transformation"] self.circuit_depth = self.metadata["circuit_depth"] self.performed_trainings = len(self.metadata["training_data"]) self.n_registers = self.metadata['n_registers'] self.circuit_type = self.metadata['circuit_type'] if random_seed is None: if self.random_key is None: self.random_key = jax.random.PRNGKey(2) else: if self.random_key is not None: warnings.warn( "Random state already initialized in the model handler, but a random_seed was specified when reloading. " "Re-initializing with the random_seed." ) self.random_key = jax.random.PRNGKey(random_seed) if self.normalizer is None: if self.transformation == 'minmax': self.normalizer = MinMaxNormalizer(epsilon=1e-6) elif self.transformation == 'pit': self.normalizer = PITNormalizer(epsilon=1e-6) else: raise ValueError("Transformation value must be either 'minmax' or 'pit'") self.normalizer.reverse_lookup = self.reverse_lookup if self.generator is None: if self.circuit_type == 'copula': elif self.circuit_type == 'standard': else: raise ValueError("Circuit value must be either 'standard' or 'copula'") self.generator, self.num_generator_params = get_generator(self.n_qubits, self.n_registers, self.circuit_depth) return self def train( self, train_dataset: np.array, n_epochs: int, initial_learning_rate_generator: float, initial_learning_rate_discriminator: float, batch_size = 1000, ) -> BaseModelHandler: """Train the discrete QGAN. Args: train_dataset (np.array): The training data in the original space. n_epochs (int): Technically, we are not passing the number of passes through the training data, but the number of iterations of the training loop. initial_learning_rate_generator (float, optional): Learning rate for the quantum generator. initial_learning_rate_discriminator (float, optional): Learning rate for the classical discriminator. batch_size (int, optional): Batch size. Defaults to None, and the whole training data is used in each iteration. Raises: ValueError: Raises ValueError if the training dataset has dimension (number of columns) not equal to 2 or 3. Returns: BaseModelHandler: The trained model. 
""" self.batch_size = batch_size self.n_epochs = n_epochs if self.performed_trainings == 0: self.previous_trained_epochs = 0 else: self.previous_trained_epochs = sum([self.metadata["training_data"][str(i)]["n_epochs"] for i in range(self.performed_trainings)]) training_data = {} training_data["n_epochs"] = self.n_epochs training_data["batch_size"] = self.batch_size training_data["learning_rate_generator"] = initial_learning_rate_generator training_data["learning_rate_discriminator"] = initial_learning_rate_discriminator self.metadata["training_data"][str(self.performed_trainings)] = training_data self.performed_trainings += 1 train_dataset = self.normalizer.fit_transform(train_dataset) self.reverse_lookup = self.normalizer.reverse_lookup if self.save_artifacts: with open(self.path_to_models + "/" + "meta.json", "w+") as file: json.dump(self.metadata, file) jnp.save(self.path_to_models + "/" + 'reverse_lookup.npy', self.reverse_lookup) self.dict_bins = compute_discretization(self.n_qubits, self.n_registers) n = 2 ** (self.n_qubits // self.n_registers) nns = tuple(n for _ in range(self.n_registers)) nns_nq = nns + tuple((self.n_qubits,)) inverse_bins = np.zeros(nns_nq) for key, value in self.dict_bins.items(): id_n = value[0] inverse_bins[id_n] = jnp.array([int(bit) for bit in key]) coordinates = np.floor(train_dataset * n).astype(int) train_dataset = [ inverse_bins[tuple([xy[ii] for ii in range(self.n_registers)])] for xy in coordinates ] train_dataset = jnp.array(train_dataset).astype(jnp.float32) distribution_pit = np.zeros(nns) for xy in coordinates: indices = tuple(xy[ii] for ii in range(self.n_registers)) distribution_pit[indices] += 1 distribution_pit /= np.sum(distribution_pit) distribution_pit = jnp.array(distribution_pit) optimizer_discriminator = optax.adam( learning_rate=initial_learning_rate_discriminator, b1=self.beta_1, b2=0.999, ) optimizer_state_d = optimizer_discriminator.init(self.discriminator_weights) optimizer_generator = optax.sgd(learning_rate=initial_learning_rate_generator) self.random_key, subkey = jax.random.split(self.random_key) optimizer_state_g = optimizer_generator.init(self.generator_weights) kl_list_transformed_space = [] it_list = [] # create shifts in advance, leads to less code at application elementary_shift = 1 shifts = [ [elementary_shift * e_i, -elementary_shift * e_i] for e_i in jnp.eye(self.generator_weights.size) ] shifts = list(chain(*shifts)) shifts = [shift.reshape(self.generator_weights.shape) for shift in shifts] parameters = [] epsilon = 1e-10 X_train = CustomDataset(train_dataset.astype("float32")) def cost_fn_discriminator(X, generator_weights, discriminator_weights): self.random_key, subkey = jax.random.split(self.random_key) G_samples = self.generator( subkey, generator_weights, n_shots=len(X), ) D_fake = self.D.apply(discriminator_weights, G_samples) D_real = self.D.apply(discriminator_weights, X) loss_1 = -jnp.mean(jnp.log(D_real + epsilon)) loss_2 = -jnp.mean(jnp.log(1.0 - D_fake + epsilon)) D_loss = loss_1 + loss_2 return D_loss def cost_fn_generator(X, generator_weights, discriminator_weights): self.random_key, subkey = jax.random.split(self.random_key) G_samples = self.generator( subkey, weights=generator_weights, n_shots=len(X), ) D_fake = self.D.apply(discriminator_weights, G_samples) G_loss = -jnp.mean(jnp.log(D_fake + epsilon)) # Vanilla GAN return G_loss progress = tqdm(range(n_epochs), mininterval=10 if self.slower_progress_update else None) for it in progress: if self.save_artifacts: self.save( 
f"{self.path_to_models}/parameters_training_iteration={it + self.previous_trained_epochs }.pickle", overwrite=False, ) data = X_train.next_batch(self.batch_size) discriminator_training_steps = 1 # How many times is the discriminator updates per generator update for _ in range(discriminator_training_steps): cost_discriminator, grad_d = jax.value_and_grad( lambda w: cost_fn_discriminator(data, self.generator_weights, w) )(self.discriminator_weights) updates, optimizer_state_d = optimizer_discriminator.update( grad_d, optimizer_state_d ) self.discriminator_weights = optax.apply_updates( self.discriminator_weights, updates ) # This is the method using the old manual gradient cost_generator = cost_fn_generator( data, self.generator_weights, self.discriminator_weights ) self.random_key, *subkeys = jax.random.split(self.random_key, num=len(shifts) + 1) G_samples = [ self.generator( subkey, self.generator_weights + parameter_shift, n_shots=self.batch_size, ) for subkey, parameter_shift in zip(subkeys, shifts) ]
grad_g = compute_gradient_JAX(
0
2023-10-27 12:25:58+00:00
8k
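The training loop in the row above builds forward/backward parameter shifts for every generator weight and hands the shifted samples to `compute_gradient_JAX`, which combines each pair with a factor of 1/2. The toy sketch below restates that parameter-shift idea on a stand-in cosine expectation value rather than the repository's quantum circuit, so the result can be checked against the analytic derivative.

```python
# Toy illustration of the parameter-shift idea used in the training loop above:
# a gradient is obtained from two shifted evaluations, (E(theta + s) - E(theta - s)) / 2,
# mirroring how forward/backward samples are combined in compute_gradient_JAX.
# The expectation here is a stand-in cosine, not the repository's circuit.
import numpy as np


def expectation(theta: float) -> float:
    """Stand-in for a circuit expectation value; cos(theta) behaves like <Z> after RY(theta)."""
    return np.cos(theta)


def parameter_shift_gradient(theta: float, shift: float = np.pi / 2) -> float:
    """Two evaluations at theta +/- shift, combined with the 1/2 factor.

    With shift = pi/2 the rule is exact for sinusoidal expectations like this toy one.
    """
    return 0.5 * (expectation(theta + shift) - expectation(theta - shift))


if __name__ == "__main__":
    theta = 0.3
    print(parameter_shift_gradient(theta))  # ~ -sin(0.3)
    print(-np.sin(theta))                   # analytic reference
```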
loliverhennigh/PhantomGaze
phantomgaze/render/contour.py
[ { "identifier": "ScreenBuffer", "path": "phantomgaze/buffers.py", "snippet": "class ScreenBuffer:\n \"\"\"\n Create a screen buffer.\n The screen buffer stores fragment information for rendering.\n\n Parameters\n ----------\n height : int\n The height of the screen buffer\n width : int\n The width of the screen buffer\n \"\"\"\n\n def __init__(self, height, width):\n # Store height and width\n self.height = height\n self.width = width\n\n # Create buffers for opaque rendering\n self.opaque_pixel_buffer = cp.zeros((height, width, 3), dtype=cp.float32)\n self.depth_buffer = cp.zeros((height, width), dtype=cp.float32) + cp.inf\n self.normal_buffer = cp.zeros((height, width, 3), dtype=cp.float32)\n\n # Create buffer transparent rendering\n self.transparent_pixel_buffer = cp.zeros((height, width, 3), dtype=cp.float32)\n self.revealage_buffer = cp.ones((height, width), dtype=cp.float32)\n\n # Create buffer for background\n self.background_buffer = cp.zeros((height, width, 3), dtype=cp.float32)\n\n # Create buffer for final image\n self.image_buffer = cp.zeros((height, width, 4), dtype=cp.float32)\n\n @staticmethod\n def from_camera(camera):\n \"\"\" Create a screen buffer from a camera\n\n Parameters\n ----------\n camera : Camera\n The camera to create the screen buffer for, uses the camera's height and width\n \"\"\"\n\n # Make screen buffer\n screen_buffer = ScreenBuffer(camera.height, camera.width)\n\n # Set background\n screen_buffer.background_buffer[:, :, 0] = camera.background.color[0]\n screen_buffer.background_buffer[:, :, 1] = camera.background.color[1]\n screen_buffer.background_buffer[:, :, 2] = camera.background.color[2]\n\n return screen_buffer\n\n @property\n def image(self):\n \"\"\" Get the image buffer \"\"\"\n\n # Run the kernel\n threads_per_block = (16, 16)\n blocks_per_grid = (\n (self.width + threads_per_block[0] - 1) // threads_per_block[0],\n (self.height + threads_per_block[1] - 1) // threads_per_block[1]\n )\n _combine_buffers_kernel[blocks_per_grid, threads_per_block](\n self.opaque_pixel_buffer,\n self.depth_buffer,\n self.normal_buffer,\n self.transparent_pixel_buffer,\n self.revealage_buffer,\n self.background_buffer,\n self.image_buffer\n )\n return self.image_buffer\n\n def clear(self):\n \"\"\" Clear the screen buffer \"\"\"\n\n # Clear opaque buffers\n self.opaque_pixel_buffer.fill(0.0)\n self.depth_buffer.fill(cp.inf)\n self.normal_buffer.fill(0.0)\n\n # Clear transparent buffers\n self.transparent_pixel_buffer.fill(0.0)\n self.revealage_buffer.fill(1.0)\n\n # Clear background buffer\n self.background_buffer.fill(0.0)\n\n # Clear image buffer\n self.image_buffer.fill(0.0)" }, { "identifier": "Colormap", "path": "phantomgaze/coloring.py", "snippet": "class Colormap(Coloring):\n \"\"\"A colormap class for plots.\n\n Parameters\n ----------\n name : str\n The name of the colormap using matplotlib's naming convention.\n vmin : float\n The minimum value of the colormap.\n vmax : float\n The maximum value of the colormap.\n num_table_values : int\n The number of values in the colormap table.\n opacity : cp.array, float, optional\n The opacity array for the colormap. If None is given, then the\n colormap is opaque. 
If an array is given, then the colormap uses\n the array as the opacity.\n nan_color : tuple\n The color for NaN values.\n \"\"\"\n\n def __init__(\n self,\n name='jet',\n vmin=0.0,\n vmax=1.0,\n num_table_values=256,\n opacity=None,\n nan_color=(1.0, 1.0, 0.0),\n nan_opacity=1.0,\n ):\n\n \"\"\"Initialize the colormap.\"\"\"\n self.name = name\n self.vmin = vmin\n self.vmax = vmax\n self.num_table_values = num_table_values\n self.nan_color = nan_color\n self.nan_opacity = nan_opacity\n\n # Get the colormap\n self.cmap = cm.get_cmap(name, num_table_values)\n self.color_map_array = cp.array([self.cmap(i) for i in range(num_table_values)])\n\n # Set the opacity\n if (opacity is None):\n self.opaque = True\n elif isinstance(opacity, float) and opacity == 1.0:\n self.opaque = True\n elif isinstance(opacity, float) and opacity < 1.0:\n self.opaque = False\n self.color_map_array[:, 3] = opacity\n elif isinstance(opacity, (list, tuple, cp.ndarray, np.ndarray)):\n self.opaque = False\n self.color_map_array[:, 3] = cp.array(opacity)\n else:\n raise TypeError('Invalid opacity type.')" }, { "identifier": "SolidColor", "path": "phantomgaze/coloring.py", "snippet": "class SolidColor(Coloring):\n \"\"\"A coloring class for solid colors.\n TODO: Find a better abstraction for this.\n\n Parameters\n ----------\n color : tuple\n The color for the solid color.\n opacity : float\n The opacity for the solid color.\n \"\"\"\n\n def __init__(\n self,\n color=(1.0, 1.0, 1.0),\n opacity=1.0,\n ):\n self.vmin = 0.0 # Not used\n self.vmax = 1.0\n self.color_map_array = cp.array([[color[0], color[1], color[2], opacity]])\n self.nan_color = color # Not used\n self.nan_opacity = 1.0\n if opacity == 1.0:\n self.opaque = True\n else:\n self.opaque = False" }, { "identifier": "calculate_ray_direction", "path": "phantomgaze/render/camera.py", "snippet": "@cuda.jit(device=True)\ndef calculate_ray_direction(\n x,\n y,\n img_shape,\n camera_position,\n camera_focal,\n camera_up):\n \"\"\"\n Calculate the direction of a ray from the camera to the image plane.\n\n Parameters\n ----------\n x : int\n The x coordinate of the pixel.\n y : int\n The y coordinate of the pixel.\n img_shape : tuple\n The shape of the image.\n camera_position : tuple\n The position of the camera.\n camera_focal : tuple\n The focal point of the camera.\n camera_up : tuple\n The up vector of the camera.\n\n Returns\n -------\n ray_direction : tuple\n \"\"\"\n\n # Compute base vectors\n forward = (\n camera_focal[0] - camera_position[0],\n camera_focal[1] - camera_position[1],\n camera_focal[2] - camera_position[2],\n )\n forward = normalize(forward)\n right = cross(forward, camera_up)\n right = normalize(right)\n up = cross(right, forward)\n\n # Determine the center of the image\n center = (\n camera_position[0] + forward[0],\n camera_position[1] + forward[1],\n camera_position[2] + forward[2],\n )\n\n # Calculate the location on the image plane corresponding (x, y)\n aspect_ratio = img_shape[1] / img_shape[0]\n s = (x - img_shape[1] / 2) / img_shape[1]\n t = (y - img_shape[0] / 2) / img_shape[0]\n\n # Adjust for aspect ratio and field of view (assuming 90 degrees here)\n s *= aspect_ratio * math.tan(math.pi / 4.0)\n t *= math.tan(math.pi / 4.0)\n point_on_image_plane = (\n center[0] + s * right[0] + t * up[0],\n center[1] + s * right[1] + t * up[1],\n center[2] + s * right[2] + t * up[2],\n )\n\n # Calculate the ray direction\n ray_direction = (\n point_on_image_plane[0] - camera_position[0],\n point_on_image_plane[1] - camera_position[1],\n 
point_on_image_plane[2] - camera_position[2],\n )\n ray_direction = normalize(ray_direction)\n\n return ray_direction" }, { "identifier": "sample_array", "path": "phantomgaze/render/utils.py", "snippet": "@cuda.jit(device=True)\ndef sample_array(\n array,\n spacing,\n origin,\n position):\n \"\"\"Sample an array at a given position.\n Uses trilinear interpolation.\n\n Parameters\n ----------\n array : ndarray\n The volume data.\n spacing : tuple\n The spacing of the volume data.\n origin : tuple\n The origin of the volume data.\n position : tuple\n The position to sample.\n \"\"\"\n\n # Get the lower i, j, and k indices of the volume\n i = int((position[0] - origin[0]) / spacing[0])\n j = int((position[1] - origin[1]) / spacing[1])\n k = int((position[2] - origin[2]) / spacing[2])\n\n # Get the fractional part of the indices\n dx = (position[0] - origin[0]) / spacing[0] - i\n dy = (position[1] - origin[1]) / spacing[1] - j\n dz = (position[2] - origin[2]) / spacing[2] - k\n\n # Sample the array at the indices\n v_000 = _safe_index_array(array, i, j, k)\n v_100 = _safe_index_array(array, i + 1, j, k)\n v_010 = _safe_index_array(array, i, j + 1, k)\n v_110 = _safe_index_array(array, i + 1, j + 1, k)\n v_001 = _safe_index_array(array, i, j, k + 1)\n v_101 = _safe_index_array(array, i + 1, j, k + 1)\n v_011 = _safe_index_array(array, i, j + 1, k + 1)\n v_111 = _safe_index_array(array, i + 1, j + 1, k + 1)\n\n # Perform trilinear interpolation\n return _trilinear_interpolation(\n v_000,\n v_100,\n v_010,\n v_110,\n v_001,\n v_101,\n v_011,\n v_111,\n dx,\n dy,\n dz)" }, { "identifier": "sample_array_derivative", "path": "phantomgaze/render/utils.py", "snippet": "@cuda.jit(device=True)\ndef sample_array_derivative(\n array,\n spacing,\n origin,\n position):\n \"\"\"Compute the derivative of an array at a given position.\n\n Parameters\n ----------\n array : ndarray\n The volume data.\n spacing : tuple\n The spacing of the volume data.\n origin : tuple\n The origin of the volume data.\n position : tuple\n The position to sample.\n \"\"\"\n\n # Move the position by a small amount\n value_0_1_1 = sample_array(array, spacing, origin, (position[0] - spacing[0]/2.0, position[1], position[2]))\n value_1_0_1 = sample_array(array, spacing, origin, (position[0], position[1] - spacing[1]/2.0, position[2]))\n value_1_1_0 = sample_array(array, spacing, origin, (position[0], position[1], position[2] - spacing[2]/2.0))\n value_2_1_1 = sample_array(array, spacing, origin, (position[0] + spacing[0]/2.0, position[1], position[2]))\n value_1_2_1 = sample_array(array, spacing, origin, (position[0], position[1] + spacing[1]/2.0, position[2]))\n value_1_1_2 = sample_array(array, spacing, origin, (position[0], position[1], position[2] + spacing[2]/2.0))\n\n # Compute the derivative\n array_dx = (value_2_1_1 - value_0_1_1) / spacing[0]\n array_dy = (value_1_2_1 - value_1_0_1) / spacing[1]\n array_dz = (value_1_1_2 - value_1_1_0) / spacing[2]\n\n # Return the derivative\n return (array_dx, array_dy, array_dz)" }, { "identifier": "ray_intersect_box", "path": "phantomgaze/render/utils.py", "snippet": "@cuda.jit(device=True)\ndef ray_intersect_box(\n box_origin,\n box_upper,\n ray_origin,\n ray_direction):\n \"\"\"Compute the intersection of a ray with a box.\n\n Parameters\n ----------\n box_origin : tuple\n The origin of the box\n box_upper : tuple\n The upper bounds of the box.\n ray_origin : tuple\n The origin of the ray.\n ray_direction : tuple\n The direction of the ray.\n \"\"\"\n\n # Get tmix and tmax\n tmin_x = 
(box_origin[0] - ray_origin[0]) / ray_direction[0]\n tmax_x = (box_upper[0] - ray_origin[0]) / ray_direction[0]\n tmin_y = (box_origin[1] - ray_origin[1]) / ray_direction[1]\n tmax_y = (box_upper[1] - ray_origin[1]) / ray_direction[1]\n tmin_z = (box_origin[2] - ray_origin[2]) / ray_direction[2]\n tmax_z = (box_upper[2] - ray_origin[2]) / ray_direction[2]\n\n # Get tmin and tmax\n tmmin_x = min(tmin_x, tmax_x)\n tmmax_x = max(tmin_x, tmax_x)\n tmmin_y = min(tmin_y, tmax_y)\n tmmax_y = max(tmin_y, tmax_y)\n tmmin_z = min(tmin_z, tmax_z)\n tmmax_z = max(tmin_z, tmax_z)\n\n # Get t0 and t1\n t0 = max(0.0, max(tmmin_x, max(tmmin_y, tmmin_z)))\n t1 = min(tmmax_x, min(tmmax_y, tmmax_z))\n\n # Return the intersection\n return t0, t1" }, { "identifier": "normalize", "path": "phantomgaze/utils/math.py", "snippet": "@cuda.jit(device=True)\ndef normalize(vector):\n \"\"\"Normalize a vector.\n\n Parameters\n ----------\n vector : tuple\n The vector to normalize.\n\n Returns\n -------\n tuple\n The normalized vector.\n \"\"\"\n\n # Get the length of the vector\n length = (vector[0] ** 2 + vector[1] ** 2 + vector[2] ** 2) ** 0.5\n\n # Normalize the vector\n return vector[0] / length, vector[1] / length, vector[2] / length" }, { "identifier": "dot", "path": "phantomgaze/utils/math.py", "snippet": "@cuda.jit(device=True)\ndef dot(vector1, vector2):\n \"\"\"Compute the dot product of two vectors.\n\n Parameters\n ----------\n vector1 : tuple\n The first vector.\n vector2 : tuple\n The second vector.\n\n Returns\n -------\n float\n The dot product of the two vectors.\n \"\"\"\n\n # Compute the dot product\n if len(vector1) == 2:\n return vector1[0] * vector2[0] + vector1[1] * vector2[1]\n else:\n return vector1[0] * vector2[0] + vector1[1] * vector2[1] + vector1[2] * vector2[2]" }, { "identifier": "cross", "path": "phantomgaze/utils/math.py", "snippet": "@cuda.jit(device=True)\ndef cross(vector1, vector2):\n \"\"\"Compute the cross product of two vectors.\n\n Parameters\n ----------\n vector1 : tuple\n The first vector.\n vector2 : tuple\n The second vector.\n\n Returns\n -------\n tuple\n The cross product of the two vectors.\n \"\"\"\n\n # Compute the cross product\n return (vector1[1] * vector2[2] - vector1[2] * vector2[1],\n vector1[2] * vector2[0] - vector1[0] * vector2[2],\n vector1[0] * vector2[1] - vector1[1] * vector2[0])" }, { "identifier": "scalar_to_color", "path": "phantomgaze/render/color.py", "snippet": "@cuda.jit\ndef scalar_to_color(value, color_map_array, vmin, vmax):\n \"\"\"Convert a scalar value to a color.\n\n Parameters\n ----------\n value : float\n The scalar value to convert.\n color_map_array : ndarray\n The color map array.\n vmin : float\n The minimum value of the scalar range.\n vmax : float\n The maximum value of the scalar range.\n \"\"\"\n\n # Bound the value\n value = min(max(value, vmin), vmax)\n\n # Get the index\n index = int((value - vmin) / (vmax - vmin) * (color_map_array.shape[0] - 1))\n\n # Set the color\n color = (\n color_map_array[index, 0],\n color_map_array[index, 1],\n color_map_array[index, 2],\n color_map_array[index, 3],\n )\n return color" } ]
import cupy as cp import numba from numba import cuda from phantomgaze import ScreenBuffer from phantomgaze import Colormap, SolidColor from phantomgaze.render.camera import calculate_ray_direction from phantomgaze.render.utils import sample_array, sample_array_derivative, ray_intersect_box from phantomgaze.utils.math import normalize, dot, cross from phantomgaze.render.color import scalar_to_color
5,522
The volume data. spacing : tuple The spacing of the volume data. origin : tuple The origin of the volume data. camera_position : tuple The position of the camera. camera_focal : tuple The focal point of the camera. camera_up : tuple The up vector of the camera. max_depth : float The maximum depth, used for Weighted Blended Order-Independent Transparency. threshold : float The threshold to use for the contour. color_array : ndarray The color data. color_map_array : ndarray The color map array. vmin : float The minimum value of the scalar range. vmax : float The maximum value of the scalar range. nan_color : tuple The color to use for NaN values. nan_opacity : float The opacity to use for NaN values. opaque : bool Whether the geometry is opaque or not. opaque_pixel_buffer : ndarray The opaque pixel buffer. depth_buffer : ndarray The depth buffer. normal_buffer : ndarray The normal buffer. transparent_pixel_buffer : ndarray The transparent pixel buffer. revealage_buffer : ndarray The reveal buffer. """ # Get the x and y indices x, y = cuda.grid(2) # Make sure the indices are in bounds if x >= opaque_pixel_buffer.shape[1] or y >= opaque_pixel_buffer.shape[0]: return # Get ray direction ray_direction = calculate_ray_direction( x, y, opaque_pixel_buffer.shape, camera_position, camera_focal, camera_up) # Get volume upper bound volume_upper = ( origin[0] + spacing[0] * volume_array.shape[0], origin[1] + spacing[1] * volume_array.shape[1], origin[2] + spacing[2] * volume_array.shape[2] ) # Get the intersection of the ray with the volume t0, t1 = ray_intersect_box( origin, volume_upper, camera_position, ray_direction) # If there is no intersection, return if t0 > t1: return # Get the starting point of the ray ray_pos = ( camera_position[0] + t0 * ray_direction[0], camera_position[1] + t0 * ray_direction[1], camera_position[2] + t0 * ray_direction[2] ) # Get the step size step_size = min(spacing[0], min(spacing[1], spacing[2])) # Set starting value to lowest possible value value = sample_array(volume_array, spacing, origin, ray_pos) # Inside-outside stored in the sign sign = 1 if value > threshold else -1 # Start the ray marching distance = t0 for step in range(int((t1 - t0) / step_size)): # Check if distance is greater then current depth if (distance > depth_buffer[y, x]): return # Get next step position next_ray_pos = ( ray_pos[0] + step_size * ray_direction[0], ray_pos[1] + step_size * ray_direction[1], ray_pos[2] + step_size * ray_direction[2] ) # Get the value in the next step next_value = sample_array(volume_array, spacing, origin, next_ray_pos) # If contour is crossed, set the color and depth if (next_value - threshold) * sign < 0: # Update the sign sign = -sign # Linearly interpolate the position t = (threshold - value) / (next_value - value) pos_contour = ( ray_pos[0] + t * step_size * ray_direction[0], ray_pos[1] + t * step_size * ray_direction[1], ray_pos[2] + t * step_size * ray_direction[2] ) # Get gradient gradient = sample_array_derivative( volume_array, spacing, origin, pos_contour) gradient = normalize(gradient) # Calculate intensity
# Render functions for rendering a contour of a volume. @cuda.jit def contour_kernel( volume_array, spacing, origin, camera_position, camera_focal, camera_up, max_depth, threshold, color_array, color_map_array, vmin, vmax, nan_color, nan_opacity, opaque, opaque_pixel_buffer, depth_buffer, normal_buffer, transparent_pixel_buffer, revealage_buffer): """Kernel for rendering a contour of a volume. Parameters ---------- volume_array : ndarray The volume data. spacing : tuple The spacing of the volume data. origin : tuple The origin of the volume data. camera_position : tuple The position of the camera. camera_focal : tuple The focal point of the camera. camera_up : tuple The up vector of the camera. max_depth : float The maximum depth, used for Weighted Blended Order-Independent Transparency. threshold : float The threshold to use for the contour. color_array : ndarray The color data. color_map_array : ndarray The color map array. vmin : float The minimum value of the scalar range. vmax : float The maximum value of the scalar range. nan_color : tuple The color to use for NaN values. nan_opacity : float The opacity to use for NaN values. opaque : bool Whether the geometry is opaque or not. opaque_pixel_buffer : ndarray The opaque pixel buffer. depth_buffer : ndarray The depth buffer. normal_buffer : ndarray The normal buffer. transparent_pixel_buffer : ndarray The transparent pixel buffer. revealage_buffer : ndarray The reveal buffer. """ # Get the x and y indices x, y = cuda.grid(2) # Make sure the indices are in bounds if x >= opaque_pixel_buffer.shape[1] or y >= opaque_pixel_buffer.shape[0]: return # Get ray direction ray_direction = calculate_ray_direction( x, y, opaque_pixel_buffer.shape, camera_position, camera_focal, camera_up) # Get volume upper bound volume_upper = ( origin[0] + spacing[0] * volume_array.shape[0], origin[1] + spacing[1] * volume_array.shape[1], origin[2] + spacing[2] * volume_array.shape[2] ) # Get the intersection of the ray with the volume t0, t1 = ray_intersect_box( origin, volume_upper, camera_position, ray_direction) # If there is no intersection, return if t0 > t1: return # Get the starting point of the ray ray_pos = ( camera_position[0] + t0 * ray_direction[0], camera_position[1] + t0 * ray_direction[1], camera_position[2] + t0 * ray_direction[2] ) # Get the step size step_size = min(spacing[0], min(spacing[1], spacing[2])) # Set starting value to lowest possible value value = sample_array(volume_array, spacing, origin, ray_pos) # Inside-outside stored in the sign sign = 1 if value > threshold else -1 # Start the ray marching distance = t0 for step in range(int((t1 - t0) / step_size)): # Check if distance is greater then current depth if (distance > depth_buffer[y, x]): return # Get next step position next_ray_pos = ( ray_pos[0] + step_size * ray_direction[0], ray_pos[1] + step_size * ray_direction[1], ray_pos[2] + step_size * ray_direction[2] ) # Get the value in the next step next_value = sample_array(volume_array, spacing, origin, next_ray_pos) # If contour is crossed, set the color and depth if (next_value - threshold) * sign < 0: # Update the sign sign = -sign # Linearly interpolate the position t = (threshold - value) / (next_value - value) pos_contour = ( ray_pos[0] + t * step_size * ray_direction[0], ray_pos[1] + t * step_size * ray_direction[1], ray_pos[2] + t * step_size * ray_direction[2] ) # Get gradient gradient = sample_array_derivative( volume_array, spacing, origin, pos_contour) gradient = normalize(gradient) # Calculate intensity
intensity = dot(gradient, ray_direction)
8
2023-10-26 23:53:16+00:00
8k
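The ray_intersect_box device function quoted above is the classic slab method: per-axis entry and exit distances against the box planes are computed, the latest entry and earliest exit are kept, and t0 > t1 signals a miss before the ray-marching loop starts. A plain NumPy rendering of the same idea (names here are illustrative; the library's own version is a Numba CUDA device function) is:

import numpy as np

def ray_box_intersection(box_min, box_max, ray_origin, ray_dir):
    # Slab method: parametric distances to each pair of axis-aligned planes.
    # Assumes no exactly-zero direction component, like the device function above.
    box_min = np.asarray(box_min, dtype=float)
    box_max = np.asarray(box_max, dtype=float)
    ray_origin = np.asarray(ray_origin, dtype=float)
    ray_dir = np.asarray(ray_dir, dtype=float)

    t_a = (box_min - ray_origin) / ray_dir
    t_b = (box_max - ray_origin) / ray_dir

    # Latest per-axis entry vs. earliest per-axis exit
    t0 = max(0.0, float(np.max(np.minimum(t_a, t_b))))
    t1 = float(np.min(np.maximum(t_a, t_b)))
    return t0, t1  # t0 > t1 means the ray misses the box

For a unit cube and a ray that starts outside it, t0 and t1 bracket exactly the segment that the contour kernel marches over.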
vTuanpham/Large_dataset_translator
examples/OpenOrca/OpenOrca_Parser.py
[ { "identifier": "BaseConfig", "path": "configs/base_config.py", "snippet": "class BaseConfig(Config):\r\n \"\"\"\r\n A single training/test example for base config.\r\n \"\"\"\r\n system_prompt: str\r\n\r\n question_text: str\r\n\r\n orig_answer_texts: str = None\r\n answer_lengths: int = None\r\n\r\n def __post_init__(self) -> None:\r\n # Post validate\r\n self.answer_lengths = len(self.orig_answer_texts) if self.orig_answer_texts is not None else None\r\n\r\n @property\r\n def __repr__(self) -> str:\r\n s = \"\"\r\n s += f\"\\n Question id: {self.qas_id}\"\r\n s += f\"\\n System prompt: {self.system_prompt}\"\r\n s += f\"\\n Question: {self.question_text}\"\r\n if self.orig_answer_texts:\r\n s += f\"\\n Answer text: {self.orig_answer_texts}\"\r\n s += f\"\\n Answer length: {self.answer_lengths}\"\r\n\r\n return s\r\n\r\n @property\r\n def get_dict(self) -> Dict:\r\n return asdict(self)\r\n\r\n @classmethod\r\n def get_keys(cls) -> List[str]:\r\n all_fields = fields(cls)\r\n return [v.name for v in all_fields]\r\n\r\n def get_example(self,\r\n inputs_column: str=\"prompt\",\r\n targets_column: str=\"target\",\r\n system_prefix: str=\"@@@@@@@@@@@@@ System prompt:\",\r\n question_prefix: str=\"####### Instruction:\",\r\n response_prefix: str=\"%%%%%%% Response:\",\r\n is_training: bool=True,\r\n do_perplexity_eval: bool=False,\r\n do_generative_eval: bool=False,\r\n task_type: str=None,\r\n ) -> Dict:\r\n assert task_type, \"Please specified the task type inorder to get the example\"\r\n\r\n system_msg = ' ' + system_prefix + '\\n' + self.system_prompt + \"\\n\\n\"\r\n question_msg = question_prefix + '\\n' + self.question_text + \"\\n\\n\"\r\n prompt = system_msg + ' ' + question_msg\r\n label = self.orig_answer_texts + \"\\n\"\r\n\r\n if task_type == \"SEQ_2_SEQ_LM\":\r\n return {inputs_column: prompt,\r\n targets_column: label}\r\n elif task_type == \"CAUSAL_LM\":\r\n if is_training:\r\n return {inputs_column: prompt + ' ' + response_prefix + '\\n' + label}\r\n\r\n example_dict = {}\r\n # The perplexity field is for perplexity evaluation, which needed the full prompt and label\r\n # while the inputs_column only have prompt and response_prefix for model.generate evaluation\r\n if do_generative_eval:\r\n example_dict[inputs_column] = prompt + ' ' + response_prefix + '\\n'\r\n example_dict[targets_column] = label\r\n\r\n if do_perplexity_eval:\r\n example_dict[\"perplexity\"] = prompt + ' ' + response_prefix + '\\n' + label\r\n\r\n if not bool(example_dict):\r\n raise \"Evaluation files is provided but don't know what to do with them...\"\r\n\r\n return example_dict\r\n else:\r\n raise f\"This task type {task_type} is not support\"\r" }, { "identifier": "DataParser", "path": "translator/data_parser.py", "snippet": "class DataParser(metaclass=ForceBaseCallMeta):\r\n def __init__(self, file_path: str,\r\n output_dir: str,\r\n parser_name: str,\r\n target_fields: List[str],\r\n target_config: Union[BaseConfig, QAConfig, DialogsConfig],\r\n do_translate: bool = False,\r\n enable_sub_task_thread: bool = True, # Enable splitting a large list into sublist if a list of one example is too large to process\r\n # This argument go with max_list_length_per_thread\r\n no_translated_code: bool = False,\r\n max_example_per_thread: int = 400, # How many examples, each thread can contain\r\n large_chunks_threshold: int = 20000, # Maximum number of examples that will be distributed evenly across threads, any examples exceed this threshold will be process in queue\r\n max_list_length_per_thread: int = 3, # 
Maximum number of strings contain in a list in a single thread.\r\n # if larger, split the list into sub-list and process in parallel\r\n translator: Provider = GoogleProvider,\r\n source_lang: str = \"en\",\r\n target_lang: str = \"vi\",\r\n fail_translation_code: str=\"P1OP1_F\" # Fail code for *expected* fail translation and can be removed\r\n # post-translation\r\n ) -> None:\r\n\r\n self.data_read = None\r\n self.converted_data = None\r\n self.file_path = file_path\r\n self.output_dir = output_dir\r\n assert os.path.isdir(self.output_dir), \"Please provide the correct output directory\"\r\n\r\n self.parser_name = parser_name\r\n assert target_config, \"Please specified the target config (Choose from the configs dir)\"\r\n self.target_config = target_config\r\n\r\n self.do_translate = do_translate\r\n\r\n if self.do_translate:\r\n self.fail_translation_code = fail_translation_code\r\n self.enable_sub_task_thread = enable_sub_task_thread\r\n self.source_lang = source_lang\r\n self.target_lang = target_lang\r\n assert target_fields, f\"Please specified target fields to be translate from the {self.target_config} config\"\r\n self.target_fields = target_fields\r\n assert set(self.target_fields).issubset(set(self.target_config.get_keys())), \\\r\n f\"The target fields {self.target_fields} do not exist in the target config {self.target_config.get_keys()}\"\r\n self.no_translated_code = no_translated_code\r\n assert max_example_per_thread < large_chunks_threshold, \\\r\n \" Large chunks threshold can't be smaller than max_example per thread!\"\r\n self.max_example_per_thread = max_example_per_thread\r\n self.large_chunks_threshold = large_chunks_threshold\r\n if self.enable_sub_task_thread:\r\n self.max_list_length_per_thread = max_list_length_per_thread\r\n\r\n self.converted_data_translated = None\r\n\r\n self.translator = translator\r\n\r\n @property\r\n def get_translator(self) -> Provider:\r\n return deepcopy(self.translator)()\r\n\r\n @staticmethod\r\n def id_generator(size=6, chars=string.ascii_uppercase + string.digits) -> str:\r\n return ''.join(random.choice(chars) for _ in range(size))\r\n\r\n @staticmethod\r\n def split_list(input_list: List[str], max_sub_length: int) -> List[list]:\r\n return [input_list[x:x + max_sub_length] for x in range(0, len(input_list), max_sub_length)]\r\n\r\n def validate(self, keys: List[str]) -> bool:\r\n dict_fields = self.target_config.get_keys()\r\n for key in dict_fields:\r\n assert key in keys, f\"\\n Invalid parser, the key '{key}' is missing from {dict_fields}\\n\" \\\r\n f\"you can adjust the fields {self.target_config.__name__} in the 'configs/*.py'\" \\\r\n f\" or fill in the missing field\"\r\n return True\r\n\r\n @timeit\r\n def pre_translate_validate(self) -> None:\r\n validated_translate_data = []\r\n # Note: This validates will override the original self.converted_data\r\n for idx, example in enumerate(tqdm(self.converted_data, desc=\"Validating data for translation:\")):\r\n for key in self.target_fields:\r\n if self.no_translated_code:\r\n example_filters = 0\r\n contain_code, score, found_elements = have_code(example[key])\r\n if contain_code:\r\n example_filters += 1\r\n if len(self.converted_data) - 2 == idx:\r\n tqdm.write(f\"Number of example with code: {example_filters}\")\r\n break\r\n elif key == self.target_fields[-1]:\r\n validated_translate_data.append(example)\r\n else:\r\n if key == self.target_fields[-1]: validated_translate_data.append(example)\r\n\r\n print(f\"\\nTotal data left after filtering for translation: 
{len(validated_translate_data)}\\n\")\r\n self.converted_data = validated_translate_data\r\n\r\n @timeit\r\n def post_translate_validate(self) -> None:\r\n post_validated_translate_data = []\r\n # Note: This validates will override the original self.converted_data_translated\r\n for idx, example in enumerate(tqdm(self.converted_data_translated, desc=\"Validating data after translation:\")):\r\n for key in self.target_fields:\r\n example_filters = 0\r\n if have_re_code(example[key], code=self.fail_translation_code):\r\n example_filters += 1\r\n if len(self.converted_data_translated) - 2 == idx:\r\n tqdm.write(f\"Number of example with fail code: {example_filters}\")\r\n break\r\n elif key == self.target_fields[-1]:\r\n post_validated_translate_data.append(example)\r\n\r\n print(f\"\\nTotal data left after filtering fail translation: {len(post_validated_translate_data)}\\n\")\r\n self.converted_data_translated = post_validated_translate_data\r\n\r\n def __translate_per_key(self, example: Dict, translator: Provider = None, progress_idx: int = 0) -> Dict:\r\n '''\r\n This function loop through each key of one example and send to __translate_texts if the value of the key is\r\n under a certain threshold. If exceeded, then send to __sublist_multithread_translate\r\n '''\r\n\r\n assert self.do_translate, \"Please enable translate via self.do_translate\"\r\n keys = self.target_config.get_keys()\r\n for key in keys:\r\n if key in self.target_fields:\r\n type = \"str\" if isinstance(example[key], str) else \"list\"\r\n if example[key] == \"\":\r\n continue\r\n if type == \"list\":\r\n for data in example[key]:\r\n if len(data) > 15000:\r\n warnings.warn(\"Example\" + example[\"qas_id\"] + \" have field len larger than 15000\")\r\n example[key].append(data[:15000])\r\n else:\r\n if len(example[key]) > 15000:\r\n warnings.warn(\"Example\" + example[\"qas_id\"] + \" have field len larger than 15000\")\r\n example[key] = example[key][:15000]\r\n\r\n if self.enable_sub_task_thread:\r\n average_length_sub_task_criteria = False\r\n if type == \"list\" and len(example[key]) > 2:\r\n average_length = sum(len(lst) for lst in example[key]) / len(example[key])\r\n if average_length > 1600: average_length_sub_task_criteria = True\r\n if type == \"list\" and average_length_sub_task_criteria and len(example[key]) >= self.max_list_length_per_thread:\r\n # tqdm.write(f\"\\nSplitting {key} field which contain {len(example[key])} items on chunk {progress_idx}\\n\")\r\n del translator\r\n example[key] = self.__sublist_multithread_translate(example[key],\r\n progress_idx,\r\n key)\r\n else:\r\n example[key] = self.__translate_texts(src_texts=example[key], translator=translator)\r\n else:\r\n example[key] = self.__translate_texts(src_texts=example[key], translator=translator)\r\n\r\n return example\r\n\r\n def __sublist_multithread_translate(self,\r\n list_str: List[str],\r\n progress_idx: int = 0,\r\n field_name: str=None # The field name (key name) of one example that exceed a certain threshold and needed to be split and translate in parallel\r\n ) -> List[str]:\r\n '''\r\n This function split a large list into sub-list and translate it in parallel, orders are maintained when merge all\r\n sub-lists, this is useful when order are necessary (e.g Dialogs example)\r\n '''\r\n\r\n translated_list_data = []\r\n num_threads = len(list_str) / self.max_list_length_per_thread\r\n sub_str_lists = self.split_list(list_str, max_sub_length=self.max_list_length_per_thread)\r\n with ThreadPoolExecutor(max_workers=num_threads) as 
executor:\r\n futures = []\r\n finished_task = 0\r\n lock = threading.RLock()\r\n\r\n def callback_sub_list_done(future):\r\n nonlocal translated_list_data\r\n nonlocal finished_task\r\n nonlocal lock\r\n if not future.exception():\r\n with lock:\r\n # This need to be .append to keep the list structure\r\n # Since this deal with sub-list and needed to be merged later\r\n translated_list_data.append(future.result())\r\n finished_task += 1\r\n else:\r\n tqdm.write(f\"Sub task of chunk {progress_idx} with field {field_name} failed with the following error: {future.exception()}.\"\r\n f\"Restarting thread when others finished...\")\r\n pass\r\n\r\n for idx, list_chunk in enumerate(sub_str_lists):\r\n # Assign each thread with a new Translator instance\r\n future_chunk = executor.submit(self.__translate_texts,\r\n src_texts=list_chunk,\r\n translator=self.get_translator,\r\n sub_list_idx=idx)\r\n future_chunk.add_done_callback(callback_sub_list_done)\r\n future_dict = {\r\n \"future\": future_chunk,\r\n \"idx\": idx\r\n }\r\n futures.append(future_dict)\r\n\r\n # Wait for all threads to complete\r\n while finished_task < len(futures):\r\n for future_dict in futures:\r\n # If exception occurs in one of the thread, restart the thread with its specific chunk\r\n if future_dict['future'].exception():\r\n tqdm.write(\r\n f\"Thread {future_dict['idx']} failed, restarting thread with chunk {future_dict['idx']}\")\r\n backup_future_chunk = executor.submit(self.__translate_texts,\r\n src_texts=sub_str_lists[future_dict['idx']],\r\n translator=self.get_translator,\r\n sub_list_idx=future_dict['idx'])\r\n backup_future_chunk.add_done_callback(callback_sub_list_done)\r\n backup_future_dict = {\"future\": backup_future_chunk,\r\n \"idx\": future_dict['idx']}\r\n futures[future_dict['idx']] = backup_future_dict\r\n continue\r\n\r\n # Sorting the list of dictionaries based on the 'key' value\r\n translated_list_data = sorted(translated_list_data, key=lambda x: x['key'])\r\n # Extracting values after sorting\r\n translated_list_data = [item['text_list'] for item in translated_list_data]\r\n\r\n def flatten_list(nested_list):\r\n '''\r\n Turn a list from [[], [], []] -> []\r\n '''\r\n\r\n flattened_list = []\r\n for item in nested_list:\r\n if isinstance(item, list):\r\n flattened_list.extend(flatten_list(item))\r\n else:\r\n flattened_list.append(item)\r\n return flattened_list\r\n\r\n translated_list_data = flatten_list(translated_list_data)\r\n\r\n return translated_list_data\r\n\r\n def __translate_texts(self,\r\n src_texts: Union[List[str], str],\r\n translator: Provider = None,\r\n sub_list_idx: int=None, # sub_list_idx is for pass through of index information and can be merge later by __sublist_multithread_translate\r\n ) -> Union[List[str], str, Dict[List[str], int]]:\r\n '''\r\n Actual place where translation take place\r\n '''\r\n\r\n assert self.do_translate, \"Please enable translate via self.do_translate\"\r\n # This if is for multithread Translator instance\r\n translator_instance = deepcopy(self.translator)() if not translator else translator\r\n\r\n target_texts = translator_instance.translate(src_texts,\r\n src=self.source_lang,\r\n dest=self.target_lang,\r\n fail_translation_code=self.fail_translation_code)\r\n\r\n return {'text_list': target_texts, 'key': sub_list_idx} if sub_list_idx is not None else target_texts\r\n\r\n def translate_converted(self,\r\n en_data: List[str] = None,\r\n desc: str = None,\r\n translator: Provider = None,\r\n large_chunk: List[str] = None) -> Union[None, 
List[str]]:\r\n '''\r\n This function support translation in multithread for large dataset\r\n (Does not maintain order for the final dataset)\r\n '''\r\n\r\n assert self.converted_data is not None or en_data is not None or large_chunk is not None, \\\r\n \"Please implement the convert function for DataParser \" \\\r\n \"and assign converted_data to self.converted_data\"\r\n\r\n if not en_data and not large_chunk:\r\n converted_data = self.converted_data\r\n elif not en_data:\r\n converted_data = large_chunk\r\n else:\r\n converted_data = en_data\r\n\r\n translated_data = []\r\n\r\n # Split large data into large chunks, recursive feed to the same function\r\n if len(converted_data) > self.large_chunks_threshold and large_chunk is None:\r\n num_large_chunks = len(converted_data) / self.large_chunks_threshold\r\n large_chunks = self.split_list(converted_data, max_sub_length=self.large_chunks_threshold)\r\n tqdm.write(\r\n f\"Data is way too large, spliting data into {num_large_chunks} large chunk for sequential translation\")\r\n\r\n for idx, large_chunk in enumerate(tqdm(large_chunks, desc=f\"Translating large chunk \", colour=\"red\")):\r\n tqdm.write(f\"Processing large chunk No: {idx}\")\r\n self.translate_converted(large_chunk=large_chunk)\r\n return None\r\n\r\n # Split large chunk into large example, recursive feed to the same function via multithread\r\n if len(converted_data) > self.max_example_per_thread and en_data is None:\r\n num_threads = len(converted_data) / self.max_example_per_thread\r\n chunks = self.split_list(converted_data, max_sub_length=self.max_example_per_thread)\r\n tqdm.write(f\"Data too large, splitting data into {num_threads} chunk, each chunk is {len(chunks[0])}\"\r\n f\" Processing with multithread...\")\r\n\r\n # Progress bar\r\n desc = \"Translating total converted large chunk data\" if large_chunk else \"Translating total converted data\"\r\n progress_bar = tqdm(total=math.ceil(num_threads), desc=desc, position=math.ceil(num_threads)+1)\r\n\r\n with ThreadPoolExecutor(max_workers=num_threads) as executor:\r\n futures = []\r\n finished_task = 0\r\n # https://stackoverflow.com/questions/22885775/what-is-the-difference-between-lock-and-rlock#22885810\r\n lock = threading.RLock()\r\n\r\n def callback_done(future):\r\n nonlocal translated_data\r\n nonlocal finished_task\r\n nonlocal progress_bar\r\n nonlocal lock\r\n if not future.exception():\r\n with lock:\r\n # This need to be += or .extend to shallow flatten the list structure\r\n translated_data += future.result()\r\n finished_task += 1\r\n progress_bar.update(1)\r\n else:\r\n tqdm.write(f\"Task failed with the following error: {future.exception()}.\"\r\n f\" Restarting thread when others finished\")\r\n pass\r\n\r\n for idx, chunk in enumerate(chunks):\r\n # Assign each thread with a new Translator instance\r\n future_chunk = executor.submit(self.translate_converted,\r\n en_data=chunk,\r\n desc=f\"chunk {idx}\",\r\n translator=self.get_translator)\r\n future_chunk.add_done_callback(callback_done)\r\n future_dict = {\"future\": future_chunk,\r\n \"idx\": idx}\r\n futures.append(future_dict)\r\n\r\n # Wait for all threads to complete\r\n while finished_task < len(futures):\r\n for future_dict in futures:\r\n # If exception occurs in one of the thread, restart the thread with its specific chunk\r\n if future_dict['future'].exception():\r\n tqdm.write(\r\n f\"Thread {future_dict['idx']} failed, restarting thread with chunk {future_dict['idx']}\")\r\n backup_future_chunk = 
executor.submit(self.translate_converted,\r\n en_data=chunks[future_dict['idx']],\r\n desc=f\"Backup chunk {future_dict['idx']}\",\r\n translator=self.get_translator)\r\n backup_future_chunk.add_done_callback(callback_done)\r\n backup_future_dict = {\"future\": backup_future_chunk,\r\n \"idx\": future_dict['idx']}\r\n futures[future_dict['idx']] = backup_future_dict\r\n continue\r\n\r\n if large_chunk:\r\n if not self.converted_data_translated:\r\n self.converted_data_translated = translated_data\r\n else:\r\n self.converted_data_translated += translated_data\r\n return None\r\n\r\n self.converted_data_translated = translated_data\r\n return None\r\n\r\n progress_bar_desc = \"Translating converted data\" if not desc else f\"Translating converted data {desc}\"\r\n for example in tqdm(converted_data, desc=progress_bar_desc, colour=\"#add8e6\"):\r\n translated_data_example = self.__translate_per_key(example,\r\n translator,\r\n progress_idx=int(re.findall(r'\\d+', desc)[0]) if desc and re.findall(r'\\d+', desc) else 0)\r\n translated_data.append(translated_data_example)\r\n if en_data: return translated_data\r\n if large_chunk:\r\n # Assuming that the previous large chunk process already create self.converted_data_translated\r\n # This cover the case where last large chunk only contain a single thread\r\n self.converted_data_translated += translated_data\r\n else:\r\n self.converted_data_translated = translated_data\r\n\r\n @abstractmethod\r\n @force_super_call\r\n def convert(self) -> Union[List[Dict], None]:\r\n assert self.data_read is not None, \"Please implement the read function for DataParser\" \\\r\n \" and assign data to self.data_read\"\r\n pass\r\n\r\n @abstractmethod\r\n @force_super_call\r\n def read(self) -> Union[List, Dict, None]:\r\n assert os.path.isfile(self.file_path), f\"Invalid path file for {self.file_path}\"\r\n pass\r\n\r\n @property\r\n @force_super_call\r\n @timeit\r\n def save(self) -> None:\r\n '''\r\n Save the correct format that pyarrow supported, which is \"line-delimited JSON\" and can be load by\r\n huggingface-datasets load_datasets function\r\n '''\r\n output_path = os.path.join(self.output_dir, f\"{self.parser_name}.json\")\r\n with open(output_path, 'w', encoding='utf-8') as jfile:\r\n print(f\"\\n Saving {self.parser_name} to {output_path}... \")\r\n for idx, data in enumerate(tqdm(self.converted_data, desc=\"Writing data to file\")):\r\n if self.validate(self.converted_data[idx].keys()):\r\n jfile.write(json.dumps(data, ensure_ascii=False) + \"\\n\")\r\n print(f\"\\n Total line printed: {idx + 1}\")\r\n\r\n if IN_COLAB:\r\n print(f\"\\n Downloading converted data to local machine...\")\r\n files.download(output_path)\r\n\r\n if self.do_translate:\r\n self.pre_translate_validate()\r\n self.translate_converted()\r\n self.post_translate_validate()\r\n assert self.converted_data_translated is not None, \"Converted data haven't been translated yet!\"\r\n output_translated_path = os.path.join(self.output_dir,\r\n f\"{self.parser_name}_translated_{self.target_lang}.json\")\r\n with open(output_translated_path, 'w', encoding='utf-8') as jfile:\r\n print(f\"\\n Saving {self.parser_name} translated to {output_translated_path}... 
\")\r\n for idx, data in enumerate(\r\n tqdm(self.converted_data_translated, desc=\"Writing translated data to file\")):\r\n jfile.write(json.dumps(data, ensure_ascii=False) + \"\\n\")\r\n print(f\"\\n Total line printed: {idx + 1}\")\r\n\r\n if IN_COLAB:\r\n print(f\"\\n Downloading converted translated data to local machine...\")\r\n files.download(output_translated_path)\r" } ]
import sys from tqdm.auto import tqdm from datasets import load_dataset from configs import BaseConfig from translator import DataParser
5,573
sys.path.insert(0,r'./') PARSER_NAME = "OpenOrca" class OpenOrcaParser(DataParser): def __init__(self, file_path: str, output_path: str): super().__init__(file_path, output_path, parser_name=PARSER_NAME,
sys.path.insert(0,r'./') PARSER_NAME = "OpenOrca" class OpenOrcaParser(DataParser): def __init__(self, file_path: str, output_path: str): super().__init__(file_path, output_path, parser_name=PARSER_NAME,
target_config=BaseConfig, # The data config to be validated to check if self implement "convert" function is correct or not,
0
2023-10-27 08:55:44+00:00
8k
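A large part of the DataParser code quoted above is machinery for chunking data, translating chunks on a thread pool, tagging each result with its chunk index, and sorting on that key so the original order survives out-of-order completion. Reduced to its essentials, and with translate_chunk below standing in as a placeholder rather than the repository's provider API, the pattern looks like this:

from concurrent.futures import ThreadPoolExecutor, as_completed

def split_list(items, max_len):
    # Same chunking idea as DataParser.split_list
    return [items[i:i + max_len] for i in range(0, len(items), max_len)]

def translate_chunk(chunk):
    # Placeholder for the real translation provider call
    return [text.upper() for text in chunk]

def parallel_translate(texts, chunk_size=3, max_workers=4):
    chunks = split_list(texts, chunk_size)
    tagged = []
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        # Tag every submitted chunk with its index so completion order is irrelevant
        future_to_idx = {pool.submit(translate_chunk, c): i for i, c in enumerate(chunks)}
        for future in as_completed(future_to_idx):
            tagged.append({"key": future_to_idx[future], "text_list": future.result()})
    # Restore submission order, then flatten the sub-lists, mirroring the
    # sort-by-key and flatten steps in __sublist_multithread_translate
    tagged.sort(key=lambda d: d["key"])
    return [text for d in tagged for text in d["text_list"]]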
Khushiyant/dockerpulse
dockerpulse/lgbert/bert_pytorch/train_log.py
[ { "identifier": "BERT", "path": "dockerpulse/lgbert/bert_pytorch/model/bert.py", "snippet": "class BERT(nn.Module):\r\n \"\"\"\r\n BERT model : Bidirectional Encoder Representations from Transformers.\r\n \"\"\"\r\n\r\n def __init__(self, vocab_size, max_len=512, hidden=768, n_layers=12,\r\n attn_heads=12, dropout=0.1, is_logkey=True, is_time=False):\r\n \"\"\"\r\n :param vocab_size: vocab_size of total words\r\n :param hidden: BERT model hidden size\r\n :param n_layers: numbers of Transformer blocks(layers)\r\n :param attn_heads: number of attention heads\r\n :param dropout: dropout rate\r\n \"\"\"\r\n\r\n super().__init__()\r\n self.hidden = hidden\r\n self.n_layers = n_layers\r\n self.attn_heads = attn_heads\r\n\r\n # paper noted they used 4*hidden_size for ff_network_hidden_size\r\n self.feed_forward_hidden = hidden * 2\r\n\r\n # embedding for BERT, sum of positional, segment, token embeddings\r\n self.embedding = BERTEmbedding(\r\n vocab_size=vocab_size,\r\n embed_size=hidden,\r\n max_len=max_len,\r\n is_logkey=is_logkey,\r\n is_time=is_time)\r\n\r\n # multi-layers transformer blocks, deep network\r\n self.transformer_blocks = nn.ModuleList(\r\n [TransformerBlock(hidden, attn_heads, hidden * 2, dropout) for _ in range(n_layers)])\r\n\r\n def forward(self, x, segment_info=None, time_info=None):\r\n # attention masking for padded token\r\n # torch.ByteTensor([batch_size, 1, seq_len, seq_len)\r\n mask = (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1)\r\n\r\n # embedding the indexed sequence to sequence of vectors\r\n x = self.embedding(x, segment_info, time_info)\r\n\r\n # running over multiple transformer blocks\r\n for transformer in self.transformer_blocks:\r\n x = transformer.forward(x, mask)\r\n\r\n return x\r" }, { "identifier": "BERTTrainer", "path": "dockerpulse/lgbert/bert_pytorch/trainer/pretrain.py", "snippet": "class BERTTrainer:\r\n \"\"\"\r\n BERTTrainer make the pretrained BERT model with two LM training method.\r\n\r\n 1. Masked Language Model : 3.3.1 Task #1: Masked LM\r\n 2. 
Next Sentence prediction : 3.3.2 Task #2: Next Sentence Prediction\r\n\r\n please check the details on README.md with simple example.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, bert: BERT, vocab_size: int,\r\n train_dataloader: DataLoader, valid_dataloader: DataLoader = None,\r\n lr: float = 1e-4, betas=(0.9, 0.999), weight_decay: float = 0.01, warmup_steps=10000,\r\n with_cuda: bool = True, cuda_devices=None, log_freq: int = 10, is_logkey=True, is_time=False,\r\n hypersphere_loss=False):\r\n \"\"\"\r\n :param bert: BERT model which you want to train\r\n :param vocab_size: total word vocab size\r\n :param train_dataloader: train dataset data loader\r\n :param valid_dataloader: valid dataset data loader [can be None]\r\n :param lr: learning rate of optimizer\r\n :param betas: Adam optimizer betas\r\n :param weight_decay: Adam optimizer weight decay param\r\n :param with_cuda: traning with cuda\r\n :param log_freq: logging frequency of the batch iteration\r\n \"\"\"\r\n\r\n # Setup cuda device for BERT training, argument -c, --cuda should be\r\n # true\r\n cuda_condition = torch.cuda.is_available() and with_cuda\r\n self.device = torch.device(\"cuda:0\" if cuda_condition else \"cpu\")\r\n\r\n # This BERT model will be saved every epoch\r\n self.bert = bert\r\n # Initialize the BERT Language Model, with BERT model\r\n self.model = BERTLog(bert, vocab_size).to(self.device)\r\n\r\n # Distributed GPU training if CUDA can detect more than 1 GPU\r\n # if with_cuda and torch.cuda.device_count() > 1:\r\n # print(\"Using %d GPUS for BERT\" % torch.cuda.device_count())\r\n # self.model = nn.DataParallel(self.model, device_ids=cuda_devices)\r\n\r\n # Setting the train and valid data loader\r\n self.train_data = train_dataloader\r\n self.valid_data = valid_dataloader\r\n\r\n self.lr = lr\r\n self.betas = betas\r\n self.weight_decay = weight_decay\r\n self.warmup_steps = warmup_steps\r\n self.optim = None\r\n self.optim_schedule = None\r\n self.init_optimizer()\r\n\r\n # Using Negative Log Likelihood Loss function for predicting the\r\n # masked_token\r\n self.criterion = nn.NLLLoss(ignore_index=0)\r\n self.time_criterion = nn.MSELoss()\r\n self.hyper_criterion = nn.MSELoss()\r\n\r\n # deep SVDD hyperparameters\r\n self.hypersphere_loss = hypersphere_loss\r\n self.radius = 0\r\n self.hyper_center = None\r\n self.nu = 0.25\r\n # self.objective = \"soft-boundary\"\r\n self.objective = None\r\n\r\n self.log_freq = log_freq\r\n\r\n self.log = {\r\n \"train\": {key: []\r\n for key in [\"epoch\", \"lr\", \"time\", \"loss\"]},\r\n \"valid\": {key: []\r\n for key in [\"epoch\", \"lr\", \"time\", \"loss\"]}\r\n }\r\n\r\n print(\"Total Parameters:\", sum([p.nelement()\r\n for p in self.model.parameters()]))\r\n\r\n self.is_logkey = is_logkey\r\n self.is_time = is_time\r\n\r\n def init_optimizer(self):\r\n # Setting the Adam optimizer with hyper-param\r\n self.optim = Adam(\r\n self.model.parameters(),\r\n lr=self.lr,\r\n betas=self.betas,\r\n weight_decay=self.weight_decay)\r\n self.optim_schedule = ScheduledOptim(\r\n self.optim, self.bert.hidden, n_warmup_steps=self.warmup_steps)\r\n\r\n def train(self, epoch):\r\n return self.iteration(epoch, self.train_data, start_train=True)\r\n\r\n def valid(self, epoch):\r\n return self.iteration(epoch, self.valid_data, start_train=False)\r\n\r\n def iteration(self, epoch, data_loader, start_train):\r\n \"\"\"\r\n loop over the data_loader for training or validing\r\n if on train status, backward operation is activated\r\n and also auto save the model every peoch\r\n\r\n 
:param epoch: current epoch index\r\n :param data_loader: torch.utils.data.DataLoader for iteration\r\n :param train: boolean value of is train or valid\r\n :return: None\r\n \"\"\"\r\n str_code = \"train\" if start_train else \"valid\"\r\n\r\n lr = self.optim.state_dict()['param_groups'][0]['lr']\r\n start = time.strftime(\"%H:%M:%S\")\r\n self.log[str_code]['lr'].append(lr)\r\n self.log[str_code]['time'].append(start)\r\n\r\n # Setting the tqdm progress bar\r\n totol_length = len(data_loader)\r\n # data_iter = tqdm.tqdm(enumerate(data_loader), total=totol_length)\r\n data_iter = enumerate(data_loader)\r\n\r\n total_loss = 0.0\r\n total_logkey_loss = 0.0\r\n total_hyper_loss = 0.0\r\n\r\n total_dist = []\r\n for i, data in data_iter:\r\n data = {key: value.to(self.device) for key, value in data.items()}\r\n\r\n result = self.model.forward(data[\"bert_input\"], data[\"time_input\"])\r\n mask_lm_output, mask_time_output = result[\"logkey_output\"], result[\"time_output\"]\r\n\r\n # 2-2. NLLLoss of predicting masked token word ignore_index = 0 to\r\n # ignore unmasked tokens\r\n mask_loss = torch.tensor(0) if not self.is_logkey else self.criterion(\r\n mask_lm_output.transpose(1, 2), data[\"bert_label\"])\r\n total_logkey_loss += mask_loss.item()\r\n\r\n # 2-3. Adding next_loss and mask_loss : 3.4 Pre-training Procedure\r\n loss = mask_loss\r\n\r\n # hypersphere loss\r\n if self.hypersphere_loss:\r\n # version 1.0\r\n # hyper_loss = self.hyper_criterion(result[\"cls_fnn_output\"].squeeze(), self.hyper_center.expand(data[\"bert_input\"].shape[0],-1))\r\n hyper_loss = self.hyper_criterion(\r\n result[\"cls_output\"].squeeze(), self.hyper_center.expand(\r\n data[\"bert_input\"].shape[0], -1))\r\n\r\n # version 2.0\r\n # https://github.com/lukasruff/Deep-SVDD-PyTorch/blob/master/src/optim/deepSVDD_trainer.py\r\n dist = torch.sum(\r\n (result[\"cls_output\"] - self.hyper_center) ** 2, dim=1)\r\n total_dist += dist.cpu().tolist()\r\n\r\n # if self.objective == 'soft-boundary':\r\n # scores = dist - self.radius ** 2\r\n # hyper_loss = torch.sqrt(self.radius ** 2 + (1 / self.nu) * torch.mean(torch.max(torch.zeros_like(scores), scores)))\r\n # else:\r\n # hyper_loss = torch.sqrt(torch.mean(dist))\r\n\r\n # # add radius and center to training\r\n # self.radius = self.get_radius(dist, self.nu)\r\n # self.hyper_center = torch.mean(result[\"cls_output\"], dim=0)\r\n\r\n total_hyper_loss += hyper_loss.item()\r\n\r\n # with deepsvdd loss\r\n loss = loss + 0.1 * hyper_loss\r\n\r\n total_loss += loss.item()\r\n\r\n # 3. 
backward and optimization only in train\r\n if start_train:\r\n self.optim_schedule.zero_grad()\r\n loss.backward()\r\n self.optim_schedule.step_and_update_lr()\r\n\r\n avg_loss = total_loss / totol_length\r\n self.log[str_code]['epoch'].append(epoch)\r\n self.log[str_code]['loss'].append(avg_loss)\r\n print(\r\n \"Epoch: {} | phase: {}, loss={}\".format(\r\n epoch,\r\n str_code,\r\n avg_loss))\r\n print(\r\n f\"logkey loss: {total_logkey_loss/totol_length}, hyper loss: {total_hyper_loss/totol_length}\\n\")\r\n\r\n return avg_loss, total_dist\r\n\r\n def save_log(self, save_dir, surfix_log):\r\n try:\r\n for key, values in self.log.items():\r\n pd.DataFrame(values).to_csv(save_dir + key + f\"_{surfix_log}.csv\",\r\n index=False)\r\n print(\"Log saved\")\r\n except BaseException:\r\n print(\"Failed to save logs\")\r\n\r\n def save(self, save_dir=\"output/bert_trained.pth\"):\r\n \"\"\"\r\n Saving the current BERT model on file_path\r\n\r\n :param epoch: current epoch number\r\n :param file_path: model output path which gonna be file_path+\"ep%d\" % epoch\r\n :return: final_output_path\r\n \"\"\"\r\n torch.save(self.model, save_dir)\r\n # self.bert.to(self.device)\r\n print(\" Model Saved on:\", save_dir)\r\n return save_dir\r\n\r\n @staticmethod\r\n def get_radius(dist: list, nu: float):\r\n \"\"\"Optimally solve for radius R via the (1-nu)-quantile of distances.\"\"\"\r\n return np.quantile(np.sqrt(dist), 1 - nu)\r" }, { "identifier": "WordVocab", "path": "dockerpulse/lgbert/bert_pytorch/dataset/vocab.py", "snippet": "class WordVocab(Vocab):\r\n def __init__(self, texts, max_size=None, min_freq=1):\r\n print(\"Building Vocab\")\r\n counter = Counter()\r\n for line in tqdm.tqdm(texts):\r\n if isinstance(line, list):\r\n words = line\r\n else:\r\n words = line.replace(\"\\n\", \"\").replace(\"\\t\", \"\").split()\r\n\r\n for word in words:\r\n counter[word] += 1\r\n super().__init__(counter, max_size=max_size, min_freq=min_freq)\r\n\r\n def to_seq(self, sentence, seq_len=None, with_eos=False,\r\n with_sos=False, with_len=False):\r\n if isinstance(sentence, str):\r\n sentence = sentence.split()\r\n\r\n seq = [self.stoi.get(word, self.unk_index) for word in sentence]\r\n\r\n if with_eos:\r\n seq += [self.eos_index] # this would be index 1\r\n if with_sos:\r\n seq = [self.sos_index] + seq\r\n\r\n origin_seq_len = len(seq)\r\n\r\n if seq_len is None:\r\n pass\r\n elif len(seq) <= seq_len:\r\n seq += [self.pad_index for _ in range(seq_len - len(seq))]\r\n else:\r\n seq = seq[:seq_len]\r\n\r\n return (seq, origin_seq_len) if with_len else seq\r\n\r\n def from_seq(self, seq, join=False, with_pad=False):\r\n words = [self.itos[idx]\r\n if idx < len(self.itos)\r\n else \"<%d>\" % idx\r\n for idx in seq\r\n if not with_pad or idx != self.pad_index]\r\n\r\n return \" \".join(words) if join else words\r\n\r\n @staticmethod\r\n def load_vocab(vocab_path: str) -> 'WordVocab':\r\n with open(vocab_path, \"rb\") as f:\r\n return pickle.load(f)\r" }, { "identifier": "LogDataset", "path": "dockerpulse/lgbert/bert_pytorch/dataset/log_dataset.py", "snippet": "class LogDataset(Dataset):\r\n def __init__(self, log_corpus, time_corpus, vocab, seq_len, corpus_lines=None,\r\n encoding=\"utf-8\", on_memory=True, predict_mode=False, mask_ratio=0.15):\r\n \"\"\"\r\n\r\n :param corpus: log sessions/line\r\n :param vocab: log events collection including pad, ukn ...\r\n :param seq_len: max sequence length\r\n :param corpus_lines: number of log sessions\r\n :param encoding:\r\n :param on_memory:\r\n :param predict_mode: if 
predict\r\n \"\"\"\r\n self.vocab = vocab\r\n self.seq_len = seq_len\r\n\r\n self.on_memory = on_memory\r\n self.encoding = encoding\r\n\r\n self.predict_mode = predict_mode\r\n self.log_corpus = log_corpus\r\n self.time_corpus = time_corpus\r\n self.corpus_lines = len(log_corpus)\r\n\r\n self.mask_ratio = mask_ratio\r\n\r\n def __len__(self):\r\n return self.corpus_lines\r\n\r\n def __getitem__(self, idx):\r\n k, t = self.log_corpus[idx], self.time_corpus[idx]\r\n\r\n k_masked, k_label, t_masked, t_label = self.random_item(k, t)\r\n\r\n # [CLS] tag = SOS tag, [SEP] tag = EOS tag\r\n k = [self.vocab.sos_index] + k_masked\r\n k_label = [self.vocab.pad_index] + k_label\r\n # k_label = [self.vocab.sos_index] + k_label\r\n\r\n t = [0] + t_masked\r\n t_label = [self.vocab.pad_index] + t_label\r\n\r\n return k, k_label, t, t_label\r\n\r\n def random_item(self, k, t):\r\n tokens = list(k)\r\n output_label = []\r\n\r\n time_intervals = list(t)\r\n time_label = []\r\n\r\n for i, token in enumerate(tokens):\r\n time_int = time_intervals[i]\r\n prob = random.random()\r\n # replace 15% of tokens in a sequence to a masked token\r\n if prob < self.mask_ratio:\r\n # raise AttributeError(\"no mask in visualization\")\r\n\r\n if self.predict_mode:\r\n tokens[i] = self.vocab.mask_index\r\n output_label.append(\r\n self.vocab.stoi.get(\r\n token, self.vocab.unk_index))\r\n\r\n time_label.append(time_int)\r\n time_intervals[i] = 0\r\n continue\r\n\r\n prob /= self.mask_ratio\r\n\r\n # 80% randomly change token to mask token\r\n if prob < 0.8:\r\n tokens[i] = self.vocab.mask_index\r\n\r\n # 10% randomly change token to random token\r\n elif prob < 0.9:\r\n tokens[i] = random.randrange(len(self.vocab))\r\n\r\n # 10% randomly change token to current token\r\n else:\r\n tokens[i] = self.vocab.stoi.get(\r\n token, self.vocab.unk_index)\r\n\r\n output_label.append(\r\n self.vocab.stoi.get(\r\n token, self.vocab.unk_index))\r\n\r\n time_intervals[i] = 0 # time mask value = 0\r\n time_label.append(time_int)\r\n\r\n else:\r\n tokens[i] = self.vocab.stoi.get(token, self.vocab.unk_index)\r\n output_label.append(0)\r\n time_label.append(0)\r\n\r\n return tokens, output_label, time_intervals, time_label\r\n\r\n def collate_fn(self, batch, percentile=100, dynamical_pad=True):\r\n lens = [len(seq[0]) for seq in batch]\r\n\r\n # find the max len in each batch\r\n if dynamical_pad:\r\n # dynamical padding\r\n seq_len = int(np.percentile(lens, percentile))\r\n if self.seq_len is not None:\r\n seq_len = min(seq_len, self.seq_len)\r\n else:\r\n # fixed length padding\r\n seq_len = self.seq_len\r\n\r\n output = defaultdict(list)\r\n for seq in batch:\r\n bert_input = seq[0][:seq_len]\r\n bert_label = seq[1][:seq_len]\r\n time_input = seq[2][:seq_len]\r\n time_label = seq[3][:seq_len]\r\n\r\n padding = [\r\n self.vocab.pad_index for _ in range(\r\n seq_len - len(bert_input))]\r\n bert_input.extend(padding), bert_label.extend(padding), time_input.extend(padding), time_label.extend(\r\n padding)\r\n\r\n time_input = np.array(time_input)[:, np.newaxis]\r\n output[\"bert_input\"].append(bert_input)\r\n output[\"bert_label\"].append(bert_label)\r\n output[\"time_input\"].append(time_input)\r\n output[\"time_label\"].append(time_label)\r\n\r\n output[\"bert_input\"] = torch.tensor(\r\n output[\"bert_input\"], dtype=torch.long)\r\n output[\"bert_label\"] = torch.tensor(\r\n output[\"bert_label\"], dtype=torch.long)\r\n output[\"time_input\"] = torch.tensor(\r\n output[\"time_input\"], dtype=torch.float)\r\n output[\"time_label\"] = 
torch.tensor(\r\n output[\"time_label\"], dtype=torch.float)\r\n\r\n return output\r" }, { "identifier": "generate_train_valid", "path": "dockerpulse/lgbert/bert_pytorch/dataset/sample.py", "snippet": "def generate_train_valid(data_path, window_size=20, adaptive_window=True,\r\n sample_ratio=1, valid_size=0.1, output_path=None,\r\n scale=None, scale_path=None, seq_len=None, min_len=0):\r\n with open(data_path, 'r') as f:\r\n data_iter = f.readlines()\r\n\r\n num_session = int(len(data_iter) * sample_ratio)\r\n # only even number of samples, or drop_last=True in DataLoader API\r\n # coz in parallel computing in CUDA, odd number of samples reports issue when merging the result\r\n # num_session += num_session % 2\r\n\r\n test_size = int(min(num_session, len(data_iter)) * valid_size)\r\n # only even number of samples\r\n # test_size += test_size % 2\r\n\r\n print(\"before filtering short session\")\r\n print(\"train size \", int(num_session - test_size))\r\n print(\"valid size \", int(test_size))\r\n print(\"=\" * 40)\r\n\r\n logkey_seq_pairs = []\r\n time_seq_pairs = []\r\n session = 0\r\n for line in tqdm(data_iter):\r\n if session >= num_session:\r\n break\r\n session += 1\r\n\r\n logkeys, times = fixed_window(\r\n line, window_size, adaptive_window, seq_len, min_len)\r\n logkey_seq_pairs += logkeys\r\n time_seq_pairs += times\r\n\r\n logkey_seq_pairs = np.array(logkey_seq_pairs)\r\n time_seq_pairs = np.array(time_seq_pairs)\r\n\r\n logkey_trainset, logkey_validset, time_trainset, time_validset = train_test_split(logkey_seq_pairs,\r\n time_seq_pairs,\r\n test_size=test_size,\r\n random_state=1234)\r\n\r\n # sort seq_pairs by seq len\r\n train_len = list(map(len, logkey_trainset))\r\n valid_len = list(map(len, logkey_validset))\r\n\r\n train_sort_index = np.argsort(-1 * np.array(train_len))\r\n valid_sort_index = np.argsort(-1 * np.array(valid_len))\r\n\r\n logkey_trainset = logkey_trainset[train_sort_index]\r\n logkey_validset = logkey_validset[valid_sort_index]\r\n\r\n time_trainset = time_trainset[train_sort_index]\r\n time_validset = time_validset[valid_sort_index]\r\n\r\n print(\"=\" * 40)\r\n print(\"Num of train seqs\", len(logkey_trainset))\r\n print(\"Num of valid seqs\", len(logkey_validset))\r\n print(\"=\" * 40)\r\n\r\n return logkey_trainset, logkey_validset, time_trainset, time_validset\r" }, { "identifier": "save_parameters", "path": "dockerpulse/lgbert/bert_pytorch/dataset/utils.py", "snippet": "def save_parameters(options, filename):\r\n with open(filename, \"w+\") as f:\r\n for key in options.keys():\r\n f.write(\"{}: {}\\n\".format(key, options[key]))\r" } ]
from torch.utils.data import DataLoader from .model import BERT from .trainer import BERTTrainer from .dataset import LogDataset, WordVocab from .dataset.sample import generate_train_valid from .dataset.utils import save_parameters import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import torch import tqdm import gc
5,629
class Trainer(): def __init__(self, options): self.device = options["device"] self.model_dir = options["model_dir"] self.model_path = options["model_path"] self.vocab_path = options["vocab_path"] self.output_path = options["output_dir"] self.window_size = options["window_size"] self.adaptive_window = options["adaptive_window"] self.sample_ratio = options["train_ratio"] self.valid_ratio = options["valid_ratio"] self.seq_len = options["seq_len"] self.max_len = options["max_len"] self.corpus_lines = options["corpus_lines"] self.on_memory = options["on_memory"] self.batch_size = options["batch_size"] self.num_workers = options["num_workers"] self.lr = options["lr"] self.adam_beta1 = options["adam_beta1"] self.adam_beta2 = options["adam_beta2"] self.adam_weight_decay = options["adam_weight_decay"] self.with_cuda = options["with_cuda"] self.cuda_devices = options["cuda_devices"] self.log_freq = options["log_freq"] self.epochs = options["epochs"] self.hidden = options["hidden"] self.layers = options["layers"] self.attn_heads = options["attn_heads"] self.is_logkey = options["is_logkey"] self.is_time = options["is_time"] self.scale = options["scale"] self.scale_path = options["scale_path"] self.n_epochs_stop = options["n_epochs_stop"] self.hypersphere_loss = options["hypersphere_loss"] self.mask_ratio = options["mask_ratio"] self.min_len = options['min_len'] print("Save options parameters") save_parameters(options, self.model_dir + "parameters.txt") def train(self): print("Loading vocab", self.vocab_path)
class Trainer(): def __init__(self, options): self.device = options["device"] self.model_dir = options["model_dir"] self.model_path = options["model_path"] self.vocab_path = options["vocab_path"] self.output_path = options["output_dir"] self.window_size = options["window_size"] self.adaptive_window = options["adaptive_window"] self.sample_ratio = options["train_ratio"] self.valid_ratio = options["valid_ratio"] self.seq_len = options["seq_len"] self.max_len = options["max_len"] self.corpus_lines = options["corpus_lines"] self.on_memory = options["on_memory"] self.batch_size = options["batch_size"] self.num_workers = options["num_workers"] self.lr = options["lr"] self.adam_beta1 = options["adam_beta1"] self.adam_beta2 = options["adam_beta2"] self.adam_weight_decay = options["adam_weight_decay"] self.with_cuda = options["with_cuda"] self.cuda_devices = options["cuda_devices"] self.log_freq = options["log_freq"] self.epochs = options["epochs"] self.hidden = options["hidden"] self.layers = options["layers"] self.attn_heads = options["attn_heads"] self.is_logkey = options["is_logkey"] self.is_time = options["is_time"] self.scale = options["scale"] self.scale_path = options["scale_path"] self.n_epochs_stop = options["n_epochs_stop"] self.hypersphere_loss = options["hypersphere_loss"] self.mask_ratio = options["mask_ratio"] self.min_len = options['min_len'] print("Save options parameters") save_parameters(options, self.model_dir + "parameters.txt") def train(self): print("Loading vocab", self.vocab_path)
vocab = WordVocab.load_vocab(self.vocab_path)
2
2023-10-29 09:52:36+00:00
8k
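The `random_item` routine in the LogDataset record above uses the standard BERT masking split: a position is selected with probability `mask_ratio`, a selected token becomes the mask token 80% of the time, a random token 10% of the time, and stays itself the remaining 10%, while unselected positions get a label of 0 so the loss ignores them. A minimal, self-contained sketch of that split (the `TinyVocab` stand-in and the sample log keys are illustrative, not taken from the dataset):

```python
import random

# Illustrative stand-in for the vocab object in the LogDataset record above.
class TinyVocab:
    def __init__(self, tokens):
        # indices 0..3 reserved for pad/unk/mask/sos, mirroring common BERT vocabs
        self.pad_index, self.unk_index, self.mask_index, self.sos_index = 0, 1, 2, 3
        self.stoi = {t: i + 4 for i, t in enumerate(tokens)}

    def __len__(self):
        return len(self.stoi) + 4


def mask_tokens(tokens, vocab, mask_ratio=0.15):
    """Return (input_ids, labels); label 0 marks positions the loss ignores."""
    input_ids, labels = [], []
    for token in tokens:
        prob = random.random()
        if prob < mask_ratio:
            prob /= mask_ratio
            if prob < 0.8:                       # 80%: replace with the mask token
                input_ids.append(vocab.mask_index)
            elif prob < 0.9:                     # 10%: replace with a random id
                input_ids.append(random.randrange(len(vocab)))
            else:                                # 10%: keep the original id
                input_ids.append(vocab.stoi.get(token, vocab.unk_index))
            labels.append(vocab.stoi.get(token, vocab.unk_index))
        else:
            input_ids.append(vocab.stoi.get(token, vocab.unk_index))
            labels.append(0)
    return input_ids, labels


vocab = TinyVocab(["open", "read", "write", "close"])
print(mask_tokens(["open", "read", "write", "close", "unlink"], vocab))
```

Keeping 10% of the selected tokens unchanged is the usual BERT trick so the model does not learn to expect the mask token at every position it has to predict.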
audiodude/rainfall
rainfall/main.py
[ { "identifier": "file", "path": "rainfall/blueprint/file.py", "snippet": "def delete_file(file_id, user):" }, { "identifier": "UserBlueprintFactory", "path": "rainfall/blueprint/user.py", "snippet": "class UserBlueprintFactory:\n\n def __init__(self, csrf):\n self.csrf = csrf\n\n def get_blueprint(self):\n user = flask.Blueprint('user', __name__)\n\n log = logging.getLogger(__name__)\n\n @user.route('/user')\n @with_current_user\n def get_user(user):\n user_without_sites = dict((field.name, getattr(user, field.name))\n for field in fields(user)\n if field.name != 'sites')\n return flask.jsonify(user_without_sites)\n\n @user.route('/logout', methods=['POST'])\n def logout():\n if 'user_id' in flask.session:\n del flask.session['user_id']\n return '', 204\n\n @self.csrf.exempt\n @user.route('/login', methods=['POST'])\n def login():\n resp = check_csrf()\n if resp:\n return resp\n\n client_id = flask.current_app.config['GOOGLE_CLIENT_ID']\n token = flask.request.form.get('credential')\n try:\n idinfo = id_token.verify_oauth2_token(token, goog_requests.Request(),\n client_id)\n except ValueError:\n log.exception('Could not verify token, using: %s', client_id)\n return flask.jsonify(status=400, error='Could not verify token'), 400\n\n user_id = save_or_update_google_user(idinfo)\n flask.session['user_id'] = user_id\n user = db.session.get(User, user_id)\n\n frontend_url = flask.current_app.config['RAINFALL_FRONTEND_URL']\n if user.is_welcomed:\n return flask.redirect(urljoin(frontend_url, '/sites'))\n else:\n return flask.redirect(urljoin(frontend_url, '/welcome'))\n\n @user.route('/user/welcome', methods=['POST'])\n @with_current_user\n def welcome(user):\n user.is_welcomed = True\n db.session.commit()\n\n return '', 204\n\n return user" }, { "identifier": "release", "path": "rainfall/blueprint/release.py", "snippet": "def create_release(user):" }, { "identifier": "site", "path": "rainfall/blueprint/site.py", "snippet": "def create_site(user):\ndef list_sites(user):\ndef get_site(site, user):" }, { "identifier": "db", "path": "rainfall/db.py", "snippet": "class Base(DeclarativeBase):" }, { "identifier": "with_current_site", "path": "rainfall/decorators.py", "snippet": "def with_current_site(f):\n '''Requires the with_current_user decorator above'''\n\n @wraps(f)\n def wrapped(*args, **kwargs):\n if 'site_id' not in kwargs:\n return flask.jsonify(status=500,\n error='Wrapper requires site_id kwarg'), 500\n\n site_id = kwargs.pop('site_id')\n user = kwargs['user']\n site = db.session.get(Site, UUID(site_id))\n if site is None:\n return flask.jsonify(\n status=404, error=f'Could not find a site with id={site_id}'), 404\n\n if site.user.id != user.id:\n return flask.jsonify(status=401,\n error='Not authorized for that site'), 401\n\n value = f(*args, site=site, **kwargs)\n return value\n\n return wrapped" }, { "identifier": "with_current_user", "path": "rainfall/decorators.py", "snippet": "def with_current_user(f):\n '''\n Retrieves the current user from the session, performs some checks, and then\n calls the underlying handler\n '''\n\n @wraps(f)\n def wrapped(*args, **kwargs):\n user_id = flask.session.get('user_id')\n if user_id is None:\n return flask.jsonify(status=404, error='No signed in user'), 404\n\n user = db.session.get(User, user_id)\n if user is None:\n return flask.jsonify(status=404, error='User does not exist'), 404\n\n value = f(*args, user=user, **kwargs)\n return value\n\n return wrapped" }, { "identifier": "File", "path": "rainfall/models/file.py", "snippet": "class 
File(db.Model):\n __tablename__ = 'files'\n\n id: Mapped[bytes] = mapped_column(Uuid, primary_key=True, default=uuid7)\n release_id: Mapped[bytes] = mapped_column(ForeignKey(\"releases.id\"))\n release: Mapped[\"Release\"] = relationship(back_populates=\"files\")\n filename: Mapped[str] = mapped_column(String(1024))\n original_filename: Mapped[str] = mapped_column(String(1024), nullable=True)\n\n def __repr__(self) -> str:\n return f'File(id={self.id!r}, release_id={self.release_id!r})'\n\n def serialize(self):\n return dict((field.name, getattr(self, field.name))\n for field in fields(self)\n if field.name != 'release')\n\n def _new_name(self):\n if self.release is None:\n raise ValueError('Cannot rename a file that does not belong to a release')\n\n dupe_file = None\n for f in self.release.files:\n if self is f:\n continue\n if self.filename == f.filename:\n dupe_file = f\n break\n\n if dupe_file is None:\n # Return whether rename was necessary.\n return False\n dupe_name = dupe_file.filename\n\n regex = RE_NAME if dupe_file.original_filename is None else RE_DUPE_NAME\n md = regex.match(dupe_name)\n if not md:\n raise ValueError(f'Invalid file, name={dupe_file.filename}, '\n f'original_name={dupe_file.original_filename}')\n\n if dupe_file.original_filename is not None:\n # Increment the numerical part, minus the _\n num = int(md.group(3).split('_')[1]) + 1\n new_name = f'{md.group(1)}_{num}{md.group(4)}'\n else:\n # Add a _1 tag to the name\n new_name = f'{md.group(1)}_1{md.group(2)}'\n\n # Return whether rename was necessary.\n self.original_filename = self.filename\n self.filename = new_name\n return True\n\n def maybe_rename(self):\n # Keep trying names until a free one is found\n while self._new_name():\n pass" }, { "identifier": "Release", "path": "rainfall/models/release.py", "snippet": "class Release(db.Model):\n __tablename__ = 'releases'\n\n id: Mapped[bytes] = mapped_column(Uuid, primary_key=True, default=uuid7)\n site_id: Mapped[bytes] = mapped_column(ForeignKey(\"sites.id\"))\n site: Mapped[\"Site\"] = relationship(back_populates=\"releases\")\n name: Mapped[str] = mapped_column(String(255))\n\n files: Mapped[List[\"File\"]] = relationship(back_populates=\"release\")\n\n def __repr__(self) -> str:\n return f'Release(id={self.id!r}, site_id={self.site_id!r})'\n\n def serialize(self):\n props = []\n for field in fields(self):\n if field.name == 'site':\n continue\n\n if field.name == 'files':\n props.append(('files', [file.serialize() for file in self.files]))\n continue\n\n props.append((field.name, getattr(self, field.name)))\n return dict(props)" }, { "identifier": "Site", "path": "rainfall/models/site.py", "snippet": "class Site(db.Model):\n __tablename__ = 'sites'\n\n id: Mapped[bytes] = mapped_column(Uuid, primary_key=True, default=uuid7)\n user_id: Mapped[bytes] = mapped_column(ForeignKey(\"users.id\"))\n user: Mapped[\"User\"] = relationship(back_populates=\"sites\")\n name: Mapped[str] = mapped_column(String(255))\n\n releases: Mapped[List[\"Release\"]] = relationship(back_populates=\"site\")\n\n def __repr__(self) -> str:\n return f'Site(id={self.id!r}, user_id={self.user_id!r}, name={self.name!r})'\n\n def serialize(self):\n props = []\n for field in fields(self):\n if field.name == 'user':\n continue\n\n if field.name == 'releases':\n props.append(\n ('releases', [release.serialize() for release in self.releases]))\n continue\n\n props.append((field.name, getattr(self, field.name)))\n return dict(props)" }, { "identifier": "User", "path": "rainfall/models/user.py", 
"snippet": "class User(db.Model):\n __tablename__ = 'users'\n\n id: Mapped[bytes] = mapped_column(Uuid, primary_key=True, default=uuid7)\n google_id: Mapped[str] = mapped_column(String(255), unique=True)\n name: Mapped[str] = mapped_column(String(255), nullable=True)\n email: Mapped[str] = mapped_column(String(1024), nullable=True)\n picture_url: Mapped[str] = mapped_column(String(1024), nullable=True)\n is_welcomed: Mapped[bool] = mapped_column(Boolean, default=False)\n\n sites: Mapped[List[\"Site\"]] = relationship(back_populates=\"user\")\n\n def __repr__(self) -> str:\n return f'User(id={self.id!r}, google_id={self.google_id!r})'" }, { "identifier": "generate_site", "path": "rainfall/site.py", "snippet": "def generate_site(data_dir_path, preview_dir_path, site_id):\n try:\n out = subprocess.run([\n 'faircamp', '--catalog-dir',\n catalog_dir(data_dir_path, site_id), '--build-dir',\n build_dir(preview_dir_path, site_id), '--cache-dir',\n cache_dir(preview_dir_path, site_id), '--no-clean-urls'\n ],\n capture_output=True,\n check=True)\n except subprocess.CalledProcessError as e:\n return (False, e.stderr.decode('utf-8'))\n return (True, None)" }, { "identifier": "generate_zip", "path": "rainfall/site.py", "snippet": "def generate_zip(preview_dir_path, site_id):\n root_dir = zip_file_path(preview_dir_path, site_id)\n out_path = os.path.join(root_dir, 'rainfall_site')\n shutil.make_archive(out_path, 'zip', root_dir=root_dir, base_dir='public')" }, { "identifier": "public_dir", "path": "rainfall/site.py", "snippet": "def public_dir(site):\n return os.path.join(str(site.user.id), secure_filename(site.name), 'public')" }, { "identifier": "release_path", "path": "rainfall/site.py", "snippet": "def release_path(data_dir_path, release):\n return os.path.join(data_dir_path, str(release.site.user.id),\n secure_filename(release.site.name),\n secure_filename(release.name))" }, { "identifier": "site_exists", "path": "rainfall/site.py", "snippet": "def site_exists(preview_dir_path, site_id):\n dir_ = build_dir(preview_dir_path, site_id)\n return os.path.exists(dir_) and len(os.listdir(dir_)) > 0" }, { "identifier": "zip_file_path", "path": "rainfall/site.py", "snippet": "def zip_file_path(preview_dir_path, site_id):\n site = db.session.get(Site, UUID(site_id))\n return os.path.join(preview_dir_path, str(site.user.id),\n secure_filename(site.name))" } ]
import logging import os import time import flask import sqlalchemy from uuid import UUID from flask_seasurf import SeaSurf from werkzeug.utils import secure_filename from rainfall.blueprint.file import file as file_blueprint from rainfall.blueprint.user import UserBlueprintFactory from rainfall.blueprint.release import release as release_blueprint from rainfall.blueprint.site import site as site_blueprint from rainfall.db import db from rainfall.decorators import with_current_site, with_current_user from rainfall.models.file import File from rainfall.models.release import Release from rainfall.models.site import Site from rainfall.models.user import User from rainfall.site import generate_site, generate_zip, public_dir, release_path, site_exists, zip_file_path
3,960
app.config['TESTING'] = True csrf = SeaSurf(app) os.makedirs(app.config['DATA_DIR'], exist_ok=True) os.makedirs(app.config['PREVIEW_DIR'], exist_ok=True) app.register_blueprint(UserBlueprintFactory(csrf).get_blueprint(), url_prefix='/api/v1') app.register_blueprint(site_blueprint, url_prefix='/api/v1') app.register_blueprint(release_blueprint, url_prefix='/api/v1') app.register_blueprint(file_blueprint, url_prefix='/api/v1') FRONTEND_DIR = '../rainfall-frontend/dist' @app.route('/api/v1/upload', methods=['POST']) @with_current_user def upload(user): def allowed_file(filename): if '.' not in filename: return False return '.' + filename.rsplit('.', 1)[1].lower() in ALLOWED_SONG_EXTS def check_song_file_types(song_files): for f in song_files: if not allowed_file(f.filename): return flask.jsonify( status=400, error='File %s is not an allowed file type (%s)' % (f.filename, ' '.join(ALLOWED_SONG_EXTS))), 400 release_id = flask.request.form.get('release_id') if release_id is None: return flask.jsonify(status=400, error='No release id given'), 400 release = db.session.get(Release, UUID(release_id)) site = release.site upload_user = site.user if upload_user.id != user.id: return flask.jsonify(status=401, error='Cannot upload data to that release'), 401 song_files = flask.request.files.getlist("song[]") if not song_files: return flask.jsonify(status=400, error='No songs uploaded'), 400 resp = check_song_file_types(song_files) if resp is not None: return resp cur_release_path = release_path(app.config['DATA_DIR'], release) os.makedirs(cur_release_path, exist_ok=True) for song in song_files: name = secure_filename(song.filename) if len(name) > 1024: return flask.jsonify(status=400, error=f'File name {name} is too long'), 400 file = File(filename=name) release.files.append(file) # Give the file a new name if it's a dupe. This must be done after # the file is added to the release. file.maybe_rename() # Write the file to the filesystem. song.save(os.path.join(cur_release_path, file.filename)) db.session.add(release) db.session.commit() return '', 204 @app.route('/api/v1/preview/<site_id>', methods=['GET', 'POST']) @with_current_user @with_current_site def create_preview(site, user): if len(site.releases) == 0 or not any(f for release in site.releases for f in release.files): return flask.jsonify( status=400, error='Cannot preview site without releases/files'), 400 if flask.request.method == 'GET': if site_exists(app.config['PREVIEW_DIR'], str(site.id)): return '', 204 else: return '', 404 result = generate_site(app.config['DATA_DIR'], app.config['PREVIEW_DIR'], str(site.id)) if result[0]: return '', 204 else: return flask.jsonify(status=500, error=result[1]) @app.route('/preview/<site_id>/') @with_current_user @with_current_site def preview_index(site, user): # The decorators ensure that the site belongs to the user. return flask.send_from_directory( os.path.join('..', app.config['PREVIEW_DIR'], public_dir(site)), 'index.html') @app.route('/preview/<site_id>/<path:filename>') @with_current_user @with_current_site def preview_asset(site, user, filename): # The decorators ensure that the site belongs to the user. if filename.endswith('/'): filename += 'index.html' return flask.send_from_directory( os.path.join('..', app.config['PREVIEW_DIR'], public_dir(site)), filename) @app.route('/api/v1/zip/<site_id>') @with_current_user @with_current_site def zip(site, user): generate_zip(app.config['PREVIEW_DIR'], str(site.id))
ALLOWED_SONG_EXTS = ['.aiff', '.aif', '.flac', '.mp3', '.ogg', '.opus', '.wav'] log = logging.getLogger(__name__) logging.basicConfig(level=logging.WARNING) def create_app(): app = flask.Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['SQLALCHEMY_DATABASE_URI'] app.config['SECRET_KEY'] = os.environ['FLASK_SECRET_KEY'] app.config['GOOGLE_CLIENT_ID'] = os.environ['GOOGLE_CLIENT_ID'] app.config['RAINFALL_FRONTEND_URL'] = os.environ['RAINFALL_FRONTEND_URL'] app.config['DATA_DIR'] = os.environ['DATA_DIR'] app.config['PREVIEW_DIR'] = os.environ['PREVIEW_DIR'] app.config['MAX_CONTENT_LENGTH'] = 100 * 1024 * 1024 # 100 MB max upload if os.environ.get('RAINFALL_ENV') != 'test': db.init_app(app) else: app.config['TESTING'] = True csrf = SeaSurf(app) os.makedirs(app.config['DATA_DIR'], exist_ok=True) os.makedirs(app.config['PREVIEW_DIR'], exist_ok=True) app.register_blueprint(UserBlueprintFactory(csrf).get_blueprint(), url_prefix='/api/v1') app.register_blueprint(site_blueprint, url_prefix='/api/v1') app.register_blueprint(release_blueprint, url_prefix='/api/v1') app.register_blueprint(file_blueprint, url_prefix='/api/v1') FRONTEND_DIR = '../rainfall-frontend/dist' @app.route('/api/v1/upload', methods=['POST']) @with_current_user def upload(user): def allowed_file(filename): if '.' not in filename: return False return '.' + filename.rsplit('.', 1)[1].lower() in ALLOWED_SONG_EXTS def check_song_file_types(song_files): for f in song_files: if not allowed_file(f.filename): return flask.jsonify( status=400, error='File %s is not an allowed file type (%s)' % (f.filename, ' '.join(ALLOWED_SONG_EXTS))), 400 release_id = flask.request.form.get('release_id') if release_id is None: return flask.jsonify(status=400, error='No release id given'), 400 release = db.session.get(Release, UUID(release_id)) site = release.site upload_user = site.user if upload_user.id != user.id: return flask.jsonify(status=401, error='Cannot upload data to that release'), 401 song_files = flask.request.files.getlist("song[]") if not song_files: return flask.jsonify(status=400, error='No songs uploaded'), 400 resp = check_song_file_types(song_files) if resp is not None: return resp cur_release_path = release_path(app.config['DATA_DIR'], release) os.makedirs(cur_release_path, exist_ok=True) for song in song_files: name = secure_filename(song.filename) if len(name) > 1024: return flask.jsonify(status=400, error=f'File name {name} is too long'), 400 file = File(filename=name) release.files.append(file) # Give the file a new name if it's a dupe. This must be done after # the file is added to the release. file.maybe_rename() # Write the file to the filesystem. 
song.save(os.path.join(cur_release_path, file.filename)) db.session.add(release) db.session.commit() return '', 204 @app.route('/api/v1/preview/<site_id>', methods=['GET', 'POST']) @with_current_user @with_current_site def create_preview(site, user): if len(site.releases) == 0 or not any(f for release in site.releases for f in release.files): return flask.jsonify( status=400, error='Cannot preview site without releases/files'), 400 if flask.request.method == 'GET': if site_exists(app.config['PREVIEW_DIR'], str(site.id)): return '', 204 else: return '', 404 result = generate_site(app.config['DATA_DIR'], app.config['PREVIEW_DIR'], str(site.id)) if result[0]: return '', 204 else: return flask.jsonify(status=500, error=result[1]) @app.route('/preview/<site_id>/') @with_current_user @with_current_site def preview_index(site, user): # The decorators ensure that the site belongs to the user. return flask.send_from_directory( os.path.join('..', app.config['PREVIEW_DIR'], public_dir(site)), 'index.html') @app.route('/preview/<site_id>/<path:filename>') @with_current_user @with_current_site def preview_asset(site, user, filename): # The decorators ensure that the site belongs to the user. if filename.endswith('/'): filename += 'index.html' return flask.send_from_directory( os.path.join('..', app.config['PREVIEW_DIR'], public_dir(site)), filename) @app.route('/api/v1/zip/<site_id>') @with_current_user @with_current_site def zip(site, user): generate_zip(app.config['PREVIEW_DIR'], str(site.id))
zip_path = zip_file_path(app.config['PREVIEW_DIR'], str(site.id))
16
2023-10-30 04:43:03+00:00
8k
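In the rainfall record above, `File.maybe_rename` keeps calling `_new_name` until the file's name no longer collides with another file in the same release, appending or incrementing a `_N` suffix. A rough sketch of that collision-avoidance idea against a plain set of existing names (the `dedupe_filename` helper is hypothetical, not the project's actual implementation, which works over `release.files` and tracks `original_filename`):

```python
# Hypothetical helper mirroring the "append or bump a _N suffix until unique"
# idea behind File.maybe_rename in the record above; not the project's code.
def dedupe_filename(name, existing):
    stem, dot, ext = name.rpartition(".")
    if not dot:                      # no extension at all
        stem, ext = name, ""
    candidate, n = name, 0
    while candidate in existing:
        n += 1
        candidate = f"{stem}_{n}.{ext}" if ext else f"{stem}_{n}"
    return candidate


existing = {"track.mp3", "track_1.mp3"}
print(dedupe_filename("track.mp3", existing))   # -> track_2.mp3
print(dedupe_filename("other.mp3", existing))   # -> other.mp3 (no collision)
```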
LasticXYZ/price-simulation
main.py
[ { "identifier": "Config", "path": "config.py", "snippet": "class Config:\n def __init__(\n self,\n interlude_length,\n leadin_length,\n region_length,\n ideal_bulk_proportion,\n limit_cores_offered,\n renewal_bump,\n ):\n # The length in blocks of the Interlude Period for forthcoming sales.\n self.interlude_length = interlude_length\n # The length in blocks of the Leadin Period for forthcoming sales.\n self.leadin_length = leadin_length\n # The length in blocks of the Region Period for forthcoming sales.\n self.region_length = region_length\n # The proportion of cores available for sale which should be sold in order for the price to remain the same in the next sale.\n self.ideal_bulk_proportion = ideal_bulk_proportion\n # An artificial limit to the number of cores which are allowed to be sold. If `Some` then no more cores will be sold than this.\n self.limit_cores_offered = limit_cores_offered\n # The amount by which the renewal price increases each sale period.\n self.renewal_bump = renewal_bump\n\n def update_config(self, updated_values):\n for key, value in updated_values.items():\n if hasattr(self, key):\n setattr(self, key, value)" }, { "identifier": "CalculatePrice", "path": "price.py", "snippet": "class CalculatePrice:\n \"\"\"\n This class is responsible for calculating the prices associated with sales over time.\n\n \"\"\"\n\n def __init__(self, config):\n # The leadin factor is either linear or exponential depending on the value of self.linear\n self.linear = False\n self.factor = 1\n # price for which the cores were bought - important for renewal\n self.initial_bought_price = 1000\n # price for which the cores will be bought in the next sale\n self.new_buy_price = 1000\n # The latest price at which Bulk Coretime was purchased until surpassing the ideal number of cores were sold.\n # we will assume that the last purchase was done at the lowest price of the sale.\n self.sellout_price = None\n self.config = config\n self.price = 1000\n # The number of cores sold in the previous sale.\n self.cores_sold_in_renewal = 40\n self.cores_sold_in_sale = 6\n self.cores_sold = self.cores_sold_in_renewal + self.cores_sold_in_sale\n\n def get_factor(self):\n \"\"\"\n Get the factor of the exponential or linear function.\n \"\"\"\n return self.factor\n\n def get_linear(self):\n \"\"\"\n Get the factor of the exponential or linear function.\n \"\"\"\n return self.linear\n\n def change_linear(self, linear):\n \"\"\"\n Update the linear factor.\n\n :param linear: The new linear factor to set.\n \"\"\"\n self.linear = linear\n\n def change_factor(self, factor):\n \"\"\"\n Update the factor. 
Of the exponential or linear function.\n\n :param factor: The new factor to set.\n \"\"\"\n self.factor = factor\n\n def change_initial_price(self, new_initial_price):\n \"\"\"\n Update the initial price of the core.\n\n :param new_initial_price: The new initial price to set.\n \"\"\"\n self.price = new_initial_price\n\n def change_bought_price(self, new_bought_price):\n \"\"\"\n Update the initial bought price of the core.\n\n :param new_bought_price: The new initial bought price to set.\n \"\"\"\n self.initial_bought_price = new_bought_price\n\n def update_config(self, config):\n \"\"\"\n Update the configuration object.\n\n :param config: The new configuration object to set.\n \"\"\"\n self.config = config\n\n def update_renewal_price(self):\n \"\"\"\n Update the renewal price based on the initial bought price and the new buy price.\n Checkout imitated code at: https://github.com/paritytech/polkadot-sdk/blob/2610450a18e64079abfe98f0a5b57069bbb61009/substrate/frame/broker/src/dispatchable_impls.rs#L155C7-L157\n \"\"\"\n price_cap = self.initial_bought_price * (1 + self.config.renewal_bump)\n self.initial_bought_price = min(price_cap, self.new_buy_price)\n\n def rotate_sale(self, renewed_cores, sold_cores):\n \"\"\"\n Calculate the starting price for the upcoming sale based on the number of cores sold.\n Imitates function `rotate_sale`: https://github.com/paritytech/polkadot-sdk/blob/4298bc608fa8e5d8b8fb1ca0c1028613d82bc99b/substrate/frame/broker/src/tick_impls.rs#L138\n\n :param renewed_cores: The number of cores sold in renewal.\n :param sold_cores: The number of cores sold in the previous sale.\n \"\"\"\n self.cores_sold_in_renewal = renewed_cores\n self.cores_sold_in_sale = sold_cores\n self.cores_sold = self.cores_sold_in_renewal + self.cores_sold_in_sale\n # Calculate the start price for the upcoming sale\n # Update price for new cycle\n offered = (\n self.config.limit_cores_offered\n if self.config.limit_cores_offered is not None\n else 0\n )\n\n ideal_bulk_proportion = (\n self.config.ideal_bulk_proportion\n if self.config.ideal_bulk_proportion is not None\n else 0\n )\n\n ideal = int(ideal_bulk_proportion * offered)\n if offered == 0:\n # No cores offered for sale - no purchase price.\n purchase_price = None\n elif self.cores_sold >= ideal:\n # Sold more than the ideal amount. We should look for the last purchase price\n # before the sell-out. 
If there was no purchase at all, then we avoid having a\n # price here so that we make no alterations to it (since otherwise we would\n # increase it).\n purchase_price = self.sellout_price\n else:\n # Sold less than the ideal - we fall back to the regular price.\n purchase_price = self.price\n\n if purchase_price is not None:\n self.price = (\n Linear.adapt_price(self.cores_sold, ideal, offered) * purchase_price\n )\n\n def __sale_price_calculate(self, region_start, block_now):\n \"\"\"\n Calculate the sale price at a given block time.\n Function imitates `do_purchase`: https://github.com/paritytech/polkadot-sdk/blob/2610450a18e64079abfe98f0a5b57069bbb61009/substrate/frame/broker/src/dispatchable_impls.rs#L97\n and `sale_price`: https://github.com/paritytech/polkadot-sdk/blob/4298bc608fa8e5d8b8fb1ca0c1028613d82bc99b/substrate/frame/broker/src/utility_impls.rs#L63\n\n :param region_start: The starting block of the current region.\n :param block_now: The current block.\n :return: The calculated sale price.\n \"\"\"\n # Calculate the sale price at a given block time\n leadin_length = self.config.leadin_length\n\n # Calculate num\n num = max(block_now - region_start, 0)\n num = min(num, leadin_length)\n\n # Calculate through\n through = num / leadin_length\n\n # Calculate the lead-in factor (LF). You need to define how LF is calculated based on through.\n # Choose linear or exponential.\n if self.linear:\n LF = Linear.leadin_factor_at(through, factor=self.factor)\n else:\n LF = Exponential.leadin_factor_at(through, factor=self.factor)\n\n # Calculate sale price\n sale_price = LF * self.price\n\n # Update the sellout price\n self.__sellout_price_update()\n\n return sale_price\n\n def __sellout_price_update(self):\n \"\"\"\n Update the sellout price until we have sold less than the ideal number\n of cores or if we have not yet set a sellout price.\n We assume that the cores that were sold in the sell period were sold at the lowest price of the sale.\n \"\"\"\n offered = (\n self.config.limit_cores_offered\n if self.config.limit_cores_offered is not None\n else 0\n )\n\n ideal_bulk_proportion = (\n self.config.ideal_bulk_proportion\n if self.config.ideal_bulk_proportion is not None\n else 0\n )\n\n ideal = int(ideal_bulk_proportion * offered)\n if (\n self.cores_sold_in_renewal <= ideal and self.cores_sold_in_sale > 0\n ) or self.sellout_price is None:\n self.sellout_price = self.price\n\n def __renew_price(self, region_start, block_now):\n \"\"\"\n Calculate the new buy price after renewal.\n Function imitates `do_renew`: https://github.com/paritytech/polkadot-sdk/blob/2610450a18e64079abfe98f0a5b57069bbb61009/substrate/frame/broker/src/dispatchable_impls.rs#L125\n\n :param region_start: The starting block of the current region.\n :param block_now: The current block.\n :return: The new buy price after renewal.\n \"\"\"\n cap_price = self.initial_bought_price * (1 + self.config.renewal_bump)\n self.new_buy_price = min(\n cap_price, self.__sale_price_calculate(region_start, block_now)\n )\n return self.new_buy_price\n\n def calculate_price(self, region_start, block_now):\n \"\"\"\n Calculate the price at a specific block, taking into account whether it is in the renewal period or sale period.\n\n :param region_start: The starting block of the current region.\n :param block_now: The current block.\n :return: The calculated price.\n \"\"\"\n if not region_start <= block_now <= (region_start + self.config.region_length):\n raise ValueError(\n \"Invalid input: block_now must be greater than or equal 
to region_start.\"\n )\n elif block_now < region_start + self.config.interlude_length:\n return self.__renew_price(region_start, block_now)\n else:\n return self.__sale_price_calculate(\n region_start + self.config.interlude_length, block_now\n )" }, { "identifier": "StreamlitApp", "path": "streamlitapp.py", "snippet": "class StreamlitApp:\n def __init__(self, config, price_calculator):\n \"\"\"\n Initialize the Streamlit app with configuration and price calculator.\n \"\"\"\n self.config = config\n self.price_calculator = price_calculator\n\n def _get_config_input(self):\n \"\"\"\n Create input fields for configuration and collect updated values.\n \"\"\"\n help_texts = self._get_help_texts()\n\n # Create input fields and collect updated values\n with st.expander(\"Configuration values\"):\n updated_values = {}\n for attribute_name in dir(self.config):\n if not attribute_name.startswith(\"__\") and not callable(getattr(self.config, attribute_name)):\n help_text = help_texts.get(attribute_name, \"\") # Get help text or default to empty string\n value = st.number_input(attribute_name, value=getattr(self.config, attribute_name), help=help_text)\n updated_values[attribute_name] = value\n return updated_values\n\n def _get_help_texts(self):\n \"\"\"\n Returns a dictionary of help texts for each configuration attribute.\n \"\"\"\n return {\n \"interlude_length\": \"Length in blocks of the Interlude Period for forthcoming sales.\",\n \"leadin_length\": \"Length in blocks of the Leadin Period for forthcoming sales.\",\n \"region_length\": \"Length in blocks of the Region Period for forthcoming sales.\",\n \"ideal_bulk_proportion\": \"Proportion of cores available for sale to maintain stable price.\",\n \"limit_cores_offered\": \"Artificial limit to the number of cores allowed to be sold.\",\n \"renewal_bump\": \"Amount by which the renewal price increases each sale period.\"\n }\n\n def _get_observation_time_input(self):\n \"\"\"\n Create a slider to set the observation time.\n \"\"\"\n observe_time = st.slider(\n 'X-AXIS - Observing time', min_value=1, max_value=20, value=2, step=1,\n help='Number of regions to observe: Nb.of regions (28 day chunks)'\n )\n return observe_time\n\n def _get_price_input(self):\n \"\"\"\n Create sliders for setting the initial bought price and starting price.\n \"\"\"\n initial_bought_price = st.slider(\n 'Y-AXIS - Start Price of the Core You Bought', min_value=0, max_value=10000, value=1000, step=10, help='This represents the initial price of the core you bought in the previous region. If we are comparing it to the graph this would be Region 0.'\n )\n self.price_calculator.change_bought_price(initial_bought_price)\n\n price = st.slider(\n 'Y-AXIS Starting Price', min_value=0, max_value=10000, value=1000, step=10, help='This is the starting price at which the price of the cores are initialized in the when the sales are started by admin.'\n )\n self.price_calculator.change_initial_price(price)\n\n def _get_factor_curve_input(self):\n \"\"\"\n Create inputs for setting the leading period curve.\n \"\"\"\n with st.expander(\"Change the Leadin period curve\"):\n st.write(\"Change the lead-in factor (LF) curve - to exponential or linear. 
The default implementation in the `broker pallet` code is currently linear, with a factor value of 1.\")\n linear = st.toggle('-', value=True, help='Toggle between exponential and linear')\n linear_text = 'Current value: Linear' if linear else 'Current value: Exponential'\n st.write(linear_text)\n\n self.price_calculator.change_linear(linear)\n factor_value = st.slider(\n 'Change the Factor Value to see changes in exp or linear.', min_value=1, max_value=10, value=1, step=1,\n help='Change the factor value for the lead-in factor curve. The defualt value is 1. This factor is not implemented in the `broker pallet` code. It is given as an example of how would an exponential function be implemented if it were to be implemented. '\n )\n self.price_calculator.change_factor(factor_value)\n\n def _get_cores_input(self, observe_time):\n \"\"\"\n Create sliders for setting the number of cores renewed and sold in each sale.\n \"\"\"\n st.header(\"Cores Renewed and Sold in Each Sale\")\n const_or_regions = st.toggle('Toggle between const and variable sales', value=True, help='Switch between constant sales of cores over all regions or variable sales.')\n monthly_renewals = {}\n monthly_sales = {}\n\n if const_or_regions:\n st.markdown(\"### Constant sales of cores over all regions\")\n\n renewed_cores_in_each_sale = st.slider(\n 'Cores renewed in each sale', min_value=0, max_value=self.config.limit_cores_offered, value=10, step=1, help='This represents the number of cores that are renewed in each sale. This is a constant value for all the regions.'\n )\n\n max_sold_cores = self.config.limit_cores_offered - renewed_cores_in_each_sale\n sold_cores_in_each_sale = 0 if max_sold_cores <= 0 else st.slider(\n 'Cores sold in each sale', min_value=0, max_value=max_sold_cores, value=0, step=1, help='This represents the number of cores that are sold in each sale. This is a constant value for all the regions.'\n )\n\n for month in range(1, observe_time + 1):\n monthly_renewals[month] = renewed_cores_in_each_sale\n monthly_sales[month] = sold_cores_in_each_sale\n\n else:\n st.markdown(\"### Adjustment for each region length (28 days)\")\n for month in range(1, observe_time + 1):\n with st.expander(f\"Region {month} Adjustments\"):\n renewed_cores = st.slider(f'Cores renewed in Month {month}', min_value=0, max_value=self.config.limit_cores_offered, value=10, step=1)\n if self.config.limit_cores_offered - renewed_cores > 0:\n sold_cores = st.slider(f'Cores sold in Month {month}', min_value=0, max_value=self.config.limit_cores_offered - renewed_cores, value=0, step=1)\n else:\n sold_cores = 0\n monthly_renewals[month] = renewed_cores\n monthly_sales[month] = sold_cores\n st.write(\"Region nb. 
\", month, \": Renewals \", renewed_cores, \", Sold \", sold_cores)\n\n return monthly_renewals, monthly_sales\n\n\n def _get_slider_input(self):\n \"\"\"\n Combine all slider inputs into one method.\n \"\"\"\n observe_time = self._get_observation_time_input()\n observe_blocks = observe_time * self.config.region_length\n\n self._get_price_input()\n self._get_factor_curve_input()\n monthly_renewals, monthly_sales = self._get_cores_input(observe_time)\n\n return observe_blocks, monthly_renewals, monthly_sales\n\n def _plot_sale_price(self, ax, block_times, sale_prices, region_start, label):\n ax.axvline(x=region_start, color='#F88379', linestyle='--')\n ax.axvline(x=region_start + self.config.interlude_length, color='#dd3', linestyle='--')\n ax.axvline(x=region_start + self.config.interlude_length + self.config.leadin_length, color='#097969', linestyle='--')\n ax.plot(block_times, sale_prices, label=label)\n ax.axvline(x=region_start + self.config.region_length, color='b', linestyle='--')\n\n def _create_sidebar(self):\n \"\"\"\n Creates sidebar for configuration input and slider input.\n \"\"\"\n with st.sidebar:\n st.header(\"Configuration Settings\")\n # Update the configuration based on user input\n updated_values = self._get_config_input()\n self.config.update_config(updated_values)\n self.price_calculator.update_config(self.config)\n\n st.header(\"Sale Settings\")\n observe_blocks, monthly_renewals, monthly_sales = self._get_slider_input()\n\n return observe_blocks, monthly_renewals, monthly_sales\n\n def _explaination_section(self):\n st.markdown(\"#### 🎉 Welcome to the Coretime Price Simulator! 🎉\")\n st.markdown(\"To get started and learn how to effectively use this tool, please refer to our comprehensive guide at [docs.lastic.xyz](https://docs.lastic.xyz/price-simulator/). This simulator is designed with a key presumption: it assumes that purchases are made at the lowest possible price in each cycle. However, please note that this may not always reflect real-world scenarios.\")\n st.write(\"To enhance your experience and understanding of the graph, here's a quick rundown of some essential terms.\")\n st.markdown(create_tooltip(\"Red-Yellow: INTERLUDE PERIOD\", \"The area between the red and yellow section represents the INTERLUDE Period, this is the time when accounts who bought their cores in previous blocks can renew them. 
It's the span between the end of one Leadin Period and the start of the next, where the core's price and allocation are stable.\"), unsafe_allow_html=True)\n st.markdown(create_tooltip(\"Yellow-Green: LEADIN PERIOD\", \"The area between the yellow and green section represents the LEADIN Period, this is the time when new sales occur.\"), unsafe_allow_html=True)\n st.markdown(create_tooltip(\"Green-Green: REGION PERIOD\", \"The area between two green sections represents a REGION Period, This represents the duration of each core allocation following the sale.\"), unsafe_allow_html=True)\n\n def _plot_graph(self, observe_blocks, monthly_renewals, monthly_sales):\n region_nb = int(observe_blocks / self.config.region_length)\n\n fig, ax = plt.subplots()\n for region_i in range(region_nb):\n region_start = SALE_START + region_i * self.config.region_length\n block_times = np.linspace(region_start, region_start + self.config.region_length, self.config.region_length)\n \n sale_prices = [self.price_calculator.calculate_price(region_start, block_now) for block_now in block_times]\n self._plot_sale_price(ax, block_times, sale_prices, region_start, f'Region {region_i+1}')\n\n # Recalculate the price of renewal of the core\n self.price_calculator.update_renewal_price()\n # Recalculate the price at the end of each region\n self.price_calculator.rotate_sale(monthly_renewals.get(region_i + 1, 0), monthly_sales.get(region_i + 1, 0))\n\n ax.set_xlabel('Block Time')\n ax.set_ylabel('Sale Price')\n ax.set_title('Sale Price over Time')\n ax.legend()\n st.pyplot(fig)\n\n def run(self):\n \"\"\"\n Run the Streamlit application.\n \"\"\"\n observe_blocks, monthly_renewals, monthly_sales = self._create_sidebar()\n\n st.title('Coretime Sale Price over Time')\n\n self._explaination_section()\n self._plot_graph(observe_blocks, monthly_renewals, monthly_sales)" } ]
from config import Config from price import CalculatePrice from streamlitapp import StreamlitApp
5,114
BLOCKS_PER_DAY = 5 SALE_START = 0 def main(): # Initial configuration
BLOCKS_PER_DAY = 5 SALE_START = 0 def main(): # Initial configuration
config = Config(
0
2023-10-30 12:49:00+00:00
8k
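The price-simulation record above computes the sale price by clamping the elapsed blocks to the lead-in length, converting that to a `through` fraction, and multiplying the start price by a lead-in factor, while a renewal is capped at `initial_bought_price * (1 + renewal_bump)` and takes the minimum of that cap and the current sale price. A small sketch of those two pieces; the linear decay used for the lead-in factor here is an assumption, since the record calls `Linear.leadin_factor_at` without showing its definition:

```python
def leadin_factor(through, factor=1):
    # Assumed curve: decays linearly from (1 + factor) at the start of the
    # lead-in period to 1 at its end. The record calls Linear.leadin_factor_at,
    # whose definition is not shown, so this shape is only illustrative.
    return 1 + factor * (1 - through)


def sale_price(start_price, region_start, block_now, leadin_length, factor=1):
    # Clamp elapsed blocks to the lead-in window, as in __sale_price_calculate.
    num = min(max(block_now - region_start, 0), leadin_length)
    through = num / leadin_length
    return leadin_factor(through, factor) * start_price


def renew_price(initial_bought_price, current_sale_price, renewal_bump=0.05):
    # Renewal is capped relative to the price the core was bought for last time.
    cap = initial_bought_price * (1 + renewal_bump)
    return min(cap, current_sale_price)


p = sale_price(start_price=1000, region_start=0, block_now=50, leadin_length=100)
print(p, renew_price(initial_bought_price=1000, current_sale_price=p))
```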
dangeng/flowmag
flow_models/gmflow/gmflow.py
[ { "identifier": "CNNEncoder", "path": "flow_models/gmflow/backbone.py", "snippet": "class CNNEncoder(nn.Module):\n def __init__(self, output_dim=128,\n norm_layer=nn.InstanceNorm2d,\n num_output_scales=1,\n **kwargs,\n ):\n super(CNNEncoder, self).__init__()\n self.num_branch = num_output_scales\n\n feature_dims = [64, 96, 128]\n\n self.conv1 = nn.Conv2d(3, feature_dims[0], kernel_size=7, stride=2, padding=3, bias=False) # 1/2\n self.norm1 = norm_layer(feature_dims[0])\n self.relu1 = nn.ReLU(inplace=True)\n\n self.in_planes = feature_dims[0]\n self.layer1 = self._make_layer(feature_dims[0], stride=1, norm_layer=norm_layer) # 1/2\n self.layer2 = self._make_layer(feature_dims[1], stride=2, norm_layer=norm_layer) # 1/4\n\n # highest resolution 1/4 or 1/8\n stride = 2 if num_output_scales == 1 else 1\n self.layer3 = self._make_layer(feature_dims[2], stride=stride,\n norm_layer=norm_layer,\n ) # 1/4 or 1/8\n\n self.conv2 = nn.Conv2d(feature_dims[2], output_dim, 1, 1, 0)\n\n if self.num_branch > 1:\n if self.num_branch == 4:\n strides = (1, 2, 4, 8)\n elif self.num_branch == 3:\n strides = (1, 2, 4)\n elif self.num_branch == 2:\n strides = (1, 2)\n else:\n raise ValueError\n\n self.trident_conv = MultiScaleTridentConv(output_dim, output_dim,\n kernel_size=3,\n strides=strides,\n paddings=1,\n num_branch=self.num_branch,\n )\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):\n if m.weight is not None:\n nn.init.constant_(m.weight, 1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, dim, stride=1, dilation=1, norm_layer=nn.InstanceNorm2d):\n layer1 = ResidualBlock(self.in_planes, dim, norm_layer=norm_layer, stride=stride, dilation=dilation)\n layer2 = ResidualBlock(dim, dim, norm_layer=norm_layer, stride=1, dilation=dilation)\n\n layers = (layer1, layer2)\n\n self.in_planes = dim\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu1(x)\n\n x = self.layer1(x) # 1/2\n x = self.layer2(x) # 1/4\n x = self.layer3(x) # 1/8 or 1/4\n\n x = self.conv2(x)\n\n if self.num_branch > 1:\n out = self.trident_conv([x] * self.num_branch) # high to low res\n else:\n out = [x]\n\n return out" }, { "identifier": "FeatureTransformer", "path": "flow_models/gmflow/transformer.py", "snippet": "class FeatureTransformer(nn.Module):\n def __init__(self,\n num_layers=6,\n d_model=128,\n nhead=1,\n attention_type='swin',\n ffn_dim_expansion=4,\n **kwargs,\n ):\n super(FeatureTransformer, self).__init__()\n\n self.attention_type = attention_type\n\n self.d_model = d_model\n self.nhead = nhead\n\n self.layers = nn.ModuleList([\n TransformerBlock(d_model=d_model,\n nhead=nhead,\n attention_type=attention_type,\n ffn_dim_expansion=ffn_dim_expansion,\n with_shift=True if attention_type == 'swin' and i % 2 == 1 else False,\n )\n for i in range(num_layers)])\n\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, feature0, feature1,\n attn_num_splits=None,\n **kwargs,\n ):\n\n b, c, h, w = feature0.shape\n assert self.d_model == c\n\n feature0 = feature0.flatten(-2).permute(0, 2, 1) # [B, H*W, C]\n feature1 = feature1.flatten(-2).permute(0, 2, 1) # [B, H*W, C]\n\n if self.attention_type == 'swin' and attn_num_splits > 1:\n # global and refine use different number of splits\n window_size_h = h // attn_num_splits\n window_size_w = w // 
attn_num_splits\n\n # compute attn mask once\n shifted_window_attn_mask = generate_shift_window_attn_mask(\n input_resolution=(h, w),\n window_size_h=window_size_h,\n window_size_w=window_size_w,\n shift_size_h=window_size_h // 2,\n shift_size_w=window_size_w // 2,\n device=feature0.device,\n ) # [K*K, H/K*W/K, H/K*W/K]\n else:\n shifted_window_attn_mask = None\n\n # concat feature0 and feature1 in batch dimension to compute in parallel\n concat0 = torch.cat((feature0, feature1), dim=0) # [2B, H*W, C]\n concat1 = torch.cat((feature1, feature0), dim=0) # [2B, H*W, C]\n\n for layer in self.layers:\n concat0 = layer(concat0, concat1,\n height=h,\n width=w,\n shifted_window_attn_mask=shifted_window_attn_mask,\n attn_num_splits=attn_num_splits,\n )\n\n # update feature1\n concat1 = torch.cat(concat0.chunk(chunks=2, dim=0)[::-1], dim=0)\n\n feature0, feature1 = concat0.chunk(chunks=2, dim=0) # [B, H*W, C]\n\n # reshape back\n feature0 = feature0.view(b, h, w, c).permute(0, 3, 1, 2).contiguous() # [B, C, H, W]\n feature1 = feature1.view(b, h, w, c).permute(0, 3, 1, 2).contiguous() # [B, C, H, W]\n\n return feature0, feature1" }, { "identifier": "FeatureFlowAttention", "path": "flow_models/gmflow/transformer.py", "snippet": "class FeatureFlowAttention(nn.Module):\n \"\"\"\n flow propagation with self-attention on feature\n query: feature0, key: feature0, value: flow\n \"\"\"\n\n def __init__(self, in_channels,\n **kwargs,\n ):\n super(FeatureFlowAttention, self).__init__()\n\n self.q_proj = nn.Linear(in_channels, in_channels)\n self.k_proj = nn.Linear(in_channels, in_channels)\n\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, feature0, flow,\n local_window_attn=False,\n local_window_radius=1,\n **kwargs,\n ):\n # q, k: feature [B, C, H, W], v: flow [B, 2, H, W]\n if local_window_attn:\n return self.forward_local_window_attn(feature0, flow,\n local_window_radius=local_window_radius)\n\n b, c, h, w = feature0.size()\n\n query = feature0.view(b, c, h * w).permute(0, 2, 1) # [B, H*W, C]\n\n # a note: the ``correct'' implementation should be:\n # ``query = self.q_proj(query), key = self.k_proj(query)''\n # this problem is observed while cleaning up the code\n # however, this doesn't affect the performance since the projection is a linear operation,\n # thus the two projection matrices for key can be merged\n # so I just leave it as is in order to not re-train all models :)\n query = self.q_proj(query) # [B, H*W, C]\n key = self.k_proj(query) # [B, H*W, C]\n\n value = flow.view(b, flow.size(1), h * w).permute(0, 2, 1) # [B, H*W, 2]\n\n scores = torch.matmul(query, key.permute(0, 2, 1)) / (c ** 0.5) # [B, H*W, H*W]\n prob = torch.softmax(scores, dim=-1)\n\n out = torch.matmul(prob, value) # [B, H*W, 2]\n out = out.view(b, h, w, value.size(-1)).permute(0, 3, 1, 2) # [B, 2, H, W]\n\n return out\n\n def forward_local_window_attn(self, feature0, flow,\n local_window_radius=1,\n ):\n assert flow.size(1) == 2\n assert local_window_radius > 0\n\n b, c, h, w = feature0.size()\n\n feature0_reshape = self.q_proj(feature0.view(b, c, -1).permute(0, 2, 1)\n ).reshape(b * h * w, 1, c) # [B*H*W, 1, C]\n\n kernel_size = 2 * local_window_radius + 1\n\n feature0_proj = self.k_proj(feature0.view(b, c, -1).permute(0, 2, 1)).permute(0, 2, 1).reshape(b, c, h, w)\n\n feature0_window = F.unfold(feature0_proj, kernel_size=kernel_size,\n padding=local_window_radius) # [B, C*(2R+1)^2), H*W]\n\n feature0_window = feature0_window.view(b, c, kernel_size ** 2, h, w).permute(\n 0, 3, 
4, 1, 2).reshape(b * h * w, c, kernel_size ** 2) # [B*H*W, C, (2R+1)^2]\n\n flow_window = F.unfold(flow, kernel_size=kernel_size,\n padding=local_window_radius) # [B, 2*(2R+1)^2), H*W]\n\n flow_window = flow_window.view(b, 2, kernel_size ** 2, h, w).permute(\n 0, 3, 4, 2, 1).reshape(b * h * w, kernel_size ** 2, 2) # [B*H*W, (2R+1)^2, 2]\n\n scores = torch.matmul(feature0_reshape, feature0_window) / (c ** 0.5) # [B*H*W, 1, (2R+1)^2]\n\n prob = torch.softmax(scores, dim=-1)\n\n out = torch.matmul(prob, flow_window).view(b, h, w, 2).permute(0, 3, 1, 2).contiguous() # [B, 2, H, W]\n\n return out" }, { "identifier": "global_correlation_softmax", "path": "flow_models/gmflow/matching.py", "snippet": "def global_correlation_softmax(feature0, feature1,\n pred_bidir_flow=False,\n ):\n # global correlation\n b, c, h, w = feature0.shape\n feature0 = feature0.view(b, c, -1).permute(0, 2, 1) # [B, H*W, C]\n feature1 = feature1.view(b, c, -1) # [B, C, H*W]\n\n correlation = torch.matmul(feature0, feature1).view(b, h, w, h, w) / (c ** 0.5) # [B, H, W, H, W]\n\n # flow from softmax\n init_grid = coords_grid(b, h, w).to(correlation.device) # [B, 2, H, W]\n grid = init_grid.view(b, 2, -1).permute(0, 2, 1) # [B, H*W, 2]\n\n correlation = correlation.view(b, h * w, h * w) # [B, H*W, H*W]\n\n if pred_bidir_flow:\n correlation = torch.cat((correlation, correlation.permute(0, 2, 1)), dim=0) # [2*B, H*W, H*W]\n init_grid = init_grid.repeat(2, 1, 1, 1) # [2*B, 2, H, W]\n grid = grid.repeat(2, 1, 1) # [2*B, H*W, 2]\n b = b * 2\n\n prob = F.softmax(correlation, dim=-1) # [B, H*W, H*W]\n\n correspondence = torch.matmul(prob, grid).view(b, h, w, 2).permute(0, 3, 1, 2) # [B, 2, H, W]\n\n # when predicting bidirectional flow, flow is the concatenation of forward flow and backward flow\n flow = correspondence - init_grid\n\n return flow, prob" }, { "identifier": "local_correlation_softmax", "path": "flow_models/gmflow/matching.py", "snippet": "def local_correlation_softmax(feature0, feature1, local_radius,\n padding_mode='zeros',\n ):\n b, c, h, w = feature0.size()\n coords_init = coords_grid(b, h, w).to(feature0.device) # [B, 2, H, W]\n coords = coords_init.view(b, 2, -1).permute(0, 2, 1) # [B, H*W, 2]\n\n local_h = 2 * local_radius + 1\n local_w = 2 * local_radius + 1\n\n window_grid = generate_window_grid(-local_radius, local_radius,\n -local_radius, local_radius,\n local_h, local_w, device=feature0.device) # [2R+1, 2R+1, 2]\n window_grid = window_grid.reshape(-1, 2).repeat(b, 1, 1, 1) # [B, 1, (2R+1)^2, 2]\n sample_coords = coords.unsqueeze(-2) + window_grid # [B, H*W, (2R+1)^2, 2]\n\n sample_coords_softmax = sample_coords\n\n # exclude coords that are out of image space\n valid_x = (sample_coords[:, :, :, 0] >= 0) & (sample_coords[:, :, :, 0] < w) # [B, H*W, (2R+1)^2]\n valid_y = (sample_coords[:, :, :, 1] >= 0) & (sample_coords[:, :, :, 1] < h) # [B, H*W, (2R+1)^2]\n\n valid = valid_x & valid_y # [B, H*W, (2R+1)^2], used to mask out invalid values when softmax\n\n # normalize coordinates to [-1, 1]\n sample_coords_norm = normalize_coords(sample_coords, h, w) # [-1, 1]\n window_feature = F.grid_sample(feature1, sample_coords_norm,\n padding_mode=padding_mode, align_corners=True\n ).permute(0, 2, 1, 3) # [B, H*W, C, (2R+1)^2]\n feature0_view = feature0.permute(0, 2, 3, 1).view(b, h * w, 1, c) # [B, H*W, 1, C]\n\n corr = torch.matmul(feature0_view, window_feature).view(b, h * w, -1) / (c ** 0.5) # [B, H*W, (2R+1)^2]\n\n # mask invalid locations\n corr[~valid] = -1e9\n\n prob = F.softmax(corr, -1) # [B, H*W, 
(2R+1)^2]\n\n correspondence = torch.matmul(prob.unsqueeze(-2), sample_coords_softmax).squeeze(-2).view(\n b, h, w, 2).permute(0, 3, 1, 2) # [B, 2, H, W]\n\n flow = correspondence - coords_init\n match_prob = prob\n\n return flow, match_prob" }, { "identifier": "flow_warp", "path": "flow_models/gmflow/geometry.py", "snippet": "def flow_warp(feature, flow, mask=False, padding_mode='zeros'):\n b, c, h, w = feature.size()\n assert flow.size(1) == 2\n\n grid = coords_grid(b, h, w).to(flow.device) + flow # [B, 2, H, W]\n\n return bilinear_sample(feature, grid, padding_mode=padding_mode,\n return_mask=mask)" }, { "identifier": "normalize_img", "path": "flow_models/gmflow/utils.py", "snippet": "def normalize_img(img0, img1):\n # loaded images are in [0, 255]\n # normalize by ImageNet mean and std\n mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(img1.device)\n std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(img1.device)\n img0 = (img0 / 255. - mean) / std\n img1 = (img1 / 255. - mean) / std\n\n return img0, img1" }, { "identifier": "feature_add_position", "path": "flow_models/gmflow/utils.py", "snippet": "def feature_add_position(feature0, feature1, attn_splits, feature_channels):\n pos_enc = PositionEmbeddingSine(num_pos_feats=feature_channels // 2)\n\n if attn_splits > 1: # add position in splited window\n feature0_splits = split_feature(feature0, num_splits=attn_splits)\n feature1_splits = split_feature(feature1, num_splits=attn_splits)\n\n position = pos_enc(feature0_splits)\n\n feature0_splits = feature0_splits + position\n feature1_splits = feature1_splits + position\n\n feature0 = merge_splits(feature0_splits, num_splits=attn_splits)\n feature1 = merge_splits(feature1_splits, num_splits=attn_splits)\n else:\n position = pos_enc(feature0)\n\n feature0 = feature0 + position\n feature1 = feature1 + position\n\n return feature0, feature1" } ]
import torch import torch.nn as nn import torch.nn.functional as F from .backbone import CNNEncoder from .transformer import FeatureTransformer, FeatureFlowAttention from .matching import global_correlation_softmax, local_correlation_softmax from .geometry import flow_warp from .utils import normalize_img, feature_add_position
6,065
                 ):
        super(GMFlow, self).__init__()

        self.num_scales = num_scales
        self.feature_channels = feature_channels
        self.upsample_factor = upsample_factor
        self.attention_type = attention_type
        self.num_transformer_layers = num_transformer_layers

        # CNN backbone
        self.backbone = CNNEncoder(output_dim=feature_channels, num_output_scales=num_scales)

        # Transformer
        self.transformer = FeatureTransformer(num_layers=num_transformer_layers,
                                              d_model=feature_channels,
                                              nhead=num_head,
                                              attention_type=attention_type,
                                              ffn_dim_expansion=ffn_dim_expansion,
                                              )

        # flow propagation with self-attn
        self.feature_flow_attn = FeatureFlowAttention(in_channels=feature_channels)

        # convex upsampling: concat feature0 and flow as input
        self.upsampler = nn.Sequential(nn.Conv2d(2 + feature_channels, 256, 3, 1, 1),
                                       nn.ReLU(inplace=True),
                                       nn.Conv2d(256, upsample_factor ** 2 * 9, 1, 1, 0))

    def extract_feature(self, img0, img1):
        concat = torch.cat((img0, img1), dim=0)  # [2B, C, H, W]
        features = self.backbone(concat)  # list of [2B, C, H, W], resolution from high to low

        # reverse: resolution from low to high
        features = features[::-1]

        feature0, feature1 = [], []

        for i in range(len(features)):
            feature = features[i]
            chunks = torch.chunk(feature, 2, 0)  # tuple
            feature0.append(chunks[0])
            feature1.append(chunks[1])

        return feature0, feature1

    def upsample_flow(self, flow, feature, bilinear=False, upsample_factor=8):
        if bilinear:
            up_flow = F.interpolate(flow, scale_factor=upsample_factor,
                                    mode='bilinear', align_corners=True) * upsample_factor
        else:
            # convex upsampling
            concat = torch.cat((flow, feature), dim=1)

            mask = self.upsampler(concat)
            b, flow_channel, h, w = flow.shape
            mask = mask.view(b, 1, 9, self.upsample_factor, self.upsample_factor, h, w)  # [B, 1, 9, K, K, H, W]
            mask = torch.softmax(mask, dim=2)

            up_flow = F.unfold(self.upsample_factor * flow, [3, 3], padding=1)
            up_flow = up_flow.view(b, flow_channel, 9, 1, 1, h, w)  # [B, 2, 9, 1, 1, H, W]

            up_flow = torch.sum(mask * up_flow, dim=2)  # [B, 2, K, K, H, W]
            up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)  # [B, 2, K, H, K, W]
            up_flow = up_flow.reshape(b, flow_channel, self.upsample_factor * h,
                                      self.upsample_factor * w)  # [B, 2, K*H, K*W]

        return up_flow

    def forward(self, img0, img1,
                attn_splits_list=None,
                corr_radius_list=None,
                prop_radius_list=None,
                pred_bidir_flow=False,
                **kwargs,
                ):
        results_dict = {}
        flow_preds = []

        img0, img1 = normalize_img(img0, img1)  # [B, 3, H, W]

        # resolution low to high
        feature0_list, feature1_list = self.extract_feature(img0, img1)  # list of features

        flow = None

        assert len(attn_splits_list) == len(corr_radius_list) == len(prop_radius_list) == self.num_scales

        for scale_idx in range(self.num_scales):
            feature0, feature1 = feature0_list[scale_idx], feature1_list[scale_idx]

            if pred_bidir_flow and scale_idx > 0:
                # predicting bidirectional flow with refinement
                feature0, feature1 = torch.cat((feature0, feature1), dim=0), torch.cat((feature1, feature0), dim=0)

            upsample_factor = self.upsample_factor * (2 ** (self.num_scales - 1 - scale_idx))

            if scale_idx > 0:
                flow = F.interpolate(flow, scale_factor=2, mode='bilinear', align_corners=True) * 2

            if flow is not None:
                flow = flow.detach()
                feature1 = flow_warp(feature1, flow)  # [B, C, H, W]

            attn_splits = attn_splits_list[scale_idx]
            corr_radius = corr_radius_list[scale_idx]
            prop_radius = prop_radius_list[scale_idx]

            # add position to features
            feature0, feature1 = feature_add_position(feature0, feature1, attn_splits, self.feature_channels)

            # Transformer
            feature0, feature1 = self.transformer(feature0, feature1, attn_num_splits=attn_splits)

            # correlation and softmax
            if corr_radius == -1:  # global matching
                flow_pred = global_correlation_softmax(feature0, feature1, pred_bidir_flow)[0]
            else:  # local matching
class GMFlow(nn.Module):
    def __init__(self,
                 num_scales=1,
                 upsample_factor=8,
                 feature_channels=128,
                 attention_type='swin',
                 num_transformer_layers=6,
                 ffn_dim_expansion=4,
                 num_head=1,
                 **kwargs,
                 ):
        super(GMFlow, self).__init__()

        self.num_scales = num_scales
        self.feature_channels = feature_channels
        self.upsample_factor = upsample_factor
        self.attention_type = attention_type
        self.num_transformer_layers = num_transformer_layers

        # CNN backbone
        self.backbone = CNNEncoder(output_dim=feature_channels, num_output_scales=num_scales)

        # Transformer
        self.transformer = FeatureTransformer(num_layers=num_transformer_layers,
                                              d_model=feature_channels,
                                              nhead=num_head,
                                              attention_type=attention_type,
                                              ffn_dim_expansion=ffn_dim_expansion,
                                              )

        # flow propagation with self-attn
        self.feature_flow_attn = FeatureFlowAttention(in_channels=feature_channels)

        # convex upsampling: concat feature0 and flow as input
        self.upsampler = nn.Sequential(nn.Conv2d(2 + feature_channels, 256, 3, 1, 1),
                                       nn.ReLU(inplace=True),
                                       nn.Conv2d(256, upsample_factor ** 2 * 9, 1, 1, 0))

    def extract_feature(self, img0, img1):
        concat = torch.cat((img0, img1), dim=0)  # [2B, C, H, W]
        features = self.backbone(concat)  # list of [2B, C, H, W], resolution from high to low

        # reverse: resolution from low to high
        features = features[::-1]

        feature0, feature1 = [], []

        for i in range(len(features)):
            feature = features[i]
            chunks = torch.chunk(feature, 2, 0)  # tuple
            feature0.append(chunks[0])
            feature1.append(chunks[1])

        return feature0, feature1

    def upsample_flow(self, flow, feature, bilinear=False, upsample_factor=8):
        if bilinear:
            up_flow = F.interpolate(flow, scale_factor=upsample_factor,
                                    mode='bilinear', align_corners=True) * upsample_factor
        else:
            # convex upsampling
            concat = torch.cat((flow, feature), dim=1)

            mask = self.upsampler(concat)
            b, flow_channel, h, w = flow.shape
            mask = mask.view(b, 1, 9, self.upsample_factor, self.upsample_factor, h, w)  # [B, 1, 9, K, K, H, W]
            mask = torch.softmax(mask, dim=2)

            up_flow = F.unfold(self.upsample_factor * flow, [3, 3], padding=1)
            up_flow = up_flow.view(b, flow_channel, 9, 1, 1, h, w)  # [B, 2, 9, 1, 1, H, W]

            up_flow = torch.sum(mask * up_flow, dim=2)  # [B, 2, K, K, H, W]
            up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)  # [B, 2, K, H, K, W]
            up_flow = up_flow.reshape(b, flow_channel, self.upsample_factor * h,
                                      self.upsample_factor * w)  # [B, 2, K*H, K*W]

        return up_flow

    def forward(self, img0, img1,
                attn_splits_list=None,
                corr_radius_list=None,
                prop_radius_list=None,
                pred_bidir_flow=False,
                **kwargs,
                ):
        results_dict = {}
        flow_preds = []

        img0, img1 = normalize_img(img0, img1)  # [B, 3, H, W]

        # resolution low to high
        feature0_list, feature1_list = self.extract_feature(img0, img1)  # list of features

        flow = None

        assert len(attn_splits_list) == len(corr_radius_list) == len(prop_radius_list) == self.num_scales

        for scale_idx in range(self.num_scales):
            feature0, feature1 = feature0_list[scale_idx], feature1_list[scale_idx]

            if pred_bidir_flow and scale_idx > 0:
                # predicting bidirectional flow with refinement
                feature0, feature1 = torch.cat((feature0, feature1), dim=0), torch.cat((feature1, feature0), dim=0)

            upsample_factor = self.upsample_factor * (2 ** (self.num_scales - 1 - scale_idx))

            if scale_idx > 0:
                flow = F.interpolate(flow, scale_factor=2, mode='bilinear', align_corners=True) * 2

            if flow is not None:
                flow = flow.detach()
                feature1 = flow_warp(feature1, flow)  # [B, C, H, W]

            attn_splits = attn_splits_list[scale_idx]
            corr_radius = corr_radius_list[scale_idx]
            prop_radius = prop_radius_list[scale_idx]

            # add position to features
            feature0, feature1 = feature_add_position(feature0, feature1, attn_splits, self.feature_channels)

            # Transformer
            feature0, feature1 = self.transformer(feature0, feature1, attn_num_splits=attn_splits)

            # correlation and softmax
            if corr_radius == -1:  # global matching
                flow_pred = global_correlation_softmax(feature0, feature1, pred_bidir_flow)[0]
            else:  # local matching
flow_pred = local_correlation_softmax(feature0, feature1, corr_radius)[0]
4
2023-10-27 05:23:08+00:00
8k
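As an orientation for how the GMFlow module in the record above is typically driven, the following is a minimal single-scale usage sketch. It is not taken from the record: the import path `gmflow.gmflow`, the dummy 384x512 inputs, and the per-scale settings (`attn_splits_list=[2]`, `corr_radius_list=[-1]`, `prop_radius_list=[-1]`) are assumptions, and because the forward method is truncated here, the structure of the returned dictionary is assumed as well.

    # Minimal usage sketch (assumed import path, input size, and per-scale settings).
    import torch
    from gmflow.gmflow import GMFlow  # assumed module layout

    model = GMFlow(num_scales=1, upsample_factor=8, feature_channels=128,
                   attention_type='swin', num_transformer_layers=6)
    model.eval()

    # Dummy frame pair; H and W should be divisible by upsample_factor (and by attn_splits).
    img0 = torch.randn(1, 3, 384, 512)
    img1 = torch.randn(1, 3, 384, 512)

    with torch.no_grad():
        results = model(img0, img1,
                        attn_splits_list=[2],   # one entry per scale
                        corr_radius_list=[-1],  # -1 selects global correlation matching
                        prop_radius_list=[-1])  # radius for self-attention flow propagation
    # 'results' is expected to carry the upsampled flow predictions (assumed key: 'flow_preds').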
Gene-Weaver/VoucherVision
vouchervision/VoucherVision_Config_Builder.py
[ { "identifier": "get_default_download_folder", "path": "vouchervision/LeafMachine2_Config_Builder.py", "snippet": "def get_default_download_folder():\n system_platform = platform.system() # Gets the system platform, e.g., 'Linux', 'Windows', 'Darwin'\n\n if system_platform == \"Windows\":\n # Typically, the Downloads folder for Windows is in the user's profile folder\n default_output_folder = os.path.join(os.getenv('USERPROFILE'), 'Downloads')\n elif system_platform == \"Darwin\":\n # Typically, the Downloads folder for macOS is in the user's home directory\n default_output_folder = os.path.join(os.path.expanduser(\"~\"), 'Downloads')\n elif system_platform == \"Linux\":\n # Typically, the Downloads folder for Linux is in the user's home directory\n default_output_folder = os.path.join(os.path.expanduser(\"~\"), 'Downloads')\n else:\n default_output_folder = \"set/path/to/downloads/folder\"\n print(\"Please manually set the output folder\")\n return default_output_folder" }, { "identifier": "write_config_file", "path": "vouchervision/LeafMachine2_Config_Builder.py", "snippet": "def write_config_file(config_data, dir_home, filename=\"LeafMachine2.yaml\"):\n file_path = os.path.join(dir_home, filename)\n\n # Write the data to a YAML file\n with open(file_path, \"w\") as outfile:\n yaml.dump(config_data, outfile, default_flow_style=False)" }, { "identifier": "validate_dir", "path": "vouchervision/general_utils.py", "snippet": "def validate_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir, exist_ok=True)" }, { "identifier": "print_main_fail", "path": "vouchervision/general_utils.py", "snippet": "def print_main_fail(message):\n indent_level = 1\n white_space = \" \" * 5 * indent_level\n end = \" \" * int(80 - len(message) - len(white_space))\n # end_white_space = \" \" * end\n blank = \" \" * 80\n print(f\"{bcolors.CREDBG2}{blank}{bcolors.ENDC}\")\n print(f\"{bcolors.CREDBG2}{white_space}{message}{end}{bcolors.ENDC}\")\n print(f\"{bcolors.CREDBG2}{blank}{bcolors.ENDC}\")" }, { "identifier": "voucher_vision", "path": "vouchervision/vouchervision_main.py", "snippet": "def voucher_vision(cfg_file_path, dir_home, path_custom_prompts, cfg_test, progress_report, path_api_cost=None, test_ind = None, is_real_run=False):\n # get_n_overall = progress_report.get_n_overall()\n # progress_report.update_overall(f\"Working on {test_ind+1} of {get_n_overall}\")\n\n t_overall = perf_counter()\n\n # Load config file\n report_config(dir_home, cfg_file_path, system='VoucherVision')\n\n if cfg_test is None:\n cfg = load_config_file(dir_home, cfg_file_path, system='VoucherVision') # For VoucherVision\n else:\n cfg = cfg_test \n # user_cfg = load_config_file(dir_home, cfg_file_path)\n # cfg = Config(user_cfg)\n\n # Check to see if there are subdirs\n # Yes --> use the names of the subsirs as run_name\n run_name, dirs_list, has_subdirs = check_for_subdirs_VV(cfg)\n print(f\"run_name {run_name} dirs_list{dirs_list} has_subdirs{has_subdirs}\")\n\n # for dir_ind, dir_in in enumerate(dirs_list):\n # if has_subdirs:\n # cfg['leafmachine']['project']['dir_images_local'] = dir_in\n # cfg['leafmachine']['project']['run_name'] = run_name[dir_ind]\n\n # Dir structure\n if is_real_run:\n progress_report.update_overall(f\"Creating Output Directory Structure\")\n print_main_start(\"Creating Directory Structure\")\n Dirs = Dir_Structure(cfg)\n\n # logging.info(\"Hi\")\n logger = start_logging(Dirs, cfg)\n\n # Check to see if required ML files are ready to use\n if is_real_run:\n progress_report.update_overall(f\"Fetching 
LeafMachine2 Files\")\n ready_to_use = fetch_data(logger, dir_home, cfg_file_path)\n assert ready_to_use, \"Required ML files are not ready to use!\\nThe download may have failed,\\nor\\nthe directory structure of LM2 has been altered\"\n\n # Wrangle images and preprocess\n print_main_start(\"Gathering Images and Image Metadata\")\n Project = Project_Info(cfg, logger, dir_home, Dirs) # Where file names are modified\n\n # Save config file\n save_config_file(cfg, logger, Dirs)\n\n # Detect Archival Components\n print_main_start(\"Locating Archival Components\")\n Project = detect_archival_components(cfg, logger, dir_home, Project, Dirs, is_real_run, progress_report)\n\n # Save cropped detections\n crop_detections_from_images_VV(cfg, logger, dir_home, Project, Dirs)\n\n # Process labels\n Voucher_Vision = VoucherVision(cfg, logger, dir_home, path_custom_prompts, Project, Dirs)\n n_images = len(Voucher_Vision.img_paths)\n last_JSON_response, total_tokens_in, total_tokens_out = Voucher_Vision.process_specimen_batch(progress_report, is_real_run)\n \n if path_api_cost:\n cost_summary, data, total_cost = save_token_info_as_csv(Dirs, cfg['leafmachine']['LLM_version'], path_api_cost, total_tokens_in, total_tokens_out, n_images)\n add_to_expense_report(dir_home, data)\n logger.info(cost_summary)\n else:\n total_cost = None #TODO add config tests to expense_report\n\n t_overall_s = perf_counter()\n logger.name = 'Run Complete! :)'\n logger.info(f\"[Total elapsed time] {round((t_overall_s - t_overall)/60)} minutes\")\n space_saver(cfg, Dirs, logger)\n\n if is_real_run:\n progress_report.update_overall(f\"Run Complete! :sunglasses:\")\n\n for handler in logger.handlers[:]:\n handler.close()\n logger.removeHandler(handler)\n\n return last_JSON_response, total_cost" } ]
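The `voucher_vision` snippet quoted in the context above takes a config file path plus several run-control arguments and returns the last JSON response together with the total API cost. A call might look roughly like the sketch below; the concrete paths, the `None` placeholders for `path_custom_prompts` and `progress_report`, and running with `is_real_run=False` are illustrative assumptions rather than usage documented in this record.

    # Hedged sketch of invoking the pipeline entry point shown above.
    # Paths and None placeholders are illustrative assumptions.
    import os
    from vouchervision.vouchervision_main import voucher_vision

    dir_home = '/path/to/VoucherVision'                           # assumed repo root
    cfg_file_path = os.path.join(dir_home, 'VoucherVision.yaml')  # assumed config location

    last_json_response, total_cost = voucher_vision(
        cfg_file_path, dir_home,
        path_custom_prompts=None,   # assumption: default prompts
        cfg_test=None,              # load the config from cfg_file_path instead of a test dict
        progress_report=None,       # assumption: a real run passes the UI's progress object
        is_real_run=False)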
import os, yaml, platform, traceback
from vouchervision.LeafMachine2_Config_Builder import get_default_download_folder, write_config_file
from vouchervision.general_utils import validate_dir, print_main_fail
from vouchervision.vouchervision_main import voucher_vision
from general_utils import get_cfg_from_full_path
3,796
        'save_individual_csv_files_measurements': False,
        'save_individual_csv_files_landmarks': False,
        'save_individual_efd_files': False,
        'include_darwin_core_data_from_combined_file': False,
        'do_apply_conversion_factor': False
    }

    overlay_section = {
        'save_overlay_to_pdf': False,
        'save_overlay_to_jpgs': True,
        'overlay_dpi': 300,  # Between 100 to 300
        'overlay_background_color': 'black',  # Either 'white' or 'black'
        'show_archival_detections': True,
        'show_plant_detections': True,
        'show_segmentations': True,
        'show_landmarks': True,
        'ignore_archival_detections_classes': [],
        'ignore_plant_detections_classes': ['leaf_whole', 'specimen'],  # Could also include 'leaf_partial' and others if needed
        'ignore_landmark_classes': [],
        'line_width_archival': 12,  # Previous value given was 2
        'line_width_plant': 12,  # Previous value given was 6
        'line_width_seg': 12,  # 12 is specified as "thick"
        'line_width_efd': 12,  # 3 is specified as "thick" but 12 is given here
        'alpha_transparency_archival': 0.3,
        'alpha_transparency_plant': 0,
        'alpha_transparency_seg_whole_leaf': 0.4,
        'alpha_transparency_seg_partial_leaf': 0.3
    }

    archival_component_detector_section = {
        'detector_type': 'Archival_Detector',
        'detector_version': 'PREP_final',
        'detector_iteration': 'PREP_final',
        'detector_weights': 'best.pt',
        'minimum_confidence_threshold': 0.5,  # Default is 0.5
        'do_save_prediction_overlay_images': True,
        'ignore_objects_for_overlay': []
    }

    # Add the sections to the 'leafmachine' key
    config_data['leafmachine']['do'] = do_section
    config_data['leafmachine']['print'] = print_section
    config_data['leafmachine']['logging'] = logging_section
    config_data['leafmachine']['project'] = project_section
    config_data['leafmachine']['LLM_version'] = LLM_version
    config_data['leafmachine']['use_RGB_label_images'] = use_RGB_label_images
    config_data['leafmachine']['do_create_OCR_helper_image'] = do_create_OCR_helper_image
    config_data['leafmachine']['cropped_components'] = cropped_components_section
    config_data['leafmachine']['modules'] = modules_section
    config_data['leafmachine']['data'] = data_section
    config_data['leafmachine']['overlay'] = overlay_section
    config_data['leafmachine']['archival_component_detector'] = archival_component_detector_section

    return config_data, dir_home


def build_api_tests(api):
    dir_home = os.path.dirname(os.path.dirname(__file__))
    path_to_configs = os.path.join(dir_home,'demo','demo_configs')

    dir_home = os.path.dirname(os.path.dirname(__file__))
    dir_images_local = os.path.join(dir_home,'demo','demo_images')
    validate_dir(os.path.join(dir_home,'demo','demo_configs'))

    path_domain_knowledge = os.path.join(dir_home,'domain_knowledge','SLTP_UM_AllAsiaMinimalInRegion.xlsx')
    embeddings_database_name = os.path.splitext(os.path.basename(path_domain_knowledge))[0]
    prefix_removal = ''
    suffix_removal = ''
    catalog_numerical_only = False
    batch_size = 500
    do_create_OCR_helper_image = False

    # ### Option 1: "GPT 4" of ["GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5", "PaLM 2"]
    # LLM_version_user = 'Azure GPT 4'
    # ### Option 2: False of [False, True]
    # use_LeafMachine2_collage_images = False
    # ### Option 3: False of [False, True]
    # use_domain_knowledge = True

    test_results = {}

    if api == 'openai':
        OPT1, OPT2, OPT3 = TestOptionsAPI_openai.get_options()
    elif api == 'palm':
        OPT1, OPT2, OPT3 = TestOptionsAPI_palm.get_options()
    elif api == 'azure_openai':
        OPT1, OPT2, OPT3 = TestOptionsAPI_azure_openai.get_options()
    else:
        raise

    ind = -1
    ind_opt1 = -1
    ind_opt2 = -1
    ind_opt3 = -1

    for opt1 in OPT1:
        ind_opt1 += 1
        for opt2 in OPT2:
            ind_opt2 += 1
            for opt3 in OPT3:
                ind += 1
                ind_opt3 += 1

                LLM_version_user = opt1
                use_LeafMachine2_collage_images = opt2
                prompt_version = opt3

                filename = f"{ind}__OPT1-{ind_opt1}__OPT2-{ind_opt2}__OPT3-{ind_opt3}.yaml"
                run_name = f"{ind}__OPT1-{ind_opt1}__OPT2-{ind_opt2}__OPT3-{ind_opt3}"

                dir_output = os.path.join(dir_home,'demo','demo_output','run_name')
                validate_dir(dir_output)

                config_data, dir_home = assemble_config(dir_home, run_name, dir_images_local, dir_output,
                                                        prefix_removal, suffix_removal, catalog_numerical_only,
                                                        LLM_version_user, batch_size,
                                                        path_domain_knowledge, embeddings_database_name,
                                                        use_LeafMachine2_collage_images,
                                                        prompt_version, do_create_OCR_helper_image)
def build_VV_config():
    #############################################
    ############ Set common defaults ############
    #############################################
    # Changing the values below will set new
    # default values each time you open the
    # VoucherVision user interface
    #############################################
    #############################################
    #############################################

    dir_home = os.path.dirname(os.path.dirname(__file__))
    run_name = 'test'
    # dir_images_local = 'D:/Dropbox/LM2_Env/Image_Datasets/GBIF_BroadSample_3SppPerFamily1'
    dir_images_local = os.path.join(dir_home,'demo','demo_images')

    # The default output location is the computer's "Downloads" folder
    # You can set dir_output directly by typing the folder path,
    # OR you can uncomment the line "dir_output = default_output_folder"
    # to have VoucherVision save to the Downloads folder by default
    default_output_folder = get_default_download_folder()
    dir_output = default_output_folder
    # dir_output = 'D:/D_Desktop/LM2'

    prefix_removal = ''  # 'MICH-V-'
    suffix_removal = ''
    catalog_numerical_only = False

    LLM_version_user = 'Azure GPT 4'
    prompt_version = 'Version 2'  # from ["Version 1", "Version 1 No Domain Knowledge", "Version 2"]
    use_LeafMachine2_collage_images = False  # Use LeafMachine2 collage images
    do_create_OCR_helper_image = False

    batch_size = 500

    path_domain_knowledge = os.path.join(dir_home,'domain_knowledge','SLTP_UM_AllAsiaMinimalInRegion.xlsx')
    embeddings_database_name = os.path.splitext(os.path.basename(path_domain_knowledge))[0]

    #############################################
    #############################################
    ########## DO NOT EDIT BELOW HERE ###########
    #############################################
    #############################################
    return assemble_config(dir_home, run_name, dir_images_local, dir_output,
                           prefix_removal, suffix_removal, catalog_numerical_only, LLM_version_user, batch_size,
                           path_domain_knowledge, embeddings_database_name, use_LeafMachine2_collage_images,
                           prompt_version, do_create_OCR_helper_image, use_domain_knowledge=False)


def assemble_config(dir_home, run_name, dir_images_local, dir_output,
                    prefix_removal, suffix_removal, catalog_numerical_only, LLM_version_user, batch_size,
                    path_domain_knowledge, embeddings_database_name, use_LeafMachine2_collage_images,
                    prompt_version, do_create_OCR_helper_image_user, use_domain_knowledge=False):

    # Initialize the base structure
    config_data = {
        'leafmachine': {}
    }

    # Modular sections to be added to 'leafmachine'
    do_section = {
        'check_for_illegal_filenames': False,
        'check_for_corrupt_images_make_vertical': True,
    }

    print_section = {
        'verbose': True,
        'optional_warnings': True
    }

    logging_section = {
        'log_level': None
    }

    project_section = {
        'dir_output': dir_output,
        'run_name': run_name,
        'image_location': 'local',
        'batch_size': batch_size,
        'num_workers': 1,
        'dir_images_local': dir_images_local,
        'continue_run_from_partial_xlsx': '',
        'prefix_removal': prefix_removal,
        'suffix_removal': suffix_removal,
        'catalog_numerical_only': catalog_numerical_only,
        'use_domain_knowledge': use_domain_knowledge,
        'embeddings_database_name': embeddings_database_name,
        'build_new_embeddings_database': False,
        'path_to_domain_knowledge_xlsx': path_domain_knowledge,
        'prompt_version': prompt_version,
        'delete_all_temps': False,
        'delete_temps_keep_VVE': False,
    }

    modules_section = {
        'specimen_crop': True
    }

    LLM_version = LLM_version_user
    use_RGB_label_images = use_LeafMachine2_collage_images  # Use LeafMachine2 collage images
    do_create_OCR_helper_image = do_create_OCR_helper_image_user

    cropped_components_section = {
        'do_save_cropped_annotations': True,
        'save_cropped_annotations': ['label','barcode'],
        'save_per_image': False,
        'save_per_annotation_class': True,
        'binarize_labels': False,
        'binarize_labels_skeletonize': False
    }

    data_section = {
        'save_json_rulers': False,
        'save_json_measurements': False,
        'save_individual_csv_files_rulers': False,
        'save_individual_csv_files_measurements': False,
        'save_individual_csv_files_landmarks': False,
        'save_individual_efd_files': False,
        'include_darwin_core_data_from_combined_file': False,
        'do_apply_conversion_factor': False
    }

    overlay_section = {
        'save_overlay_to_pdf': False,
        'save_overlay_to_jpgs': True,
        'overlay_dpi': 300,  # Between 100 to 300
        'overlay_background_color': 'black',  # Either 'white' or 'black'
        'show_archival_detections': True,
        'show_plant_detections': True,
        'show_segmentations': True,
        'show_landmarks': True,
        'ignore_archival_detections_classes': [],
        'ignore_plant_detections_classes': ['leaf_whole', 'specimen'],  # Could also include 'leaf_partial' and others if needed
        'ignore_landmark_classes': [],
        'line_width_archival': 12,  # Previous value given was 2
        'line_width_plant': 12,  # Previous value given was 6
        'line_width_seg': 12,  # 12 is specified as "thick"
        'line_width_efd': 12,  # 3 is specified as "thick" but 12 is given here
        'alpha_transparency_archival': 0.3,
        'alpha_transparency_plant': 0,
        'alpha_transparency_seg_whole_leaf': 0.4,
        'alpha_transparency_seg_partial_leaf': 0.3
    }

    archival_component_detector_section = {
        'detector_type': 'Archival_Detector',
        'detector_version': 'PREP_final',
        'detector_iteration': 'PREP_final',
        'detector_weights': 'best.pt',
        'minimum_confidence_threshold': 0.5,  # Default is 0.5
        'do_save_prediction_overlay_images': True,
        'ignore_objects_for_overlay': []
    }

    # Add the sections to the 'leafmachine' key
    config_data['leafmachine']['do'] = do_section
    config_data['leafmachine']['print'] = print_section
    config_data['leafmachine']['logging'] = logging_section
    config_data['leafmachine']['project'] = project_section
    config_data['leafmachine']['LLM_version'] = LLM_version
    config_data['leafmachine']['use_RGB_label_images'] = use_RGB_label_images
    config_data['leafmachine']['do_create_OCR_helper_image'] = do_create_OCR_helper_image
    config_data['leafmachine']['cropped_components'] = cropped_components_section
    config_data['leafmachine']['modules'] = modules_section
    config_data['leafmachine']['data'] = data_section
    config_data['leafmachine']['overlay'] = overlay_section
    config_data['leafmachine']['archival_component_detector'] = archival_component_detector_section

    return config_data, dir_home


def build_api_tests(api):
    dir_home = os.path.dirname(os.path.dirname(__file__))
    path_to_configs = os.path.join(dir_home,'demo','demo_configs')

    dir_home = os.path.dirname(os.path.dirname(__file__))
    dir_images_local = os.path.join(dir_home,'demo','demo_images')
    validate_dir(os.path.join(dir_home,'demo','demo_configs'))

    path_domain_knowledge = os.path.join(dir_home,'domain_knowledge','SLTP_UM_AllAsiaMinimalInRegion.xlsx')
    embeddings_database_name = os.path.splitext(os.path.basename(path_domain_knowledge))[0]
    prefix_removal = ''
    suffix_removal = ''
    catalog_numerical_only = False
    batch_size = 500
    do_create_OCR_helper_image = False

    # ### Option 1: "GPT 4" of ["GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5", "PaLM 2"]
    # LLM_version_user = 'Azure GPT 4'
    # ### Option 2: False of [False, True]
    # use_LeafMachine2_collage_images = False
    # ### Option 3: False of [False, True]
    # use_domain_knowledge = True

    test_results = {}

    if api == 'openai':
        OPT1, OPT2, OPT3 = TestOptionsAPI_openai.get_options()
    elif api == 'palm':
        OPT1, OPT2, OPT3 = TestOptionsAPI_palm.get_options()
    elif api == 'azure_openai':
        OPT1, OPT2, OPT3 = TestOptionsAPI_azure_openai.get_options()
    else:
        raise

    ind = -1
    ind_opt1 = -1
    ind_opt2 = -1
    ind_opt3 = -1

    for opt1 in OPT1:
        ind_opt1 += 1
        for opt2 in OPT2:
            ind_opt2 += 1
            for opt3 in OPT3:
                ind += 1
                ind_opt3 += 1

                LLM_version_user = opt1
                use_LeafMachine2_collage_images = opt2
                prompt_version = opt3

                filename = f"{ind}__OPT1-{ind_opt1}__OPT2-{ind_opt2}__OPT3-{ind_opt3}.yaml"
                run_name = f"{ind}__OPT1-{ind_opt1}__OPT2-{ind_opt2}__OPT3-{ind_opt3}"

                dir_output = os.path.join(dir_home,'demo','demo_output','run_name')
                validate_dir(dir_output)

                config_data, dir_home = assemble_config(dir_home, run_name, dir_images_local, dir_output,
                                                        prefix_removal, suffix_removal, catalog_numerical_only,
                                                        LLM_version_user, batch_size,
                                                        path_domain_knowledge, embeddings_database_name,
                                                        use_LeafMachine2_collage_images,
                                                        prompt_version, do_create_OCR_helper_image)
write_config_file(config_data, os.path.join(dir_home,'demo','demo_configs'),filename=filename)
1
2023-10-30 23:25:20+00:00
8k
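To round out this record, here is a short sketch of how the config-builder pieces shown above fit together: `build_VV_config` assembles the default `leafmachine` configuration dictionary and `write_config_file` serializes it to YAML. Only the two function signatures are taken from the record; the target filename `VoucherVision.yaml` is an assumption for illustration.

    # Hedged sketch: build the default config and write it to disk as YAML.
    from vouchervision.VoucherVision_Config_Builder import build_VV_config
    from vouchervision.LeafMachine2_Config_Builder import write_config_file

    config_data, dir_home = build_VV_config()   # nested dict under the 'leafmachine' key, plus the repo root
    write_config_file(config_data, dir_home, filename="VoucherVision.yaml")  # filename is an assumption

    print(config_data['leafmachine']['project']['run_name'])  # 'test' with the defaults above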