Dataset columns:
language: stringclasses (6 values)
original_string: stringlengths (25 to 887k)
text: stringlengths (25 to 887k)
Python
def create_bgw(config, dirname='.'):
    """Create a new directory '2-bgw' and all its directories."""
    dirpath = os.path.join(dirname, '2-bgw')
    os.makedirs(dirpath)
    create_link_files(config, dirpath)
    create_epsilon(config, dirpath)
    create_sigma(config, dirpath)
    create_kernel(config, dirpath)
    create_absorption(config, dirpath)
    create_inteqp(config, dirpath)
Python
def deep_merge(original, update):
    """Recursively update a dictionary. Subdictionaries will not be overwritten but also updated.

    Keyword arguments:
    original -- the original dictionary to be overwritten
    update -- the dictionary to overwrite the original dictionary
    """
    for key in original:
        if key not in update:
            update[key] = original[key]
        elif isinstance(original[key], dict):
            deep_merge(original[key], update[key])
    return copy.deepcopy(update)
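A minimal usage sketch for deep_merge, assuming `copy` is imported; the dictionary names below are illustrative only. Note that `update` is also filled in place before the deep copy is returned.

import copy

defaults = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}   # hypothetical data
overrides = {'db': {'port': 5433}, 'debug': True}                        # hypothetical data

merged = deep_merge(defaults, overrides)
# merged == {'db': {'port': 5433, 'host': 'localhost'}, 'debug': True}
# Keys missing from 'overrides' are copied over from 'defaults', one nesting level at a time.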
Python
def validate_manager_options(user_input):
    """Validation function that checks if 'user_input' is one of the strings '0', '1' or '2'. No errors."""
    switcher = {
        '0': (True, '0'),
        '1': (True, '1'),
        '2': (True, '2'),
    }
    return switcher.get(user_input, (True, None))
Python
def sessions(self, event_id=1):
    """Returns the session ids for the given event_id."""
    auth = self.build_authentication_headers()
    r = request.Request(SERVER + "/sessions/{}.json".format(event_id), headers=auth)
    with request.urlopen(r) as fp:
        data = json.load(fp)
    return data
Python
def events(self):
    """Returns the events to which you have access"""
    auth = self.build_authentication_headers()
    r = request.Request(SERVER + "/events.json", headers=auth)
    with request.urlopen(r) as fp:
        data = json.load(fp)
    return data
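The two methods above assume a `SERVER` constant, a `build_authentication_headers` helper, and the `urllib.request`/`json` imports. A minimal, hypothetical client skeleton that would make them runnable might look like this; the header name and token handling are assumptions, not the original API:

import json
from urllib import request

SERVER = "https://example.org/api"   # assumed base URL

class Client:
    def __init__(self, api_token):
        self.api_token = api_token

    def build_authentication_headers(self):
        # Hypothetical scheme; the real service may expect different header names.
        return {"Authorization": "Bearer {}".format(self.api_token)}

    # events() and sessions() from above would be methods of this class.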
Python
def metadataTableBasicKeys():
    """Return a list of key names for the metadata table, which form a subset of all keys.
    Useful when hiding all the other keys which are not often used.
    """
    return ["Title", "Authors", "Description", "PubMed ID", "Contact Name", "Contact Email",
            "Release Date", "dataType", "Platform"]
Python
def name(self):
    """It's often useful to have a short human readable name for a dataset, which is unique.
    Eg: "Ruiz_2012_22991473".
    """
    value = self.metadataTable().at["handle", "ds_value"]
    return value if value else self.metadataTable().at["Title", "ds_value"]
Python
def isPrivate(self):
    """Return true if this dataset is private."""
    # Mongo container name is 'mongo' (local mongodb server).
    # Connects to the mongodb database and returns everything.
    myclient = pymongo.MongoClient(mongo_uri)
    database = myclient["dataportal_prod_meta"]
    collection = database["datasets"]
    result = collection.find({"dataset_id": self.datasetId})
    return dumps(result)
Python
def metadataTable(self, keys="basic"):
    """Return a table of dataset metadata, such as description and pubmed id,
    as a pandas DataFrame object.

    Example:
    > print(Dataset(6169).metadataTable().head())
                                                       ds_value
    ds_name
    cellularProteinExpression                              NULL
    Experimental Design       transcription profiling by array
    Platform                       Affymetrix HuGene-1_0-ST V1
    GEO Accession                                     GSE39210
    detectionThreshold                                     4.13
    """
    if self._metadataTable is None:  # make a query and construct the DataFrame and cache it
        # Mongo container name is 'mongo' (local mongodb server).
        # Connects to the mongodb database and returns everything.
        myclient = pymongo.MongoClient(mongo_uri)
        database = myclient["dataportal_prod_meta"]
        collection = database["datasets"]
        result = collection.find({"dataset_id": self.datasetId})
        self._metadataTable = dumps(result)
    return self._metadataTable
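The docstring above promises a pandas DataFrame, while the cached value is actually the bson `dumps()` string of the cursor. A hedged sketch of how the Mongo record could be turned into the promised (ds_name, ds_value) table; it assumes the metadata lives in a single flat document, which may not match the real schema:

import pandas

def _metadata_frame(collection, dataset_id):
    # Sketch only: flatten the first matching Mongo document into a table
    # shaped like the docstring example.
    doc = collection.find_one({"dataset_id": dataset_id})
    if doc is None:
        return pandas.DataFrame(columns=["ds_value"])
    doc.pop("_id", None)  # drop Mongo's internal id
    df = pandas.DataFrame({"ds_value": pandas.Series(doc)})
    df.index.name = "ds_name"
    return df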
Python
def updateMetadataValue(self, key, value, auditInstance=None):
    """Update values in dataset metadata matching key. Returns the number of records affected.
    If auditInstance is supplied, we also record this update.
    """
    if auditInstance:  # get current value
        result = cursor.execute("select ds_value from dataset_metadata where ds_name=%s and ds_id=%s",
                                (key, self.datasetId))
        value_from = result[0][0] if len(result) > 0 else None
        auditInstance.record("dataset_metadata", "ds_value", value_from, value,
                             "ds_name='%s' and ds_id=%s" % (key, self.datasetId))
    return _runSql("update dataset_metadata set ds_value=%s where ds_name=%s and ds_id=%s",
                   (value, key, self.datasetId), type="update")
Python
def summaryTable(self):
    """Return a pandas DataFrame object that summarises all the datasets in the system.
    This function should move outside this class, as it is not specific to one dataset.

    Example:
    >
    """
    result = _runSql("select dataset_id, title, authors, description, generic_sample_type, handle, pubmed "
                     "from merged_samples;")
    df = pandas.DataFrame(result, columns=["dataset_id", "title", "authors", "description",
                                           "generic_sample_type", "handle", "pubmed"])  # , "author", "description"
    df.drop_duplicates("dataset_id", inplace=True)  # Drop duplicated records.
    return df
Python
def sampleTable(self):
    """Return samples in the dataset as a pandas DataFrame object."""
    if self._sampleTable is None:  # make a query, construct the DataFrame and cache it
        # result = cursor.execute("select sample_id, replicate_group_id, sample_name, sample_name_long,
        #     sample_type, sample_type_long, generic_sample_type, generic_sample_type_long, sample_description,
        #     tissue_organism_part, parental_cell_type, final_cell_type, cell_line, reprogramming_method,
        #     developmental_stage, media, disease_state, labelling, genetic_modification, facs_profile,
        #     age, sex, organism, chip_type, dataset_id from samples where dataset_id=%s", (self.datasetId,))
        # <-- Correct statement but because dataset_id columns not yet loaded into the database,
        #     using this query instead (limit 100).
        # data = cursor.fetchall()
        data = _runSql(
            "select sample_id, replicate_group_id, sample_name, sample_name_long, sample_type, "
            "sample_type_long, generic_sample_type, generic_sample_type_long, sample_description, "
            "tissue_organism_part, parental_cell_type, final_cell_type, cell_line, reprogramming_method, "
            "developmental_stage, media, disease_state, labelling, genetic_modification, facs_profile, "
            "age, sex, organism, chip_type, dataset_id from samples where dataset_id=%s", (self.datasetId,))
        df = pandas.DataFrame(data)  # empty DataFrame with id as index
        df.columns = ['sample_id', 'replicate_group_id', 'sample_name', 'sample_name_long', 'sample_type',
                      'sample_type_long', 'generic_sample_type', 'generic_sample_type_long',
                      'sample_description', 'tissue_organism_part', 'parental_cell_type', 'final_cell_type',
                      'cell_line', 'reprogramming_method', 'developmental_stage', 'media', 'disease_state',
                      'labelling', 'genetic_modification', 'facs_profile', 'age', 'sex', 'organism',
                      'chip_type', 'dataset_id']
        # df.set_index('sample_id', inplace=True)
        self._sampleTable = df
        # df.drop_duplicates(inplace=True)  # "sample_id", inplace=True)  # Drop duplicated records.
    return self._sampleTable
Python
def atlasSampleTable(self):
    """Return atlas samples in the dataset as a pandas DataFrame object."""
    if self._sampleTable is None:  # make a query, construct the DataFrame and cache it
        data = _runSql(
            "select sample_id, annotator, evidence, blood_tier1, blood_tier2, blood_tier3, "
            "imac_tier1, imac_tier2, imac_tier3, phenotype, activation_status, display_metadata, "
            "include_blood, include_imac, dataset_id from atlas where dataset_id=%s", (self.datasetId,))
        df = pandas.DataFrame(data)
        df.columns = ["sample_id", "annotator", "evidence", "blood_tier1", "blood_tier2", "blood_tier3",
                      "imac_tier1", "imac_tier2", "imac_tier3", "phenotype", "activation_status",
                      "display_metadata", "include_blood", "include_imac", "dataset_id"]
        # df.set_index('sample_id', inplace=True)
        # There are duplicate records in the atlas table - to be addressed in future table versions.
        df.drop_duplicates(inplace=True)
        self._sampleTable = df
    return self._sampleTable
Python
def updateSampleValue(self, key, sampleIds, value):
    """Update values in samples in this dataset matching key and sampleIds.
    Returns the number of sampleIds affected.

    Example:
    > print(Dataset(6313).updateSampleValue("Organism", ["GSM1026799"], "Mus musculus"))
    > 1
    """
    results = []
    for sampleId in sampleIds:
        # get current value
        result = _runSql("select {} from samples where sample_id=%s and dataset_id=%s".format(key),
                         (sampleId, self.datasetId))
        results.append(_runSql("update samples set {}=%s where dataset_id=%s and sample_id=%s;".format(key),
                               (value, self.datasetId, sampleId), type="update"))
    value_from = result[0][0] if len(result) > 0 else None
    print("New Value", value)
    print("Original", value_from)
    print("Updated: ", results)
    return {"Updated": value, "Original": value_from}
Python
def updateAtlasValue(self, key, sampleIds, value):
    """Updates a record in the atlas table."""
    results = []
    for sampleId in sampleIds:
        # get current value
        result = _runSql("select {} from atlas where sample_id=%s and dataset_id=%s".format(key),
                         (sampleId, self.datasetId))
        results.append(_runSql("update atlas set {}=%s where dataset_id=%s and sample_id=%s;".format(key),
                               (value, self.datasetId, sampleId), type="update"))
    value_from = result[0][0] if len(result) > 0 else None
    print("New Value", value)
    print("Original", value_from)
    print("Updated: ", results)
    return {"Updated": value, "Original": value_from}
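Both update methods interpolate `key` directly into the SQL string with `str.format`, which is only safe if `key` is guaranteed to be a known column name. A small, hypothetical guard that could run before the query is built; the column list is illustrative, not the real schema:

ALLOWED_COLUMNS = {"annotator", "evidence", "phenotype", "activation_status"}  # assumed subset

def _checked_column(key):
    # Column names cannot be passed as SQL parameters, so validate them
    # against a whitelist before formatting them into the statement.
    if key not in ALLOWED_COLUMNS:
        raise ValueError("unknown column: {}".format(key))
    return key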
Python
def expressionMatrix(self, key):
    """Return expression matrix for this dataset as a pandas DataFrame.

    Ran the following code and found that for this dataset (58302,24), it took 0.167 sec
    using read_csv and 0.127 sec using read_hdf (on my laptop).

        import time
        t0 = time.time()
        df = pandas.read_csv(os.path.join(path_to_expression_files, "7283_gene_count_frags.txt"),
                             sep="\t", index_col=0)
        print(time.time()-t0)
        t1 = time.time()
        df = pandas.read_hdf(os.path.join(path_to_expression_files, "7283.h5"), "/dataframe/counts")
        print(time.time()-t1)
    """
    if key not in self._expressionMatrix:
        self._expressionMatrix[key] = pandas.read_csv(self.expressionFilePath(key), sep="\t", index_col=0)
    return self._expressionMatrix[key]
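Given the timing note in the docstring, a hedged sketch of caching the tab-separated matrix as HDF5 so that later loads can use `read_hdf`; the file layout and key name are assumptions:

import os
import pandas

def load_expression(txt_path, h5_path, key="/dataframe/counts"):
    # Prefer the HDF5 copy when it exists; otherwise parse the text file
    # once and write the HDF5 copy for next time.
    if os.path.exists(h5_path):
        return pandas.read_hdf(h5_path, key=key)
    df = pandas.read_csv(txt_path, sep="\t", index_col=0)
    df.to_hdf(h5_path, key=key)
    return df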
Python
def exportGeneIdProbeIdMapping():
    """Use this function to export a file which has geneId,probeId columns."""
    conn = psycopg2.connect(postgres_uri)
    cursor = conn.cursor()
    cursor.execute("select distinct from_id,to_id from stemformatics.feature_mappings where db_id=59")
    result = cursor.fetchall()
    cursor.close()
    conn.close()
    pandas.DataFrame(result, columns=["geneId", "probeId"]).to_csv(
        "/mnt/data/portal_data/GeneIdProbeIdMapping.tsv", sep="\t", index=False)
Python
def move_serial(q_move):
    '''The protocol consists of sending one letter per degree the robot should move.
    For example, motor one (the shoulder) uses the letter a (-1 degree) or the
    letter A (+1 degree); the next motor uses chr(ord('a')+1), and so on.
    '''
    # print(i, move)
    comandos = ''
    for move, i in zip(q_move, range(0, len(q_move))):
        command = 65
        if move > 0:
            for j in range(0, int(move)):
                # self.serial.write(chr(command+i))
                comandos += chr(command + i)
        elif move < 0:
            for j in range(int(move), 0):
                # self.serial.write(b'S')
                comandos += chr(command + i + 32)
    print(comandos.encode())
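A quick illustration of the encoding, assuming `q_move` holds per-joint moves in degrees; the values below are made up:

# Joint 0 moves +3 degrees -> 'AAA'; joint 1 moves -2 degrees -> 'bb'.
move_serial([3, -2])
# prints b'AAAbb'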
Python
def _get_shebang_path(executable: str, is_launcher: bool) -> bytes:
    """Get the interpreter path in the shebang line.

    The launcher can just use the command as-is. Otherwise if the path contains
    whitespace or is too long, both distlib and installer use a clever hack to
    make the shebang after ``/bin/sh``, where the interpreter path is quoted.
    """
    if is_launcher or " " not in executable and (len(executable) + 3) <= 127:
        return executable.encode("utf-8")
    return shlex.quote(executable).encode("utf-8")
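A brief illustration of the two branches, assuming `shlex` is imported; the paths are invented:

import shlex

_get_shebang_path("/usr/bin/python3", is_launcher=False)
# -> b'/usr/bin/python3'  (short, no whitespace: used directly after '#!')

_get_shebang_path("/opt/my env/bin/python3", is_launcher=False)
# -> b"'/opt/my env/bin/python3'"  (quoted form, intended for the /bin/sh shebang trick)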
Python
def which(self, command: str) -> str | None:
    """Get the full path of the given executable against this environment."""
    if not os.path.isabs(command) and command.startswith("python"):
        python = os.path.splitext(command)[0]
        version = python[6:]
        this_version = self.interpreter.version
        if not version or str(this_version).startswith(version):
            return self.interpreter.executable
    # Fallback to use shutil.which to find the executable
    this_path = self.get_paths()["scripts"]
    python_root = os.path.dirname(self.interpreter.executable)
    new_path = os.pathsep.join([python_root, this_path, os.getenv("PATH", "")])
    return shutil.which(command, path=new_path)
Python
def pip_command(self) -> list[str]:
    """Get a pip command for this environment, and download one if not available.
    Return a list of args like ['python', '-m', 'pip']
    """
    from pip import __file__ as pip_location

    python_major = self.interpreter.major
    executable = self.interpreter.executable
    proc = subprocess.run([executable, "-Esm", "pip", "--version"], capture_output=True)
    if proc.returncode == 0:
        # The pip has already been installed with the executable, just use it
        return [executable, "-Esm", "pip"]
    if python_major == 3:
        # Use the host pip package.
        return [executable, "-Es", os.path.dirname(pip_location)]
    # For py2, only pip<21 is eligible, download a pip wheel from the Internet.
    pip_wheel = self.project.cache_dir / "pip.whl"
    if not pip_wheel.is_file():
        self._download_pip_wheel(pip_wheel)
    return [executable, str(pip_wheel / "pip")]
Python
def install_wheel(candidate: Candidate, scheme: dict[str, str] | None = None) -> None:
    """Install a normal wheel file into the environment.
    Optional install scheme can be given to change the destination.
    """
    wheel = candidate.build()
    env = candidate.environment
    destination = InstallDestination(
        scheme or env.get_paths(),
        interpreter=env.interpreter.executable,
        script_kind=_get_kind(env),
    )
    with WheelFile.open(wheel) as source:
        install(
            source=source,
            destination=destination,
            # Additional metadata that is generated by the installation tool.
            additional_metadata={
                "INSTALLER": f"installer {__version__}".encode(),
            },
        )
Python
def install_editable(candidate: Candidate, scheme: dict[str, str] | None = None) -> None:
    """Install package in editable mode using the legacy `python setup.py develop`"""
    # TODO: PEP 660
    from pdm.builders import EditableBuilder

    candidate.prepare()
    env = candidate.environment
    assert candidate.source_dir
    builder = EditableBuilder(candidate.source_dir, env)
    setup_path = builder.ensure_setup_py()
    paths = scheme or env.get_paths()
    install_script = Path(__file__).with_name("_editable_install.py")
    install_args = [
        env.interpreter.executable,
        "-u",
        str(install_script),
        setup_path,
        paths["prefix"],
        paths["purelib"],
        paths["scripts"],
    ]
    builder.install(["setuptools"])
    builder.subprocess_runner(install_args, candidate.source_dir)
Python
def install_wheel_with_cache(candidate: Candidate, scheme: dict[str, str] | None = None) -> None:
    """Only create .pth files referring to the cached package.
    If the cache doesn't exist, create one.
    """
    wheel = candidate.build()
    wheel_stem = Path(wheel).stem
    cache_path = candidate.environment.project.cache("packages") / wheel_stem
    package_cache = CachedPackage(cache_path)
    if not cache_path.is_dir():
        logger.debug("Installing wheel into cached location %s", cache_path)
        cache_path.mkdir(exist_ok=True)
        install_wheel(candidate, package_cache.scheme())
    _install_from_cache(candidate, package_cache)
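The `.pth` linking done by `_install_from_cache` is not shown here. As a rough, hypothetical sketch of the general idea only: a one-line `.pth` file dropped into site-packages makes a cached package importable without copying it (file name and paths are illustrative):

from pathlib import Path

def write_pth(purelib_dir: str, package_name: str, cached_lib_dir: str) -> None:
    # Each line of a .pth file in site-packages is appended to sys.path at startup,
    # so pointing one at the cached package directory is enough to expose it.
    pth_file = Path(purelib_dir) / f"{package_name}.pth"
    pth_file.write_text(cached_lib_dir + "\n")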
Python
def requirement_preference(self, requirement: Requirement) -> tuple:
    """Return the preference of a requirement to find candidates.

    - Editable requirements are preferred.
    - File links are preferred.
    - The one with narrower specifierset is preferred.
    """
    editable = requirement.editable
    is_file = requirement.is_file_or_url
    is_prerelease = (
        bool(requirement.specifier.prereleases)
        if requirement.specifier is not None
        else False
    )
    specifier_parts = len(requirement.specifier) if requirement.specifier else 0
    return (editable, is_file, is_prerelease, specifier_parts)
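The returned tuple reads as a sort key where larger values mean more preferred (True sorts above False, more specifier parts above fewer). A small, hedged illustration of using it that way; `provider` and `requirements` are stand-ins, not the library's own call site:

def most_preferred(provider, requirements):
    # provider is assumed to expose requirement_preference() as defined above;
    # tuples compare element by element, so max() picks the most specific requirement.
    return max(requirements, key=provider.requirement_preference)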
Python
def pulse(self) -> bool:
    """:returns whether the octopus flashed as a result of the pulse"""
    if self.flashed:
        return False
    self.energy += 1
    if self.energy > Octopus.MAX_ENERGY:
        self.flashed = True
    return self.flashed
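For context, a minimal sketch of the surrounding Octopus class this method assumes. The attribute names come from the method itself; MAX_ENERGY = 9 and the reset rule follow the usual Advent of Code day 11 puzzle and are assumptions:

class Octopus:
    MAX_ENERGY = 9  # assumed threshold: energy above this causes a flash

    def __init__(self, energy: int):
        self.energy = energy
        self.flashed = False

    def reset(self) -> None:
        # After a cycle, flashed octopi drop back to energy 0.
        if self.flashed:
            self.energy = 0
            self.flashed = False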
Python
def cycle(self) -> bool:
    """:returns whether all octopi flashed simultaneously this cycle"""
    coordinates = list(filter(lambda c: self._map[c] is not None, self._map.keys()))
    for coordinate in coordinates.copy():
        if self._map[coordinate].pulse():
            self.flash_at(coordinate)
    if all(self._map[c].flashed for c in coordinates):
        # don't bother with resetting in this case since we never want to
        # simulate beyond this point anyway
        return True
    for coordinate in coordinates:
        self._map[coordinate].reset()
    return False
Python
def run_a():
    """This is the fun part where you get to model the fishies nicely"""
    school = parse_input()
    for _ in range(80):
        school.cycle()
    print(len(school))
Python
def run_b():
    """When the direct modeling no longer scales, we need to kill each fish's individuality :("""
    school = parse_input()
    fish_by_timer: Dict[int, int] = {timer: 0 for timer in range(Lanternfish.cycling_time_young)}
    for fish in school.fish:
        fish_by_timer[fish.timer] += 1
    for _ in range(256):
        fish_that_spawn = fish_by_timer[0]
        fish_by_timer = {
            timer: fish_by_timer[(timer + 1) % Lanternfish.cycling_time_young]
            for timer in range(Lanternfish.cycling_time_young)
        }
        fish_by_timer[Lanternfish.cycling_time - 1] += fish_that_spawn
    print(sum(fish_by_timer.values()))
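The trick in run_b is to keep only a count per timer value instead of one object per fish. A self-contained sketch of the same bucket rotation, assuming the standard lanternfish timings (adult cycle 7 days, newborn cycle 9 days); the input list is made up:

def count_fish(timers, days):
    # buckets[t] = number of fish whose timer currently reads t (0..8)
    buckets = [0] * 9
    for t in timers:
        buckets[t] += 1
    for _ in range(days):
        spawning = buckets[0]
        buckets = buckets[1:] + [spawning]  # every timer ticks down; 0 wraps to 8 (newborns)
        buckets[6] += spawning              # the parents restart at 6
    return sum(buckets)

# count_fish([3, 4, 3, 1, 2], 80) == 5934   (the well-known example answer)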
Python
def calc_blur_gradient_loss(net_type, target_blur_folder, faig_folder, save_faig_maskdeblurfilter_blur_loss_txt,
                            save_faig_maskdenoisefilter_blur_loss_txt, ig_folder,
                            save_ig_maskdeblurfilter_blur_loss_txt, save_ig_maskdenoisefilter_blur_loss_txt,
                            abs_filter_change_folder, save_abs_filter_change_maskdeblurfilter_blur_loss_txt,
                            save_abs_filter_change_maskdenoisefilter_blur_loss_txt, random_folder,
                            save_random_maskdeblurfilter_blur_loss_txt, save_random_maskdenoisefilter_blur_loss_txt,
                            sub_func_folder_names, sub_input_folder_name):
    """Quantify the discovered filters' contribution to the deblur function by measuring the output
    difference of the target model and the substituted model. The output difference is calculated on
    image gradients of their gray counterpart.

    Args:
        net_type (str): network type. Default: srcnn_style or srresnet
        target_blur_folder (str): folder path that contains the gradient map of the target model's
            output towards blurry input.
        faig_folder (str): folder path that contains the gradient map of the substituted-faig-discovered
            model's output towards blurry input.
        save_faig_maskdeblurfilter_blur_loss_txt (str): txt path that records the output difference of the
            target model and the substituted-faig-discovered (blur) model.
        save_faig_maskdenoisefilter_blur_loss_txt (str): txt path that records the output difference of the
            target model and the substituted-faig-discovered (noise) model.
        ig_folder (str): folder path that contains the gradient map of the substituted-ig-discovered
            model's output towards blurry input.
        save_ig_maskdeblurfilter_blur_loss_txt (str): txt path that records the output difference of the
            target model and the substituted-ig-discovered (blur) model.
        save_ig_maskdenoisefilter_blur_loss_txt (str): txt path that records the output difference of the
            target model and the substituted-ig-discovered (noise) model.
        abs_filter_change_folder (str): folder path that contains the gradient map of the
            substituted-abs_filter_change-discovered model's output towards blurry input.
        save_abs_filter_change_maskdeblurfilter_blur_loss_txt (str): txt path that records the output
            difference of the target model and the substituted-abs_filter_change-discovered (blur) model.
        save_abs_filter_change_maskdenoisefilter_blur_loss_txt (str): txt path that records the output
            difference of the target model and the substituted-abs_filter_change-discovered (noise) model.
        random_folder (str): folder path that contains the gradient map of the substituted-random-discovered
            model's output towards blurry input.
        save_random_maskdeblurfilter_blur_loss_txt (str): txt path that records the output difference of the
            target model and the substituted-random-discovered (blur) model.
        save_random_maskdenoisefilter_blur_loss_txt (str): txt path that records the output difference of the
            target model and the substituted-random-discovered (noise) model.
        sub_func_folder_names (list): Default: ['maskdeblurfilter', 'maskdenoisefilter']
        sub_input_folder_name (str): Default: 'Blur2_LRbicx2'
    """
    deblur_func_imglist = list(sorted(glob.glob(os.path.join(target_blur_folder, '*.npy'))))

    faig_maskdeblurfilter_blur_loss = []
    faig_maskdenoisefilter_blur_loss = []
    ig_maskdeblurfilter_blur_loss = []
    ig_maskdenoisefilter_blur_loss = []
    abs_filter_change_maskdeblurfilter_blur_loss = []
    abs_filter_change_maskdenoisefilter_blur_loss = []
    random_maskdeblurfilter_blur_loss = []
    random_maskdenoisefilter_blur_loss = []

    if net_type == 'srcnn_style':
        total_neuron_nums = 156224
    elif net_type == 'srresnet':
        total_neuron_nums = 151936

    for proportion in [1, 3, 5, 10]:
        # for proportion in range(0, 101):
        selected_num_neurons = int(total_neuron_nums * proportion / 100)
        neuron_folder = f'{selected_num_neurons}kernels'
        faig_neuron_folder_path = os.path.join(faig_folder, neuron_folder)
        ig_neuron_folder_path = os.path.join(ig_folder, neuron_folder)
        abs_filter_change_neuron_folder_path = os.path.join(abs_filter_change_folder, neuron_folder)
        random_neuron_folder_path = os.path.join(random_folder, neuron_folder)

        for idx, sub_folder in enumerate(sub_func_folder_names):
            faig_neuron_sub_folder_path = os.path.join(faig_neuron_folder_path, sub_folder)
            ig_neuron_sub_folder_path = os.path.join(ig_neuron_folder_path, sub_folder)
            abs_filter_change_neuron_sub_folder_path = os.path.join(abs_filter_change_neuron_folder_path, sub_folder)
            random_neuron_sub_folder_path = os.path.join(random_neuron_folder_path, sub_folder)

            faig_imglist = list(
                sorted(glob.glob(os.path.join(faig_neuron_sub_folder_path, sub_input_folder_name, '*.npy'))))
            ig_imglist = list(
                sorted(glob.glob(os.path.join(ig_neuron_sub_folder_path, sub_input_folder_name, '*.npy'))))
            abs_filter_change_imglist = list(
                sorted(
                    glob.glob(
                        os.path.join(abs_filter_change_neuron_sub_folder_path, sub_input_folder_name, '*.npy'))))
            random_imglist = list(
                sorted(glob.glob(os.path.join(random_neuron_sub_folder_path, sub_input_folder_name, '*.npy'))))

            faig_gradient_loss = 0.0
            ig_gradient_loss = 0.0
            abs_filter_change_gradient_loss = 0.0
            random_gradient_loss = 0.0

            for img_idx, img_path in enumerate(deblur_func_imglist):
                refer_img_path = img_path
                faig_img_path = faig_imglist[img_idx]
                ig_img_path = ig_imglist[img_idx]
                abs_filter_change_img_path = abs_filter_change_imglist[img_idx]
                random_img_path = random_imglist[img_idx]

                refer_gradient = np.load(refer_img_path)
                faig_gradient = np.load(faig_img_path)
                ig_gradient = np.load(ig_img_path)
                abs_filter_change_gradient = np.load(abs_filter_change_img_path)
                random_gradient = np.load(random_img_path)

                # for better visualization, we multiply the results by 1000
                faig_gradient_loss += np.mean((faig_gradient - refer_gradient)**2) * 1000
                ig_gradient_loss += np.mean((ig_gradient - refer_gradient)**2) * 1000
                abs_filter_change_gradient_loss += np.mean((abs_filter_change_gradient - refer_gradient)**2) * 1000
                random_gradient_loss += np.mean((random_gradient - refer_gradient)**2) * 1000

            faig_gradient_loss /= len(deblur_func_imglist)
            ig_gradient_loss /= len(deblur_func_imglist)
            abs_filter_change_gradient_loss /= len(deblur_func_imglist)
            random_gradient_loss /= len(deblur_func_imglist)

            if idx == 0:
                print('Calculate the effectiveness of masking discovered deblur filters for deblur function. '
                      'Higher value is better!')
                print(f'faig:{round(faig_gradient_loss, 4)}\t ig:{round(ig_gradient_loss, 4)}\t '
                      f'abs_filter_change:{round(abs_filter_change_gradient_loss, 4)}\t '
                      f'random:{round(random_gradient_loss, 4)}')
                faig_maskdeblurfilter_blur_loss.append(faig_gradient_loss)
                ig_maskdeblurfilter_blur_loss.append(ig_gradient_loss)
                abs_filter_change_maskdeblurfilter_blur_loss.append(abs_filter_change_gradient_loss)
                random_maskdeblurfilter_blur_loss.append(random_gradient_loss)
            else:
                print('Calculate the effectiveness of masking discovered denoise filters for deblur function. '
                      'Lower value is better!')
                print(f'faig:{round(faig_gradient_loss, 4)}\t ig:{round(ig_gradient_loss, 4)}\t '
                      f'abs_filter_change:{round(abs_filter_change_gradient_loss, 4)}\t '
                      f'random:{round(random_gradient_loss, 4)}')
                faig_maskdenoisefilter_blur_loss.append(faig_gradient_loss)
                ig_maskdenoisefilter_blur_loss.append(ig_gradient_loss)
                abs_filter_change_maskdenoisefilter_blur_loss.append(abs_filter_change_gradient_loss)
                random_maskdenoisefilter_blur_loss.append(random_gradient_loss)

    faig_maskdeblurfilter_blur_loss = np.array(faig_maskdeblurfilter_blur_loss)
    faig_maskdenoisefilter_blur_loss = np.array(faig_maskdenoisefilter_blur_loss)
    ig_maskdeblurfilter_blur_loss = np.array(ig_maskdeblurfilter_blur_loss)
    ig_maskdenoisefilter_blur_loss = np.array(ig_maskdenoisefilter_blur_loss)
    abs_filter_change_maskdeblurfilter_blur_loss = np.array(abs_filter_change_maskdeblurfilter_blur_loss)
    abs_filter_change_maskdenoisefilter_blur_loss = np.array(abs_filter_change_maskdenoisefilter_blur_loss)
    random_maskdeblurfilter_blur_loss = np.array(random_maskdeblurfilter_blur_loss)
    random_maskdenoisefilter_blur_loss = np.array(random_maskdenoisefilter_blur_loss)

    # write the result to txt
    np.savetxt(save_faig_maskdeblurfilter_blur_loss_txt, faig_maskdeblurfilter_blur_loss, delimiter=',', fmt='%.6f')
    np.savetxt(save_faig_maskdenoisefilter_blur_loss_txt, faig_maskdenoisefilter_blur_loss, delimiter=',', fmt='%.6f')
    np.savetxt(save_ig_maskdeblurfilter_blur_loss_txt, ig_maskdeblurfilter_blur_loss, delimiter=',', fmt='%.6f')
    np.savetxt(save_ig_maskdenoisefilter_blur_loss_txt, ig_maskdenoisefilter_blur_loss, delimiter=',', fmt='%.6f')
    np.savetxt(
        save_abs_filter_change_maskdeblurfilter_blur_loss_txt,
        abs_filter_change_maskdeblurfilter_blur_loss, delimiter=',', fmt='%.6f')
    np.savetxt(
        save_abs_filter_change_maskdenoisefilter_blur_loss_txt,
        abs_filter_change_maskdenoisefilter_blur_loss, delimiter=',', fmt='%.6f')
    np.savetxt(save_random_maskdeblurfilter_blur_loss_txt, random_maskdeblurfilter_blur_loss, delimiter=',',
               fmt='%.6f')
    np.savetxt(save_random_maskdenoisefilter_blur_loss_txt, random_maskdenoisefilter_blur_loss, delimiter=',',
               fmt='%.6f')
Python
def faig(img1, img2, gt_img, baseline_model_path, target_model_path, total_step, conv_index):
    """Filter Attribution Integrated Gradients of a single image.

    When finding blurry filters, img1 is a blurry image, while img2 is a noisy image.
    When finding noisy filters, img1 is a noisy image, while img2 is a blurry image.

    Args:
        img1 (tensor): with the shape (1, 3, H, W)
        img2 (tensor): with the shape (1, 3, H, W)
        gt_img (tensor): with the shape (1, 3, H, W)
        baseline_model_path (str): path of baseline model
        target_model_path (str): path of target model
        total_step (int): total steps in the approximation of the integral
        conv_index (list): index of conv layer in srcnn-style like network

    Returns:
        faig_img1: faig result of img1
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    baseline_state_dict = torch.load(baseline_model_path)['params_ema']
    target_state_dict = torch.load(target_model_path)['params_ema']

    # calculate the gradient of two images with different degradation
    total_gradient_img1 = 0
    total_gradient_img2 = 0

    # approximate the integral via 100 discrete points uniformly
    # sampled along the straight-line path
    for step in range(0, total_step):
        alpha = step / total_step
        interpolate_net_state_dict = {}
        for key, _ in baseline_state_dict.items():
            # a straight-line path between baseline model and target model
            interpolate_net_state_dict[key] = alpha * baseline_state_dict[key] + (1 - alpha) * target_state_dict[key]

        interpolate_net = srcnn_style_net(scale=2)
        interpolate_net.eval()
        interpolate_net = interpolate_net.to(device)
        interpolate_net.load_state_dict(interpolate_net_state_dict)

        # for degradation 1
        interpolate_net.zero_grad()
        output1 = interpolate_net(img1)
        # measure the distance between the network output and the ground-truth
        # refer to the equation 3 in the main paper
        criterion = torch.nn.MSELoss(reduction='sum')
        loss1 = criterion(gt_img, output1)
        # calculate the gradient of F to every filter
        loss1.backward()
        grad_list_img1 = []
        for idx in conv_index:
            grad = interpolate_net.features[idx].weight.grad
            grad = grad.reshape(-1, 3, 3)
            grad_list_img1.append(grad)
        grad_list_img1 = torch.cat(grad_list_img1, dim=0)
        total_gradient_img1 += grad_list_img1

        # for degradation 2
        interpolate_net.zero_grad()
        output2 = interpolate_net(img2)
        # measure the distance between the network output and the ground-truth
        # refer to the equation 3 in the main paper
        loss2 = criterion(gt_img, output2)
        # calculate the gradient of F to every filter
        loss2.backward()
        grad_list_img2 = []
        for idx in conv_index:
            grad = interpolate_net.features[idx].weight.grad
            grad = grad.reshape(-1, 3, 3)
            grad_list_img2.append(grad)
        grad_list_img2 = torch.cat(grad_list_img2, dim=0)
        total_gradient_img2 += grad_list_img2

    # calculate the diff of filters between the baseline model and target model
    diff_list = []
    baseline_net = srcnn_style_net(scale=2)
    baseline_net.eval()
    baseline_net = baseline_net.to(device)
    baseline_net.load_state_dict(baseline_state_dict)

    target_net = srcnn_style_net(scale=2)
    target_net.eval()
    target_net = target_net.to(device)
    target_net.load_state_dict(target_state_dict)

    for idx in conv_index:
        variation = baseline_net.features[idx].weight.detach() - target_net.features[idx].weight.detach()
        variation = variation.reshape(-1, 3, 3)
        diff_list.append(variation)
    diff_list = torch.cat(diff_list, dim=0)

    # multiply the accumulated gradients of img1 with the diff
    # refer to equation 6 in the main paper
    single_faig_img1 = total_gradient_img1 * diff_list / total_step
    single_faig_img1 = torch.sum(torch.sum(abs(single_faig_img1), dim=1), dim=1)

    # multiply the accumulated gradients of img2 with the diff
    # refer to equation 6 in the main paper
    single_faig_img2 = total_gradient_img2 * diff_list / total_step
    single_faig_img2 = torch.sum(torch.sum(abs(single_faig_img2), dim=1), dim=1)

    # find discriminative filters for a specific degradation
    # refer to equation 7 in the main paper
    faig_img1 = single_faig_img1 - single_faig_img2
    return faig_img1.cpu().numpy()
Python
def calc_noise_gradient_loss(net_type, target_noise_folder, faig_folder,
                             save_faig_maskdenoisefilter_noise_loss_txt,
                             save_faig_maskdeblurfilter_noise_loss_txt, ig_folder,
                             save_ig_maskdenoisefilter_noise_loss_txt,
                             save_ig_maskdeblurfilter_noise_loss_txt, abs_filter_change_folder,
                             save_abs_filter_change_maskdenoisefilter_noise_loss_txt,
                             save_abs_filter_change_maskdeblurfilter_noise_loss_txt, random_folder,
                             save_random_maskdenoisefilter_noise_loss_txt,
                             save_random_maskdeblurfilter_noise_loss_txt, sub_func_folder_names,
                             sub_input_folder_name):
    """Quantify the discovered filters' contribution to the denoise function by measuring the output
    difference of the target model and the substituted model. The output difference is calculated on
    image gradients of their gray counterparts.

    Args:
        net_type (str): network type: 'srcnn_style' or 'srresnet'.
        target_noise_folder (str): folder that contains the gradient map of the target model's
            output towards noisy input.
        faig_folder (str): folder that contains the gradient map of the substituted-faig-discovered
            model's output towards noisy input.
        save_faig_maskdenoisefilter_noise_loss_txt (str): txt path that records the output difference
            of the target model and the substituted-faig-discovered (noise) model.
        save_faig_maskdeblurfilter_noise_loss_txt (str): txt path that records the output difference
            of the target model and the substituted-faig-discovered (blur) model.
        ig_folder (str): folder that contains the gradient map of the substituted-ig-discovered
            model's output towards noisy input.
        save_ig_maskdenoisefilter_noise_loss_txt (str): txt path that records the output difference
            of the target model and the substituted-ig-discovered (noise) model.
        save_ig_maskdeblurfilter_noise_loss_txt (str): txt path that records the output difference
            of the target model and the substituted-ig-discovered (blur) model.
        abs_filter_change_folder (str): folder that contains the gradient map of the
            substituted-abs_filter_change-discovered model's output towards noisy input.
        save_abs_filter_change_maskdenoisefilter_noise_loss_txt (str): txt path that records the
            output difference of the target model and the substituted-abs_filter_change-discovered
            (noise) model.
        save_abs_filter_change_maskdeblurfilter_noise_loss_txt (str): txt path that records the
            output difference of the target model and the substituted-abs_filter_change-discovered
            (blur) model.
        random_folder (str): folder that contains the gradient map of the
            substituted-random-discovered model's output towards noisy input.
        save_random_maskdenoisefilter_noise_loss_txt (str): txt path that records the output
            difference of the target model and the substituted-random-discovered (noise) model.
        save_random_maskdeblurfilter_noise_loss_txt (str): txt path that records the output
            difference of the target model and the substituted-random-discovered (blur) model.
        sub_func_folder_names (list): Default: ['maskdeblurfilter', 'maskdenoisefilter'].
        sub_input_folder_name (str): 'LRbicx2_noise0.1'.
    """
    denoise_func_imglist = list(sorted(glob.glob(os.path.join(target_noise_folder, '*.npy'))))

    faig_maskdenoisefilter_noise_loss = []
    faig_maskdeblurfilter_noise_loss = []
    ig_maskdenoisefilter_noise_loss = []
    ig_maskdeblurfilter_noise_loss = []
    abs_filter_change_maskdenoisefilter_noise_loss = []
    abs_filter_change_maskdeblurfilter_noise_loss = []
    random_maskdenoisefilter_noise_loss = []
    random_maskdeblurfilter_noise_loss = []

    if net_type == 'srcnn_style':
        total_neuron_nums = 156224
    elif net_type == 'srresnet':
        total_neuron_nums = 151936

    for proportion in [1, 3, 5, 10]:
        # for proportion in range(0, 101):
        selected_num_neurons = int(total_neuron_nums * proportion / 100)
        neuron_folder = f'{selected_num_neurons}kernels'

        faig_neuron_folder_path = os.path.join(faig_folder, neuron_folder)
        ig_neuron_folder_path = os.path.join(ig_folder, neuron_folder)
        abs_filter_change_neuron_folder_path = os.path.join(abs_filter_change_folder, neuron_folder)
        random_neuron_folder_path = os.path.join(random_folder, neuron_folder)

        for idx, sub_folder in enumerate(sub_func_folder_names):
            faig_neuron_sub_folder_path = os.path.join(faig_neuron_folder_path, sub_folder)
            ig_neuron_sub_folder_path = os.path.join(ig_neuron_folder_path, sub_folder)
            abs_filter_change_neuron_sub_folder_path = os.path.join(abs_filter_change_neuron_folder_path, sub_folder)
            random_neuron_sub_folder_path = os.path.join(random_neuron_folder_path, sub_folder)

            faig_imglist = list(
                sorted(glob.glob(os.path.join(faig_neuron_sub_folder_path, sub_input_folder_name, '*.npy'))))
            ig_imglist = list(
                sorted(glob.glob(os.path.join(ig_neuron_sub_folder_path, sub_input_folder_name, '*.npy'))))
            abs_filter_change_imglist = list(
                sorted(
                    glob.glob(os.path.join(abs_filter_change_neuron_sub_folder_path, sub_input_folder_name, '*.npy'))))
            random_imglist = list(
                sorted(glob.glob(os.path.join(random_neuron_sub_folder_path, sub_input_folder_name, '*.npy'))))

            faig_gradient_loss = 0.0
            ig_gradient_loss = 0.0
            abs_filter_change_gradient_loss = 0.0
            random_gradient_loss = 0.0

            for img_idx, img_path in enumerate(denoise_func_imglist):
                refer_img_path = img_path
                faig_img_path = faig_imglist[img_idx]
                ig_img_path = ig_imglist[img_idx]
                abs_filter_change_img_path = abs_filter_change_imglist[img_idx]
                random_img_path = random_imglist[img_idx]

                refer_gradient = np.load(refer_img_path)
                faig_gradient = np.load(faig_img_path)
                ig_gradient = np.load(ig_img_path)
                abs_filter_change_gradient = np.load(abs_filter_change_img_path)
                random_gradient = np.load(random_img_path)

                # for better visualization, we multiply the results by 1000
                faig_gradient_loss += np.mean((faig_gradient - refer_gradient)**2) * 1000
                ig_gradient_loss += np.mean((ig_gradient - refer_gradient)**2) * 1000
                abs_filter_change_gradient_loss += np.mean((abs_filter_change_gradient - refer_gradient)**2) * 1000
                random_gradient_loss += np.mean((random_gradient - refer_gradient)**2) * 1000

            faig_gradient_loss /= len(denoise_func_imglist)
            ig_gradient_loss /= len(denoise_func_imglist)
            abs_filter_change_gradient_loss /= len(denoise_func_imglist)
            random_gradient_loss /= len(denoise_func_imglist)

            if idx == 0:
                print('Calculate the effectiveness of masking discovered deblur filters for denoise function. '
                      'Lower value is better!')
                print(f'faig:{round(faig_gradient_loss, 4)}\t ig:{round(ig_gradient_loss, 4)}\t '
                      f'abs_filter_change:{round(abs_filter_change_gradient_loss, 4)}\t '
                      f'random:{round(random_gradient_loss, 4)}')
                faig_maskdenoisefilter_noise_loss.append(faig_gradient_loss)
                ig_maskdenoisefilter_noise_loss.append(ig_gradient_loss)
                abs_filter_change_maskdenoisefilter_noise_loss.append(abs_filter_change_gradient_loss)
                random_maskdenoisefilter_noise_loss.append(random_gradient_loss)
            else:
                print('Calculate the effectiveness of masking discovered denoise filters for denoise function. '
                      'Higher value is better!')
                print(f'faig:{round(faig_gradient_loss, 4)}\t ig:{round(ig_gradient_loss, 4)}\t '
                      f'abs_filter_change:{round(abs_filter_change_gradient_loss, 4)}\t '
                      f'random:{round(random_gradient_loss, 4)}')
                faig_maskdeblurfilter_noise_loss.append(faig_gradient_loss)
                ig_maskdeblurfilter_noise_loss.append(ig_gradient_loss)
                abs_filter_change_maskdeblurfilter_noise_loss.append(abs_filter_change_gradient_loss)
                random_maskdeblurfilter_noise_loss.append(random_gradient_loss)

    faig_maskdeblurfilter_noise_loss = np.array(faig_maskdeblurfilter_noise_loss)
    faig_maskdenoisefilter_noise_loss = np.array(faig_maskdenoisefilter_noise_loss)
    ig_maskdeblurfilter_noise_loss = np.array(ig_maskdeblurfilter_noise_loss)
    ig_maskdenoisefilter_noise_loss = np.array(ig_maskdenoisefilter_noise_loss)
    abs_filter_change_maskdeblurfilter_noise_loss = np.array(abs_filter_change_maskdeblurfilter_noise_loss)
    abs_filter_change_maskdenoisefilter_noise_loss = np.array(abs_filter_change_maskdenoisefilter_noise_loss)
    random_maskdeblurfilter_noise_loss = np.array(random_maskdeblurfilter_noise_loss)
    random_maskdenoisefilter_noise_loss = np.array(random_maskdenoisefilter_noise_loss)

    # write the result to txt
    np.savetxt(save_faig_maskdeblurfilter_noise_loss_txt, faig_maskdeblurfilter_noise_loss, delimiter=',', fmt='%.6f')
    np.savetxt(save_faig_maskdenoisefilter_noise_loss_txt, faig_maskdenoisefilter_noise_loss, delimiter=',', fmt='%.6f')
    np.savetxt(save_ig_maskdeblurfilter_noise_loss_txt, ig_maskdeblurfilter_noise_loss, delimiter=',', fmt='%.6f')
    np.savetxt(save_ig_maskdenoisefilter_noise_loss_txt, ig_maskdenoisefilter_noise_loss, delimiter=',', fmt='%.6f')
    np.savetxt(
        save_abs_filter_change_maskdeblurfilter_noise_loss_txt,
        abs_filter_change_maskdeblurfilter_noise_loss,
        delimiter=',',
        fmt='%.6f')
    np.savetxt(
        save_abs_filter_change_maskdenoisefilter_noise_loss_txt,
        abs_filter_change_maskdenoisefilter_noise_loss,
        delimiter=',',
        fmt='%.6f')
    np.savetxt(
        save_random_maskdeblurfilter_noise_loss_txt, random_maskdeblurfilter_noise_loss, delimiter=',', fmt='%.6f')
    np.savetxt(
        save_random_maskdenoisefilter_noise_loss_txt, random_maskdenoisefilter_noise_loss, delimiter=',', fmt='%.6f')
Python
def faig(img1, img2, gt_img, baseline_model_path, target_model_path, total_step, conv_name_list, scale):
    """Filter Attribution Integrated Gradients.

    When finding blurry neurons, img1 is a blurry image, while img2 is a noisy image.
    When finding noisy neurons, img1 is a noisy image, while img2 is a blurry image.

    Args:
        img1 (tensor): with the shape (1, 3, H, W)
        img2 (tensor): with the shape (1, 3, H, W)
        gt_img (tensor): with the shape (1, 3, H, W)
        baseline_model_path (str): path of baseline model
        target_model_path (str): path of target model
        total_step (int): total steps in the approximation of the integral
        conv_name_list (list): names of the conv layers in the state dict
        scale (int): upsampling scale

    Returns:
        faig_img1 (numpy.ndarray): faig result of img1
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    baseline_state_dict = torch.load(baseline_model_path)['params_ema']
    target_state_dict = torch.load(target_model_path)['params_ema']

    # calculate the gradient of two images with different degradation
    total_gradient_img1 = 0
    total_gradient_img2 = 0

    # approximate the integral via 100 discrete points uniformly
    # sampled along the straight-line path
    for step in range(0, total_step):
        # define current net
        alpha = step / total_step
        current_net_state_dict = {}
        for key, _ in baseline_state_dict.items():
            # a straight-line path between baseline model and target model
            current_net_state_dict[key] = alpha * baseline_state_dict[key] + (1 - alpha) * target_state_dict[key]

        current_net = MSRResNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=16, upscale=scale)
        current_net.eval()
        current_net = current_net.to(device)
        current_net.load_state_dict(current_net_state_dict)

        # for degradation 1
        current_net.zero_grad()
        output1 = current_net(img1)
        # measure the distance between the network output and the ground-truth
        # refer to equation 3 in the main paper
        criterion = torch.nn.MSELoss(reduction='sum')
        loss1 = criterion(gt_img, output1)
        # calculate the gradient of F to every filter
        loss1.backward()

        # save the gradient of all filters to grad_list_img1
        grad_list_img1 = []
        # add conv_first
        grad = current_net.conv_first.weight.grad
        grad = grad.reshape(-1, 3, 3)
        grad_list_img1.append(grad)
        # add body module
        for i in range(16):
            grad = current_net.body[i].conv1.weight.grad
            grad = grad.reshape(-1, 3, 3)
            grad_list_img1.append(grad)
            grad = current_net.body[i].conv2.weight.grad
            grad = grad.reshape(-1, 3, 3)
            grad_list_img1.append(grad)
        # add upconv1
        grad = current_net.upconv1.weight.grad
        grad = grad.reshape(-1, 3, 3)
        grad_list_img1.append(grad)
        # add upconv2
        if scale == 4:
            grad = current_net.upconv2.weight.grad
            grad = grad.reshape(-1, 3, 3)
            grad_list_img1.append(grad)
        # add conv_hr
        grad = current_net.conv_hr.weight.grad
        grad = grad.reshape(-1, 3, 3)
        grad_list_img1.append(grad)
        # add conv_last
        grad = current_net.conv_last.weight.grad
        grad = grad.reshape(-1, 3, 3)
        grad_list_img1.append(grad)
        # reshape to [-1, 3, 3]
        grad_list_img1 = torch.cat(grad_list_img1, dim=0)
        total_gradient_img1 += grad_list_img1

        # Input img2
        current_net.zero_grad()
        output2 = current_net(img2)
        loss2 = criterion(gt_img, output2)
        # calculate the gradient of F to every filter
        loss2.backward()

        # save all grad to list
        grad_list_img2 = []
        # add conv_first
        grad = current_net.conv_first.weight.grad
        grad = grad.reshape(-1, 3, 3)
        grad_list_img2.append(grad)
        # add body module
        for i in range(16):
            grad = current_net.body[i].conv1.weight.grad
            grad = grad.reshape(-1, 3, 3)
            grad_list_img2.append(grad)
            grad = current_net.body[i].conv2.weight.grad
            grad = grad.reshape(-1, 3, 3)
            grad_list_img2.append(grad)
        # add upconv1
        grad = current_net.upconv1.weight.grad
        grad = grad.reshape(-1, 3, 3)
        grad_list_img2.append(grad)
        # add upconv2
        if scale == 4:
            grad = current_net.upconv2.weight.grad
            grad = grad.reshape(-1, 3, 3)
            grad_list_img2.append(grad)
        # add conv_hr
        grad = current_net.conv_hr.weight.grad
        grad = grad.reshape(-1, 3, 3)
        grad_list_img2.append(grad)
        # add conv_last
        grad = current_net.conv_last.weight.grad
        grad = grad.reshape(-1, 3, 3)
        grad_list_img2.append(grad)
        # reshape to [-1, 3, 3]
        grad_list_img2 = torch.cat(grad_list_img2, dim=0)
        total_gradient_img2 += grad_list_img2

    # calculate the filter variation between the baseline model and target model
    diff_list = []
    for key in conv_name_list:
        variation = baseline_state_dict[key] - target_state_dict[key]
        variation = variation.reshape(-1, 3, 3)
        diff_list.append(variation)
    diff_list = torch.cat(diff_list, dim=0).to(device)

    # multiply the accumulated gradients of img1 with the diff
    # refer to equation 6 in the main paper
    single_faig_img1 = total_gradient_img1 * diff_list / total_step
    single_faig_img1 = torch.sum(torch.sum(abs(single_faig_img1), dim=1), dim=1)

    # multiply the accumulated gradients of img2 with the diff
    # refer to equation 6 in the main paper
    single_faig_img2 = total_gradient_img2 * diff_list / total_step
    single_faig_img2 = torch.sum(torch.sum(abs(single_faig_img2), dim=1), dim=1)

    # find discriminative filters for a specific degradation
    # refer to equation 7 in the main paper
    faig_img1 = single_faig_img1 - single_faig_img2
    return faig_img1.cpu().numpy()
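The per-layer gradient collection above repeats the same reshape-and-append pattern for conv_first, the sixteen body blocks, the upsampling convs, conv_hr, and conv_last. A small helper along these lines (a sketch under the layer layout shown above, not part of the original code) would let both passes share one implementation:

import torch

def collect_conv_grads(net, scale):
    # Gather the 3x3 conv layers in the same order the code above uses.
    layers = [net.conv_first]
    for block in net.body:
        layers.extend([block.conv1, block.conv2])
    layers.append(net.upconv1)
    if scale == 4:
        layers.append(net.upconv2)
    layers.extend([net.conv_hr, net.conv_last])
    # Flatten every filter's gradient to (-1, 3, 3) and concatenate.
    return torch.cat([layer.weight.grad.reshape(-1, 3, 3) for layer in layers], dim=0)

# inside the integration loop, after each backward():
#   total_gradient_img1 += collect_conv_grads(current_net, scale)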
Python
def main():
    """The command line entry point."""
    # Get command line arguments
    config = Config()

    # If no args are present, show help and exit.
    if len(sys.argv) == 1:
        config.parser.print_help(sys.stderr)
        quit()

    # Setup logging
    verbosity_dict = {0: logging.ERROR,
                      1: logging.WARNING,
                      2: logging.INFO,
                      3: logging.DEBUG}

    logging.basicConfig(format=("{asctime} [{levelname}] "
                                "{module}.{funcName} : {message}"),
                        style="{", level=verbosity_dict[config.verbosity])

    if config.command == "predict":
        np.random.seed(config.seed)
        dataset = xenith.load_psms(config.psm_files)

        try:
            model = xenith.from_percolator(config.model)
        except UnicodeDecodeError:
            model = xenith.load_model(config.model)

        pred = model.predict(dataset)
        dataset.add_metric(pred, name="score")
        psms, xlinks = dataset.estimate_qvalues("score")

        out_base = os.path.join(config.output_dir, config.fileroot)
        psms.to_csv(out_base + ".psms.txt", sep="\t", index=False)
        xlinks.to_csv(out_base + ".xlinks.txt", sep="\t", index=False)

    elif config.command == "kojak":
        xenith.convert.kojak(kojak_txt=config.kojak_txt,
                             perc_inter=config.perc_inter,
                             perc_intra=config.perc_intra,
                             version=config.version,
                             out_file=config.output_file,
                             max_charge=config.max_charge,
                             to_pin=config.to_pin)
Python
def add_metric(self, values: np.ndarray, name: str) -> None:
    """
    Add a new metric to the XenithDataset.

    Metrics are measures of PSM confidence and can be used for
    estimating q-values.

    Parameters
    ----------
    values : np.ndarray
        A 1D numpy array specifying the values of the metric. This must
        be the same length as the number of PSMs in the dataset (which
        can be found with `len()`).

    name : str
        Name of the new metric. If the name matches an existing metric,
        it will be overwritten.
    """
    if len(values.shape) > 1:
        raise ValueError("'values' must be 1-dimensional.")

    if values.shape[0] != len(self):
        raise ValueError("'values' must be the same length as the "
                         "XenithDataset.")

    self.metrics[name] = values
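A short usage sketch for add_metric together with the prediction flow used in main() above; the file names are placeholders.

# Hedged usage sketch: file paths are placeholders.
import xenith

dataset = xenith.load_psms(["psms_rep1.tsv", "psms_rep2.tsv"])
model = xenith.load_model("trained_model.pt")

scores = model.predict(dataset)           # 1D np.ndarray, one score per PSM
dataset.add_metric(scores, name="score")

# a metric must be 1D and match the dataset length, otherwise ValueError is raised
assert scores.ndim == 1 and len(scores) == len(dataset)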
Python
def estimate_qvalues(self, metric: str, desc: bool = True) \
        -> Tuple[pd.DataFrame]:
    """
    Estimate q-values at the PSM, and cross-link levels.

    At each level, the false discovery rate (FDR) is estimated using
    target-decoy competition. For PSMs, ties from the same scan are
    broken randomly. Peptide aggregation is performed using the top
    scoring PSM for a peptide. FDR at the cross-link level is estimated
    using only unambiguous peptides; That is, peptides that correspond
    to a single protein and linked residue for each peptide.

    Parameters
    ----------
    metric : str
        The metric by which to rank PSMs. This can either be a model
        prediction or any feature. This is case-sensitive.

    desc : bool
        Does a higher value of metric indicate a better PSM?

    Returns
    -------
    Tuple[pandas.DataFrame]
        A DataFrame with the q-values at the PSM, and cross-link level,
        respectively.
    """
    if metric not in self.metrics.columns.tolist():
        print(metric)
        print(self.metrics.columns.tolist())
        raise ValueError(f"{metric} not found in the metrics of the "
                         "XenithDataset.")

    res_df = self.metrics.loc[:, metric]
    res_df = pd.concat([self.metadata, res_df], axis=1)

    if not desc:
        res_df[metric] = -res_df[metric]

    # Generate keys for grouping
    prot_site1 = (res_df.proteina + "_"
                  + res_df.proteinlinksitea.astype(str))
    prot_site2 = (res_df.proteinb + "_"
                  + res_df.proteinlinksiteb.astype(str))

    res_df["residue_key"] = ["--".join(sorted(x)) for x in
                             zip(prot_site1, prot_site2)]

    # randomize the df, so that ties are broken randomly
    res_df = res_df.sample(frac=1).reset_index(drop=True)
    psm_cols = ["fileidx", "scannr"]

    # PSM FDR -------------------------------------------------------------
    psm_idx = res_df.groupby(psm_cols)[metric].idxmax()
    psms = res_df.loc[psm_idx, :]

    # Cross-Link FDR ------------------------------------------------------
    link_idx = psms.groupby("residue_key")[metric].idxmax()
    links = psms.loc[link_idx, :]
    links = links.loc[~links.residue_key.str.contains(";")]

    # Estimate q-values ---------------------------------------------------
    out_list = []
    for dat in (psms, links):
        if not desc:
            dat[metric] = -dat[metric]

        dat["q-values"] = xenith.fdr.qvalues(dat.numtarget.values,
                                             dat[metric].values,
                                             desc=desc)

        dat.sort_values(metric, ascending=(not desc), inplace=True)
        dat = dat.loc[dat.numtarget == 2]
        dat.reset_index(drop=True, inplace=True)
        out_list.append(_format_output(dat, [metric, "q-values"]))

    return out_list
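The q-value computation itself is delegated to xenith.fdr.qvalues. As a rough illustration of the underlying idea only (a simplified single-competition sketch, not the actual xenith implementation, which also handles the cross-link case where numtarget can be 0, 1, or 2), target-decoy q-values can be estimated by walking down the ranked list with a running FDR estimate and then enforcing monotonicity from the worst score upward:

import numpy as np

def simple_tdc_qvalues(is_target, scores):
    """Toy q-value estimate: decoys over targets above each score threshold,
    made monotone by a cumulative minimum from the worst score upward."""
    scores = np.asarray(scores, dtype=float)
    order = np.argsort(-scores)                       # best score first
    targ = np.asarray(is_target, dtype=bool)[order]
    decoys = np.cumsum(~targ)
    targets = np.cumsum(targ)
    fdr = (decoys + 1) / np.maximum(targets, 1)
    qvals = np.minimum.accumulate(fdr[::-1])[::-1]    # q_i = min over worse-or-equal ranks
    out = np.empty_like(qvals)
    out[order] = np.minimum(qvals, 1.0)               # back to the input order
    return out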
Python
def load_psms(psm_files: Tuple[str], additional_metadata: Tuple[str] = None)\
        -> XenithDataset:
    """
    Load a collection of peptide-spectrum matches (PSMs).

    Reads a collection of PSMs from a file in the xenith tab-delimited
    format. By default, the required fields are considered metadata
    whereas all other fields are considered features.

    Parameters
    ----------
    psm_files : str or tuple of str
        The files from which to load a set of PSMs. These should be in
        the xenith tab-delimited format.

    additional_metadata : tuple of str
        Additional columns to be considered metadata. The columns
        specified here will not be included as features.

    Returns
    -------
    xenith.dataset.XenithDataset
        A XenithDataset object containing the PSMs.
    """
    return XenithDataset(psm_files=psm_files,
                         additional_metadata=additional_metadata)
Python
def _format_output(out_df, metric):
    """
    Format the output dataframes.

    Parameters
    ----------
    out_df : pandas.DataFrame
        The dataframe with q-values and the other output columns.

    metric : list of str
        The metric columns to add.
    """
    front_meta = ["fileidx", "psmid", "numtarget", "scannr"]
    back_meta = ["peptidea", "peptideb", "peptidelinksitea", "peptidelinksiteb",
                 "proteinlinksitea", "proteinlinksiteb", "proteina", "proteinb"]

    if isinstance(metric, str):
        metric = [metric]

    order = front_meta + metric + back_meta
    out_df = out_df.loc[:, order]
    out_df = out_df.rename(columns={"fileidx": "FileIdx",
                                    "psmid": "PsmId",
                                    "numtarget": "NumTarget",
                                    "scannr": "ScanNr",
                                    "peptidea": "PeptideA",
                                    "peptideb": "PeptideB",
                                    "peptidelinksitea": "PeptideLinkSiteA",
                                    "peptidelinksiteb": "PeptideLinkSiteB",
                                    "proteinlinksitea": "ProteinLinkSiteA",
                                    "proteinlinksiteb": "ProteinLinkSiteB",
                                    "proteina": "ProteinA",
                                    "proteinb": "ProteinB"})

    return out_df
Python
def _process_features(feat_df, feat_mean, feat_stdev, normalize):
    """
    Process a dataframe of features.

    This function normalizes features and verifies that the `feat_mean`
    and `feat_stdev` have the same features as feat_df.

    Parameters
    ----------
    feat_df : pandas.DataFrame
        A dataframe containing only the features for training and
        prediction.

    feat_mean : pd.Series
    feat_stdev : pd.Series
        Series containing the mean and standard deviation of each
        feature to use for normalization. If `None`, these are
        calculated on the parsed data. For prediction, these should be
        the respective values from the training set.

    normalize : bool
        Should the features be normalized?

    Returns
    -------
    tuple(pandas.DataFrame, pandas.DataFrame, pandas.DataFrame)
        A tuple of dataframes containing the normalized features, the
        employed feat_mean, and the employed feat_stdev, in order.
    """
    if not all(np.issubdtype(col.dtype, np.number)
               for _, col in feat_df.iteritems()):
        raise ValueError("All feature columns must be numeric.")

    if feat_mean is None:
        feat_mean = feat_df.mean()
    if feat_stdev is None:
        feat_stdev = feat_df.std(ddof=0)

    feat_set = set(feat_df.columns)
    feat_mean_set = set(feat_mean.index)
    feat_stdev_set = set(feat_stdev.index)

    if feat_mean_set != feat_stdev_set:
        # This one should never happen with the public API.
        raise RuntimeError("Features for the normalization parameters "
                           "do not match.")

    if feat_set != feat_mean_set:
        raise RuntimeError("Model features do not match the dataset.")

    # Align features
    feat_mean = feat_mean.loc[feat_df.columns]
    feat_stdev = feat_stdev.loc[feat_df.columns]

    eps = np.finfo(np.float).eps
    if normalize:
        feat_df = (feat_df - feat_mean.values) / (feat_stdev.values + eps)

    return (feat_df, feat_mean, feat_stdev)
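A small illustration of the normalization behaviour with a made-up feature frame; the column names and values here are placeholders for the example, not data from the project.

import numpy as np
import pandas as pd

feat = pd.DataFrame({"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]})

# Training-style call: means and standard deviations are learned from the data.
norm, mean, stdev = _process_features(feat, None, None, normalize=True)

# Prediction-style call: reuse the training statistics on new data.
new_feat = pd.DataFrame({"A": [2.0, 4.0], "B": [5.0, 7.0]})
new_norm, _, _ = _process_features(new_feat, mean, stdev, normalize=True)

# Each column is centered and scaled by the population std (ddof=0).
assert np.allclose(norm.mean(), 0.0, atol=1e-9)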
Python
def psm_txt(tmpdir):
    """
    Based on one file, make three varieties of xenith files.

    Elements 0 and 1 will have the same columns, but slightly different data.
    Element 2 will have different columns but still be valid.
    """
    np.random.seed(1)
    out_files = [os.path.join(tmpdir, str(i)) for i in range(3)]
    test_file = os.path.join("tests", "data", "test.tsv")
    base_dat = pd.read_csv(test_file, sep="\t")

    # original data
    base_dat.to_csv(out_files[0], sep="\t", index=False)

    # Modify scores a little
    base_dat.Score = base_dat.Score + np.random.normal(size=len(base_dat))
    base_dat.PsmId = base_dat.PsmId + "-mod"
    base_dat.to_csv(out_files[1], sep="\t", index=False)

    # Delete a column
    base_dat = base_dat.drop(columns="eVal")
    base_dat.to_csv(out_files[2], sep="\t", index=False)

    return out_files
Python
def toy_features():
    """
    Generate a sample feature dataframe with one column that isn't a feature.
    """
    feat = pd.DataFrame({"A": [1, 2, 3],
                         "B": [4, 5, 6],
                         "C": [7, 8, 9],
                         "D": ["a", "b", "c"]})

    return (feat, feat.loc[:, ["A", "B", "C"]])
Python
def save(self, file_name: str) -> None:
    """
    Save a XenithModel object to a file.

    Parameters
    ----------
    file_name : str
        The path to save the xenith model for future use.
    """
    model_spec = {"model_class": type(self.model).__name__,
                  "source": self.source,
                  "pretrained": self.pretrained,
                  "feat_mean": self.feat_mean,
                  "feat_stdev": self.feat_stdev,
                  "state_dict": self.model.state_dict(),
                  "num_features": self.num_features,
                  "hidden_dims": self.hidden_dims}

    torch.save(model_spec, file_name)
Python
def fit(self, training_set: dataset.XenithDataset,
        validation_set: dataset.XenithDataset,
        max_epochs: int = 100, batch_size: int = 128,
        learn_rate: float = 0.001, weight_decay: float = 0.001,
        early_stop: int = 5, gpu: bool = False, _hybrid_loss=False) \
        -> pd.DataFrame:
    """
    Fit a XenithModel on a collection of cross-linked PSMs.

    The model is trained using the Adam algorithm to perform mini-batch
    gradient descent.

    Parameters
    ----------
    training_set : xenith.XenithDataset
        A training set of PSMs. These are the PSMs that the model
        learns from.

    validation_set : xenith.XenithDataset
        A validation set of PSMs. These PSMs are used to assess when
        the model is trained.

    max_epochs : int
        The maximum number of iterations through the full dataset to
        perform. As a simple rule, more epochs are needed for larger
        models and more complicated datasets.

    batch_size : int
        The batch size to use for gradient descent.

    learn_rate : float
        The learning rate of the Adam optimizer.

    weight_decay : float
        Adds L2 regularization to all model parameters.

    early_stop : int
        Stop training if the validation set loss does not decrease for
        `early_stop` consecutive epochs. Set to `None` to disable early
        stopping.

    gpu : bool
        Should the gpu be used, if available?

    Returns
    -------
    pandas.DataFrame
        A dataframe containing the training and validation losses at
        each epoch.
    """
    device = _set_device(gpu)
    train_set = dataset._PsmDataset(training_set,
                                    feat_mean=None,
                                    feat_stdev=None,
                                    normalize=True)

    self.feat_mean = train_set.feat_mean
    self.feat_stdev = train_set.feat_stdev

    val_set = dataset._PsmDataset(validation_set,
                                  feat_mean=train_set.feat_mean,
                                  feat_stdev=train_set.feat_stdev,
                                  normalize=True)

    if not _hybrid_loss:
        loss_fun = torchmods.SigmoidLoss()
    else:
        loss_fun = torchmods.HybridLoss()

    # Send everything to 'device'
    self.model = self.model.to(device)
    train_set.features.to(device)
    train_set.target.to(device)
    val_set.features.to(device)
    val_set.target.to(device)

    # Setup
    loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,
                                         shuffle=True, drop_last=True)

    optimizer = torch.optim.Adam(self.model.parameters(), lr=learn_rate,
                                 weight_decay=weight_decay, amsgrad=True)

    # Set tracking variables
    best_epoch = 0
    best_loss = 0
    stop_counter = 0
    train_loss_tracker = []
    val_loss_tracker = []

    # The main training loop
    for epoch in range(max_epochs):
        # Evaluate and update trackers ------------------------------------
        with torch.no_grad():
            self.model.eval()
            train_pred = self.model(train_set.features)
            train_loss = loss_fun(train_pred.flatten(), train_set.target)
            train_loss = train_loss.item()
            train_loss_tracker.append(train_loss)

            val_pred = self.model(val_set.features)
            val_loss = loss_fun(val_pred.flatten(), val_set.target)
            val_loss = val_loss.item()
            val_loss_tracker.append(val_loss)
            self.model.train()

        if val_loss < best_loss or not best_loss:
            best_loss = val_loss
            best_epoch = epoch
            best_model = copy.deepcopy(self.model)
            stop_counter = 0
        else:
            stop_counter += 1

        # The important bit -----------------------------------------------
        loss = _train_batch(loader, self.model, optimizer, loss_fun)

        # Communication and error tracking --------------------------------
        _train_message(epoch, train_loss, val_loss)
        if np.isnan(loss):
            raise RuntimeError("NaN detected in loss.")

        if stop_counter == early_stop:
            logging.info("Stopping at epoch %s...", epoch)
            break

    res_msg = (f"Best Epoch = {best_epoch}, "
               f"Validation Loss = {best_loss:.5f}")
    logging.info(res_msg)

    # Wrap-up -------------------------------------------------------------
    self.model = best_model.cpu()
    self.pretrained = True
    self.source = "xenith"

    loss_df = pd.DataFrame({"epoch": list(range(epoch+1)),
                            "train_loss": train_loss_tracker,
                            "val_loss": val_loss_tracker})

    return loss_df
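A hedged end-to-end training sketch built only from the functions shown in this collection; the .tsv and .pt file names, epoch count, and other settings are placeholders.

# Hedged usage sketch: all file names below are placeholders.
import xenith

train_psms = xenith.load_psms("train_psms.tsv")
val_psms = xenith.load_psms("validation_psms.tsv")

model = xenith.load_model("starting_model.pt")  # or any other way of building a XenithModel
loss_df = model.fit(train_psms, val_psms, max_epochs=100, batch_size=128,
                    learn_rate=0.001, early_stop=5, gpu=False)

print(loss_df.tail())          # per-epoch train/validation losses
model.save("refit_model.pt")   # reload later with xenith.load_model()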
Python
def from_percolator(weights_file: str) -> XenithModel:
    """
    Load a pretrained model from Percolator results.

    Parameters
    ----------
    weights_file : str
        The output weights from Percolator. This can be obtained using
        the '--weights' option when running Percolator on a training
        dataset.

    Returns
    -------
    xenith.models.XenithModel
        A XenithModel object for predicting on a new dataset using the
        Percolator weights.
    """
    weights_file = os.path.abspath(os.path.expanduser(weights_file))

    if not os.path.isfile(weights_file):
        raise FileNotFoundError(f"{weights_file} not found.")

    weight_df = pd.read_csv(weights_file, sep="\t", nrows=2)
    weights = torch.FloatTensor(weight_df.loc[1, :].values)
    weights = weights[None, :]  # Add a second dim

    bias = weights[:, -1]
    weights = weights[:, :-1]

    model = torchmods.Linear(input_dim=len(weights))
    model.linear.weight.data = weights
    model.linear.bias.data = bias

    # Dummy pd.Series to verify that the features are correct.
    features = weight_df.drop(columns="m0").columns.tolist()
    features = [f.lower() for f in features]
    dummy = pd.Series([0]*weights.shape[1], index=features)

    return XenithModel(model=model, num_features=weights.shape[1],
                       feat_mean=dummy, feat_stdev=dummy,
                       source="percolator", pretrained=True)
Python
def load_model(xenith_model_file: str) -> XenithModel: """ Load a pretrained model from xenith. Parameters ---------- xenith_model_file : str The saved model file output from xenith. Returns ------- xenith.models.XenithModel A XenithModel object for predicting on a new dataset. """ xenith_model_file = os.path.abspath(os.path.expanduser(xenith_model_file)) if not os.path.isfile(xenith_model_file): raise FileNotFoundError(f"{xenith_model_file} not found.") model_spec = torch.load(xenith_model_file) if model_spec["model_class"] == "MLP": model = torchmods.MLP(input_dim=model_spec["num_features"], hidden_dims=model_spec["hidden_dims"]) else: model = torchmods.Linear(input_dim=model_spec["num_features"]) model.load_state_dict(model_spec["state_dict"]) return XenithModel(model=model, num_features=model_spec["num_features"], hidden_dims=model_spec["hidden_dims"], feat_mean=model_spec["feat_mean"], feat_stdev=model_spec["feat_stdev"], source=model_spec["source"], pretrained=model_spec["pretrained"])
Python
def _set_device(gpu): """Set PyTorch to use the gpu, if requested.""" gpu_avail = torch.cuda.is_available() device = torch.device("cuda:0" if gpu and gpu_avail else "cpu") if gpu and not gpu_avail: logging.warning("No gpu was detected. Using cpu instead.") return device
Python
def _train_message(epoch, train_loss, val_loss): """Print messages about training progress.""" msg = (f"Epoch {epoch}: Train loss = {train_loss:.5f}, " f"Validation Loss = {val_loss:.5f}") logging.info(msg)
Python
def _train_batch(loader, model, optimizer, loss_fun):
    """Train on every batch in the loader and return the mean batch loss."""
    running_loss = 0
    total = 0
    for batch_idx, (target, feat) in enumerate(loader):
        pred = model(feat)
        optimizer.zero_grad()
        loss = loss_fun(pred.flatten(), target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        total = batch_idx

    return running_loss / (total + 1)
Python
def _count_proteins(psm_df): """ Count the number of proteins in the dataset. If the number of proteins is 2, intraprotein should be constant. """ all_prot = psm_df.ProteinA + ";" + psm_df.ProteinB prot_set = [p.split(";") for p in all_prot.tolist()] prot_set = set(chain.from_iterable(prot_set)) return len(prot_set)
Python
def _write_pin(pin_df, pin_file):
    """
    Write a dataframe to the Percolator INput (PIN) format.

    This is needed because pandas always either quotes or escapes string
    fields that contain the delimiter, whereas the PIN format expects them
    written as-is.
    """
    with open(pin_file, "w") as pin_out:
        pin_out.write("\t".join(pin_df.columns.tolist()) + "\n")
        for _, row in pin_df.iterrows():
            row = row.values.astype(str)
            pin_out.write("\t".join(row.tolist()) + "\n")
Python
def _read_kojak(kojak_file, decoy_prefix): """ Read a kojak results file and generate key columns Parameters ---------- kojak_file : str The kojak result file to read. decoy_prefix : str Decoy prefix string. Returns ------- tuple(pandas.DataFrame, list) A dataframe containing the parsed PSMs and a list naming the key columns to join with the Percolator data. """ dat = pd.read_csv(kojak_file, sep="\t", skiprows=1) dat = dat.loc[dat["Protein #2"] != "-"] key_cols = ["scannr", "Peptide", "Label"] pep1 = dat["Peptide #1"].str.replace(r"\[.+?\]", "") pep2 = dat["Peptide #2"].str.replace(r"\[.+?\]", "") link1 = dat["Linked AA #1"] link2 = dat["Linked AA #2"] decoy1 = _all_decoy(dat["Protein #1"], decoy_prefix) decoy2 = _all_decoy(dat["Protein #2"], decoy_prefix) dat["Protein #1"] = _parse_proteins(dat["Protein #1"]) dat["Protein #2"] = _parse_proteins(dat["Protein #2"]) dat["scannr"] = dat["Scan Number"] dat["Peptide"] = ("-." + pep1 + "(" + link1 + ")--" + pep2 + "(" + link2 + ").-") dat["Label"] = (((decoy1.values - 1) * (decoy2.values - 1))*2 - 1) # rename some columns for the final file dat["NumTarget"] = (decoy1.values + decoy2.values - 2) * -1 dat = dat.rename(columns={"Protein #1 Site": "ProteinLinkSiteA", "Protein #2 Site": "ProteinLinkSiteB", "Linked AA #1": "PeptideLinkSiteA", "Linked AA #2": "PeptideLinkSiteB", "Protein #1": "ProteinA", "Protein #2": "ProteinB", "Peptide #1": "PeptideA", "Peptide #2": "PeptideB"}) final_cols = ["NumTarget", "ProteinA", "ProteinB", "PeptideA", "PeptideB", "ProteinLinkSiteA", "ProteinLinkSiteB", "PeptideLinkSiteA", "PeptideLinkSiteB"] dat = dat.loc[:, key_cols + final_cols] return (dat, key_cols)
Python
def _read_percolator(percolator_file): """Parse a PIN formatted file. Return a dataframe""" with open(percolator_file, "r") as pin: header = pin.readline() splits = header.count("\t") header = header.replace("\n", "") header = header.split("\t") rows = [line.replace("\n", "").split("\t", splits) for line in pin] data = pd.DataFrame(columns=header, data=rows) return data.apply(pd.to_numeric, errors="ignore")
Python
def _parse_proteins(protein_col): """Remove description from protein id.""" protein_col = protein_col.str.split(";") prot = [";".join([p.strip().split(" ", 1)[0] for p in r]) for r in protein_col] return prot
Python
def _is_intraprotein(protein_col_a, protein_col_b, decoy_prefix):
    """Determine if the cross-link is between the same protein or its decoy."""
    protein_col_a = protein_col_a.str.replace(decoy_prefix, "").str.split(";")
    protein_col_b = protein_col_b.str.replace(decoy_prefix, "").str.split(";")
    return [int(set(a) == set(b)) for a, b in zip(protein_col_a, protein_col_b)]
Python
def _all_decoy(protein_col, decoy_prefix): """Returns 1 if all proteins are decoys, 0 otherwise.""" ret = [] protein_col = protein_col.str.split(";") for row in protein_col: decoy = all([p.startswith(decoy_prefix) for p in row]) ret.append(decoy) return pd.Series(ret).astype(int)
Python
def dataset(): """ Construct a contrived dataset for testing the FDR By design, the dataset has several edge cases: 1) The top score is a decoy (should result in div by 0) 2) At one point decoy-decoy hits outnumber target-decoy hits. Under the Walzthoeni FDR calculation, this would be negative. 3) There are meaningful ties Additionally, the order is randomized. I'm not going to set the seed because it should not matter what order they are in. """ num_targets = [1, 1, 0, 1, 2, 2, 1, 1, 1, 2, 0, 0, 2, 1] score = [1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 9, 10, 11] qvalues = [1, 0.75] + [0.5] * 7 + [0] * 5 dat = pd.DataFrame({"num_targets": num_targets, "score": score, "qvalues": qvalues}) dat = dat.sample(frac=1).reset_index() return dat
Python
def forward(self, score, target):
    """
    Calculate the loss using a sigmoid loss function.

    Parameters
    ----------
    score : torch.FloatTensor
        A 1D tensor of the scores output from a model. These should not
        be sigmoided.

    target : torch.ByteTensor
        A 1D tensor indicating the truth. In the case of xenith, '1'
        indicates a target hit and '0' indicates a decoy hit.
    """
    if not score.is_floating_point():
        score = score.float()

    if not target.is_floating_point():
        target = target.float()

    eps = torch.finfo(score.dtype).eps
    pred = torch.sigmoid(score).clamp(min=eps, max=1 - eps)
    loss = target * (1 - pred) + (1 - target) * pred
    return loss.mean()
Python
def forward(self, score, target):
    """
    Calculate the loss using a hybrid loss function: the sigmoid loss for
    target hits and a logistic (log) loss for decoy hits.

    Parameters
    ----------
    score : torch.FloatTensor
        A 1D tensor of the scores output from a model. These should not
        be sigmoided.

    target : torch.ByteTensor
        A 1D tensor indicating the truth. In the case of xenith, '1'
        indicates a target hit and '0' indicates a decoy hit.
    """
    if not score.is_floating_point():
        score = score.float()

    if not target.is_floating_point():
        target = target.float()

    eps = torch.finfo(score.dtype).eps
    pred = torch.sigmoid(score).clamp(min=eps, max=1 - eps)
    # Unlike the plain sigmoid loss, decoys contribute -log(1 - pred) here.
    loss = target * (1 - pred) - (1 - target) * torch.log(1 - pred)
    return loss.mean()
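Written out, the two forward methods above differ only in how decoy hits contribute. With $s_i$ the raw score, $t_i \in \{0, 1\}$ the target label, and $p_i = \sigma(s_i)$ the clamped sigmoid, they compute:

\[
\mathcal{L}_{\mathrm{sigmoid}} = \frac{1}{N}\sum_{i=1}^{N}\bigl[t_i\,(1 - p_i) + (1 - t_i)\,p_i\bigr]
\]
\[
\mathcal{L}_{\mathrm{hybrid}} = \frac{1}{N}\sum_{i=1}^{N}\bigl[t_i\,(1 - p_i) - (1 - t_i)\,\log(1 - p_i)\bigr]
\]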
Python
def main(percolate: bool):
    """
    Creates a xenith tsv and a Percolator PIN file.

    The data used for this is a very small set of PSMs, so that it is
    readily stored on GitHub. To generate a PIN file that would
    successfully complete Percolator, I copied the same PSMs 10 times,
    and changed the 'Score' column to be drawn from two normal
    distributions.

    The Percolator command to use this should be:
    $ percolator --weights=data/weights.txt -Y --override \
        --default-direction=Score data/test_large.pin

    Successful completion of this function will result in the following
    new files in the `./data` subdirectory:

    "test.tsv"
        Input for xenith in the tab-delimited format.
    "test.pin"
        A small pin file for Percolator.
    "test_large.pin"
        A large pin file that can actually be used with Percolator.
    "weights.txt"
        The weights output from Percolator. Note this is present only
        if Percolator is run.

    Parameters
    ----------
    percolate : bool
        If `True`, Percolator will be run using `subprocess.run()`.
        Note that Percolator must be installed and in your path to
        execute successfully.
    """
    tsv = xenith.convert_kojak(kojak="data/test.kojak.txt",
                               perc_inter="data/test.perc.inter.txt",
                               perc_intra="data/test.perc.intra.txt",
                               out_file="data/test.tsv",
                               to_pin=False)

    pin = xenith.convert_kojak(kojak="data/test.kojak.txt",
                               perc_inter="data/test.perc.inter.txt",
                               perc_intra="data/test.perc.intra.txt",
                               out_file="data/test.pin",
                               to_pin=True)

    # Need a larger pin file for Percolator:
    pin_df = xenith.convert._read_percolator(pin)
    pin_df = pd.concat([pin_df] * 10, sort=False)

    # Draw scores from two normal distributions: N(0.5, 1) for targets
    # and N(0, 1) for decoys.
    targets = (pin_df.Label == 1).values
    pos_scores = np.random.normal(0.5, 1, len(pin_df))
    neg_scores = np.random.normal(0, 1, len(pin_df))
    pin_df.Score = np.where(targets, pos_scores, neg_scores)

    pin_file = "data/test_large.pin"
    xenith.convert._write_pin(pin_df, pin_file)

    # Percolator command
    cmd = ["percolator", "--weights=data/weights.txt", "-Y", "--override",
           "--default-direction=Score", pin_file]

    if percolate:
        proc = subprocess.run(cmd)
        print(proc)
Python
def contrived_dataset(tmpdir): """Create a simple dataset to test model predictions""" dset = pd.DataFrame({"psmid": [1, 2, 3], "numtarget": [0, 1, 2], "scannr": [1, 2, 3], "peptidea": ["a", "b", "c"], "peptideb": ["d", "e", "f"], "peptidelinksitea": [1, 1, 1], "peptidelinksiteb": [2, 2, 2], "proteinlinksitea": [1, 1, 1], "proteinlinksiteb": [2, 2, 2], "proteina": ["a", "b", "c"], "proteinb": ["a", "b", "c"], "feat_a": [0, 1, 2], "feat_b": [3, 4, 5]}) feat_mean = pd.Series([1, 4], index=["feat_a", "feat_b"]) feat_stdev = pd.Series([np.std([0, 1, 2], ddof=0)]*2, index=["feat_a", "feat_b"]) dset_file = os.path.join(tmpdir, "test.tsv") dset.to_csv(dset_file, sep="\t", index=False) dataset = xenith.load_psms(dset_file) return (dataset, feat_mean, feat_stdev)
Python
def create_user(self, email, password): """ Creates and saves a User with the given email and password. """ if not email: raise ValueError("Users must have an email address.") if not password: raise ValueError("Users must have a password.") user = self.model( email = self.normalize_email(email) ) user.set_password(password) user.active = True user.basic = False user.premium = False user.staff = False user.admin = False user.save(using=self._db) return user
Python
def create_basicuser(self, email, password): """ Creates and saves a basic user with the given email and password. """ user = self.create_user(email, password=password) user.active = True user.basic = True user.premium = False user.staff = False user.admin = False user.save(using=self._db) return user
Python
def create_premiumuser(self, email, password): """ Creates and saves a premium user with the given email and password. """ user = self.create_user(email, password=password) user.active = True user.basic = True user.premium = True user.staff = False user.admin = False user.save(using=self._db) return user
Python
def create_staffuser(self, email, password): """ Creates and saves a staff user with the given email and password. """ user = self.create_user(email, password=password) user.active = True user.basic = True user.premium = True user.staff = True user.admin = False user.save(using=self._db) return user
Python
def create_superuser(self, email, password): """ Creates and saves a super user with the given email and password. """ user = self.create_user(email, password=password) user.active = True user.basic = True user.premium = True user.staff = True user.admin = True user.save(using=self._db) return user
Python
def backup(self): """ 1. backup using mongodump with gzip format 2. compress the backup folder Returns: filename {string} -- backup file name """ # mongodump compressed print('Making mongodump in gz...') output = subprocess.check_output(['mongodump', '--out=databackup-temp/databackup', '--gzip'], encoding='utf-8') print(output) # compress the backup folder with a file name current_datetime = datetime.now() filename = f'databackup--{current_datetime.strftime("%d-%m-%Y--%H-%M")}.tar.gz' make_tarfile(filename, 'databackup-temp/databackup') return filename
Python
def make_tarfile(output_filename, source_dir): """Utility function to make tar gz file of the given source_dir Arguments: output_filename {string} -- filename of the gzip file source_dir {string} -- directory name to be gzipped """ with tarfile.open(os.path.join('databackup-temp', output_filename), "w:gz") as tar: tar.add(source_dir, arcname=os.path.basename(source_dir))
Python
def _weigh_object(self, host_state, weight_properties):
    """Higher weights win. We do not want a host with preemptible
    instances selected if there are hosts without them.
    """
    count = 0
    for instance in host_state.instances.values():
        if instance.system_metadata.get("preemptible"):
            count += 1

    return -count
Python
def _weigh_object(self, host_state, weight_properties):
    """Higher weights win. We do not want a host with preemptible
    instances selected if there are hosts without them.
    """
    remainder = 0
    for instance in host_state.instances.values():
        if instance.system_metadata.get("preemptible"):
            now = timeutils.utcnow()
            now = now.replace(tzinfo=None)
            ct = instance.created_at.replace(tzinfo=None)
            duration = (now - ct).total_seconds()
            remainder += duration % 3600

    return -remainder
Python
def read_table(source, columns=None, nthreads=1, metadata=None): """ Read a Table from Parquet format Parameters ---------- source: str or pyarrow.io.NativeFile Readable source. For passing Python file objects or byte buffers, see pyarrow.io.PythonFileInterface or pyarrow.io.BufferReader. columns: list If not None, only these columns will be read from the file. nthreads : int, default 1 Number of columns to read in parallel. Requires that the underlying file source is threadsafe metadata : FileMetaData If separately computed Returns ------- pyarrow.Table Content of the file as a table (of columns) """ pf = ParquetFile(source, metadata=metadata) return pf.read(columns=columns, nthreads=nthreads)
Python
def best_fit_transform_point2point(A, B): """ Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions Input: A: Nxm numpy array of corresponding points B: Nxm numpy array of corresponding points Returns: T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B R: mxm rotation matrix t: mx1 translation vector """ assert A.shape == B.shape # get number of dimensions m = A.shape[1] # translate points to their centroids centroid_A = np.mean(A, axis=0) centroid_B = np.mean(B, axis=0) AA = A - centroid_A BB = B - centroid_B # rotation matrix H = np.dot(AA.T, BB) U, S, Vt = np.linalg.svd(H) R = np.dot(Vt.T, U.T) # special reflection case if np.linalg.det(R) < 0: Vt[m-1, :] *= -1 R = np.dot(Vt.T, U.T) # translation t = centroid_B.T - np.dot(R, centroid_A.T) # homogeneous transformation T = np.identity(m + 1) T[:m, :m] = R T[:m, m] = t return T, R, t
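A quick self-check of the routine above (illustrative, not from the original source): transform a random point cloud with a known rotation and translation and confirm both are recovered.

import numpy as np

np.random.seed(0)
A = np.random.rand(10, 3)
angle = 0.3
R_true = np.array([[np.cos(angle), -np.sin(angle), 0],
                   [np.sin(angle),  np.cos(angle), 0],
                   [0, 0, 1]])
t_true = np.array([1.0, -2.0, 0.5])
B = A @ R_true.T + t_true          # B_i = R_true A_i + t_true

T, R, t = best_fit_transform_point2point(A, B)
assert np.allclose(R, R_true, atol=1e-8)
assert np.allclose(t, t_true, atol=1e-8)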
Python
def toO3d(tm, color): """put trimesh object into open3d object""" mesh_o3d = o3d.geometry.TriangleMesh() mesh_o3d.vertices = o3d.utility.Vector3dVector(tm.vertices) mesh_o3d.triangles = o3d.utility.Vector3iVector(tm.faces) mesh_o3d.compute_vertex_normals() vex_color_rgb = np.array(color) vex_color_rgb = np.tile(vex_color_rgb, (tm.vertices.shape[0], 1)) mesh_o3d.vertex_colors = o3d.utility.Vector3dVector(vex_color_rgb) return mesh_o3d
Python
def build_version(): """Build dynamic version and update version in package""" version = f"{MAJOR_VERSION}.{MINOR_VERSION}.{PATCH_VERSION}" update_package_version(HERE / "pkg" / "pydantic_mongo", version=version) return version
Python
def sigmoid(in_x): """ sigmoid: a bounded differentiable real function that is defined for all real input values and has a positive derivative at each point """ return 1.0 / (1 + np.exp(-in_x))
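A few reference values for the function above: sigmoid(0) is exactly 0.5, large positive inputs saturate toward 1, and large negative inputs toward 0.

import numpy as np

print(sigmoid(np.array([-5.0, 0.0, 5.0])))
# -> approximately [0.0067, 0.5, 0.9933]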
Python
def gradient_ascent(data_matrix_in, class_labels): """ Start with the weights all set to 1 repeat R number of times: calculate the gradient of the entire set update the weights by alpha * gradient return the weights vector """ data_matrix = np.mat(data_matrix_in) label_matrix = np.mat(class_labels).transpose() m, n = np.shape(data_matrix) alpha = 0.001 max_cycles = 500 weights = np.ones((n, 1)) for k in range(max_cycles): h = sigmoid(data_matrix * weights) error = (label_matrix - h) weights = weights + alpha * data_matrix.transpose() * error return weights
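A toy usage sketch (not from the original source): a linearly separable two-feature set, with a leading column of 1.0s serving as the bias/intercept term.

# Class 1 points lie above the line y = x; class 0 points lie below it.
data = [[1.0, 1.0, 2.0],
        [1.0, 2.0, 3.0],
        [1.0, 2.0, 1.0],
        [1.0, 3.0, 1.5]]
labels = [1, 1, 0, 0]
weights = gradient_ascent(data, labels)
print(weights)   # a 3x1 np.matrix of fitted weights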
Python
def stochastic_gradient_ascent(data_matrix, class_labels): """ Start with all the weights set to 1 for each piece of data in the dataset: calculate the gradient of one piece of data update the weights vector by alpha * gradient return the weights vector """ m, n = np.shape(data_matrix) alpha = 0.01 weights = np.ones(n) for i in range(m): h = sigmoid(sum(data_matrix[i] * weights)) error = class_labels[i] - h weights = weights + alpha * error * data_matrix[i] return weights
Python
def modified_stochastic_gradient_ascent(data_matrix, class_labels,
                                        iterations=150):
    """
    The alpha changes with each iteration.
    Note that update vectors are randomly selected.
    """
    m, n = np.shape(data_matrix)
    weights = np.ones(n)
    _CONSTANT = 0.0001
    for j in range(iterations):
        # must be a list so that chosen indices can be deleted below
        data_index = list(range(m))
        for i in range(m):
            # the alpha decreases with iteration,
            # but never reaches zero because of the constant
            alpha = 4 / (1.0 + i + j) + _CONSTANT
            # update vectors chosen randomly
            rand_index = int(np.random.uniform(0, len(data_index)))
            h = sigmoid(sum(data_matrix[rand_index] * weights))
            error = class_labels[rand_index] - h
            weights = weights + alpha * error * data_matrix[rand_index]
            del data_index[rand_index]
    return weights
Python
def classify_vector(in_x, weights): """ takes weights and an input vector and calculates the sigmoid; more than 0.5 are 1, otherwise zero """ probability = sigmoid(sum(in_x * weights)) if probability > 0.5: return 1.0 else: return 0.0
Python
def standard_regression(x_arr, y_arr):
    """
    compute the best fit line.
    first compute X.T * X and test if its determinant is zero.
    If so, you cannot get the inverse. If not, compute the w values
    and return them.
    """
    x_matrix = np.mat(x_arr)
    y_matrix = np.mat(y_arr).T
    xTx = x_matrix.T * x_matrix
    if np.linalg.det(xTx) == 0.0:
        logging.warning("Matrix is singular, cannot do inverse")
        return
    ws = xTx.I * (x_matrix.T * y_matrix)
    return ws
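A quick sanity check for the normal-equation solver above: with noise-free data generated from y = 2 + 3x (and a bias column of 1s), the true coefficients come back exactly.

x = [[1.0, 0.0], [1.0, 1.0], [1.0, 2.0], [1.0, 3.0]]
y = [2.0, 5.0, 8.0, 11.0]
ws = standard_regression(x, y)
print(ws)   # approximately [[2.], [3.]]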
Python
def rss_error(y_arr, y_hat_arr): """ returns a single number describing the error of our estimate """ return ((y_arr - y_hat_arr) ** 2).sum()
Python
def ridge_regression(x_matrix, y_matrix, lamb=0.2):
    """
    Ridge regression adds the matrix lambda*I to X^T*X.
    The matrix I is an mxm identity matrix: 1s on the diagonal and
    zeros everywhere else.
    """
    x_t_x = x_matrix.T * x_matrix
    # eye creates the identity matrix; lambda*I is added to X^T*X
    denominator = x_t_x + np.eye(np.shape(x_matrix)[1]) * lamb
    if np.linalg.det(denominator) == 0.0:
        logging.warning("The matrix is singular, cannot do inverse")
        return
    ws = denominator.I * (x_matrix.T * y_matrix)
    return ws
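In matrix form, the closed-form solution implemented above (with the addition written out explicitly) is:

\[
\hat{w}_{\mathrm{ridge}} = \left(X^{\top}X + \lambda I\right)^{-1} X^{\top} y
\]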
Python
def run_ridge_regression(x_arr, y_arr):
    """
    run ridge regression over a number of lambda values;
    remember that you need to normalize the data to give each feature
    equal importance regardless of the units in which it was measured.
    In this case, we normalize by subtracting the mean from each
    feature and dividing by the variance.
    """
    x_matrix = np.mat(x_arr)
    y_matrix = np.mat(y_arr).T
    y_mean = np.mean(y_matrix, 0)
    y_matrix = y_matrix - y_mean
    x_means = np.mean(x_matrix, 0)
    x_variance = np.var(x_matrix, 0)
    x_matrix = (x_matrix - x_means) / x_variance
    number_test_pts = 30
    w_matrix = np.zeros((number_test_pts, np.shape(x_matrix)[1]))
    for i in range(number_test_pts):
        ws = ridge_regression(x_matrix, y_matrix, np.exp(i - 10))
        w_matrix[i, :] = ws.T
    return w_matrix
Python
def stage_wise_linear_regression(x_arr, y_arr, eps=0.01, number_iterations=100): """ Greedily optimizes by looping all the possible features to see how the error changes if you increase or decrease that feature. """ x_matrix = np.mat(x_arr) y_matrix = np.mat(y_arr).T y_mean = np.mean(y_matrix, 0) y_matrix = y_matrix - y_mean x_matrix = regularize(x_matrix) m, n = np.shape(x_matrix) return_mat = np.zeros((number_iterations, n)) ws = np.zeros((n, 1)) ws_test = ws.copy() ws_max = ws.copy() for i in range(number_iterations): logging.info("ws.T = {0}".format(ws.T)) lowest_error = np.inf for j in range(n): for sign in [-1, 1]: ws_test = ws.copy() ws_test[j] += eps * sign y_test = x_matrix * ws_test rss_e = rss_error(y_matrix.A, y_test.A) if rss_e < lowest_error: lowest_error = rss_e ws_max = ws_test ws = ws_max.copy() return_mat[i, :] = ws.T return return_mat
Python
def k_means(dataset, k, distance_measure=euclidean_distance, create_centroids=random_centroid): """ Create k centroids, then assign each point to the closest centroid. Then re-calculate the centroids. Repeat until the points stop changing clusters. """ m = np.shape(dataset)[0] cluster_assessment = np.mat(np.zeros((m, 2))) centroids = create_centroids(dataset, k) cluster_changed = True while cluster_changed: cluster_changed = False for i in range(m): min_distance = np.inf min_index = -1 for j in range(k): dist_j_i = distance_measure(centroids[j, :], dataset[i, :]) if dist_j_i < min_distance: min_distance = dist_j_i min_index = j if cluster_assessment[i, 0] != min_index: cluster_changed = True cluster_assessment[i, :] = min_index, min_distance ** 2 logging.info("centroids: {}".format(centroids)) for cent in range(k): points_in_cluster = dataset[np.nonzero(cluster_assessment[:, 0]. A == cent)[0]] centroids[cent, :] = np.mean(points_in_cluster, axis=0) return centroids, cluster_assessment
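A small usage sketch (illustrative only; it assumes the euclidean_distance and random_centroid helpers referenced in the signature are defined in the same module): two well-separated blobs should produce two clearly separated centroids.

import numpy as np

np.random.seed(1)
blob_a = np.random.randn(20, 2)                          # around (0, 0)
blob_b = np.random.randn(20, 2) + np.array([10.0, 10.0])  # around (10, 10)
data = np.mat(np.vstack([blob_a, blob_b]))

centroids, assignments = k_means(data, 2)
print(centroids)   # one centroid near (0, 0), the other near (10, 10)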
Python
def bisect_k_means(dataset, k, distance_measure=euclidean_distance):
    """
    Choose the cluster with the largest SSE and split it.
    Then repeat until you get the k number of clusters.

    For every cluster, you measure the total error, perform k-means with
    k=2, measure the error after k-means has split the cluster in two,
    and keep the cluster split that gives the lowest error.
    """
    m = np.shape(dataset)[0]
    cluster_assessment = np.mat(np.zeros((m, 2)))
    centroid_0 = np.mean(dataset, axis=0).tolist()[0]
    centroid_list = [centroid_0]

    # set the original error
    for j in range(m):
        cluster_assessment[j, 1] = distance_measure(np.mat(centroid_0),
                                                    dataset[j, :]) ** 2

    while (len(centroid_list) < k):
        lowest_sse = np.inf
        for i in range(len(centroid_list)):
            points_in_current_cluster =\
                dataset[np.nonzero(cluster_assessment[:, 0].A == i)[0], :]
            centroid_matrix, split_cluster_assessment =\
                k_means(points_in_current_cluster, 2, distance_measure)

            # compare the SSE to the current minimum
            sse_split = sum(split_cluster_assessment[:, 1])
            nosplit_index = np.nonzero(cluster_assessment[:, 0].A != i)
            sse_not_split =\
                sum(cluster_assessment[nosplit_index[0], 1])
            logging.info("sse split:\n{split}".format(split=sse_split))
            logging.info("sse NOT split:\n{not_split}".
                         format(not_split=sse_not_split))

            if (sse_split + sse_not_split) < lowest_sse:
                best_centroid_to_split = i
                best_new_centroids = centroid_matrix
                best_cluster_assessmnt = split_cluster_assessment.copy()
                lowest_sse = sse_split + sse_not_split

        best_cluster_index = np.nonzero(best_cluster_assessmnt[:, 0].A == 1)[0]
        best_cluster_assessmnt[best_cluster_index, 0] = len(centroid_list)
        best_cluster_index = np.nonzero(best_cluster_assessmnt[:, 0].A == 0)[0]
        best_cluster_assessmnt[best_cluster_index, 0] = best_centroid_to_split

        logging.info("the best centroid on which to split = {best}".
                     format(best=best_centroid_to_split))
        logging.info("the length of best_cluster_assessmnt = {cluster_len}".
                     format(cluster_len=len(best_cluster_assessmnt)))

        centroid_list[best_centroid_to_split] =\
            best_new_centroids[0, :].tolist()[0]
        # append as a plain list so centroid_list stays a list of lists
        centroid_list.append(best_new_centroids[1, :].tolist()[0])

        assigning_cluster_index =\
            np.nonzero(cluster_assessment[:, 0].A == best_centroid_to_split)[0]
        cluster_assessment[assigning_cluster_index, :] =\
            best_cluster_assessmnt

    return np.mat(centroid_list), cluster_assessment
Python
def local_words(feed_1, feed_0):
    """
    Parse two RSS feeds; remove the most frequently occurring words.
    """
    doc_list = []
    class_list = []
    full_text = []
    min_len = min(len(feed_1['entries']), len(feed_0['entries']))
    for i in range(min_len):
        word_list = core.text_parse(feed_1['entries'][i]['summary'])
        doc_list.append(word_list)
        full_text.extend(word_list)
        class_list.append(1)
        word_list = core.text_parse(feed_0['entries'][i]['summary'])
        doc_list.append(word_list)
        full_text.extend(word_list)
        class_list.append(0)
    vocabulary = core.create_vocabulary(doc_list)
    vocabulary = filter_stopwords(vocabulary, stopwords_file())

    # filter out stopwords
    stopwords = core.get_stopwords(stopwords_file())
    vocabulary = [token for token in vocabulary
                  if not core.is_stopword(token, stopwords)]

    top_thirty_words = core.calculate_most_frequent(vocabulary, full_text)
    for pair_w in top_thirty_words:
        if pair_w[0] in vocabulary:
            vocabulary.remove(pair_w[0])

    # must be a list so that the chosen test indices can be deleted
    training_set = list(range(2 * min_len))
    test_set = []
    for i in range(20):
        random_i = int(random.uniform(0, len(training_set)))
        test_set.append(training_set[random_i])
        del training_set[random_i]

    training_matrix = []
    train_classes = []
    for doc_index in training_set:
        word_vector = core.bag_of_words_to_vector(vocabulary,
                                                  doc_list[doc_index])
        training_matrix.append(word_vector)
        train_classes.append(class_list[doc_index])
    p_0_v, p_1_v, p_spam = core.train_naive_bayes0(array(training_matrix),
                                                   array(train_classes))

    error_count = 0
    for doc_index in test_set:
        word_vector = core.bag_of_words_to_vector(vocabulary,
                                                  doc_list[doc_index])
        classification = core.classify_naive_bayes(array(word_vector),
                                                   p_0_v, p_1_v, p_spam)
        if classification != class_list[doc_index]:
            error_count += 1
    error_rate = float(error_count) / len(test_set)
    logging.info("errors: {0}\terror rate: {1}".format(error_count,
                                                       error_rate))
    return vocabulary, p_0_v, p_1_v
Python
def split_data_set(data_set, axis, value): """ Takes: dataset to split, the feature to split on, and the value of the feature to return. and cut out the feature to split on """ ret_data_set = [] for feature_vec in data_set: if feature_vec[axis] == value: reduced_feature_vec = feature_vec[:axis] reduced_feature_vec.extend(feature_vec[axis + 1:]) logging.info("reduced_feature_vec:\t{0}". format(reduced_feature_vec)) ret_data_set.append(reduced_feature_vec) return ret_data_set
Python
def majority_count(class_list): """ Take a list of class names; return the one with the greatest frequency """ class_count = Counter(class_list) return class_count.most_common()[0][0]
Python
def calculate_shannon_entropy(data_set):
    """
    calculate a count of the number of instances;
    create a dict whose keys are the values in the final col;
    if a key was not encountered previously, one is created;
    for each key, keep track of how many times the label occurs;
    finally use the frequency of all the different labels to calculate
    the probability of that label;
    then sum this up for all the labels
    """
    num_entries = len(data_set)
    label_counts = defaultdict(int)
    for feature_vec in data_set:
        current_label = feature_vec[-1]
        label_counts[current_label] += 1
    shannon_entropy = 0.0
    for key in label_counts:
        prob = float(label_counts[key]) / num_entries
        shannon_entropy -= prob * math.log(prob, 2)
    return shannon_entropy
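A worked example for the function above; the class label is the last element of each record, so a 50/50 split yields exactly one bit of entropy and a pure set yields zero.

mixed = [[1, 'yes'], [1, 'yes'], [0, 'no'], [0, 'no']]
pure = [[1, 'yes'], [1, 'yes'], [1, 'yes']]
print(calculate_shannon_entropy(mixed))   # 1.0  (two classes, 50/50)
print(calculate_shannon_entropy(pure))    # 0.0  (a single class)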
Python
def text_parse(big_string):
    """
    Split text into lowercased tokens longer than two characters.
    This could be generalized further.
    """
    # split on \W+ so the pattern cannot match the empty string
    list_of_tokens = re.split(r'\W+', big_string)
    return [tok.lower() for tok in list_of_tokens if len(tok) > 2]
Python
def classify_0(in_x, data_set, labels, k):
    """For every point in our dataset calculate the distance between
    inX and the current point; sort the distances in increasing order;
    take k items with lowest distances to inX; find the majority class
    among these items; return the majority class as our prediction for
    the class of inX"""
    data_set_size = data_set.shape[0]
    # distance
    diff_mat = tile(in_x, (data_set_size, 1)) - data_set
    sq_diff_mat = diff_mat ** 2
    sq_distances = sq_diff_mat.sum(axis=1)
    distances = sq_distances ** 0.5
    # argsort pulls indices corresponding
    # to sorted array
    sorted_dist_indices = distances.argsort()
    # voting with lowest k indices
    class_count = {}
    for i in range(k):
        vote_i_label = labels[sorted_dist_indices[i]]
        class_count[vote_i_label] = class_count.get(vote_i_label, 0) + 1
    # items() replaces the Python 2-only iteritems()
    sorted_class_count = sorted(class_count.items(),
                                key=operator.itemgetter(1),
                                reverse=True)
    # the most frequent
    return sorted_class_count[0][0]
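A tiny k-nearest-neighbours example for the classifier above (illustrative only): the query point sits next to the 'A' group, so with k=3 two of the three nearest neighbours vote 'A'.

from numpy import array

group = array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])
labels = ['A', 'A', 'B', 'B']
print(classify_0([0.9, 0.9], group, labels, 3))   # -> 'A'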
Python
from numpy import shape, tile, zeros


def auto_norm(data_set):
    """Get the minimum values of each column and place them in min_vals, and
    likewise for max_vals; data_set.min(0) takes the minimums of the columns,
    not the rows. Then calculate the range of values seen in the data. To get
    the normalized values, subtract the column minimums and divide by the
    ranges."""
    min_vals = data_set.min(0)
    max_vals = data_set.max(0)
    ranges = max_vals - min_vals
    norm_data_set = zeros(shape(data_set))
    m = data_set.shape[0]
    norm_data_set = data_set - tile(min_vals, (m, 1))
    norm_data_set = norm_data_set / tile(ranges, (m, 1))
    return norm_data_set, ranges, min_vals
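An illustrative call (values invented for the sketch); each column is scaled independently into [0, 1]:

from numpy import array

raw = array([[10.0, 400.0], [20.0, 800.0], [30.0, 600.0]])
normed, ranges, min_vals = auto_norm(raw)
print(normed)    # [[0.0, 0.0], [0.5, 1.0], [1.0, 0.5]]
print(ranges)    # [ 20. 400.]
print(min_vals)  # [ 10. 400.]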
Python
from numpy import log, ones


def train_naive_bayes0(training_matrix, training_category):
    """Train a naive Bayes model.

    * training_matrix: array of word-count (or bag-of-words) vectors, one per
      document
    * training_category: array of 0s and 1s giving the class of each vector in
      training_matrix
    """
    training_element_count = len(training_matrix)
    word_count = len(training_matrix[0])
    # initialize probabilities for class 0 and class 1; counts start at 1 and
    # denominators at 2.0 so unseen words never give a zero probability
    prob_1 = sum(training_category) / float(training_element_count)
    p0_num = ones(word_count)
    p1_num = ones(word_count)
    p0_denom = 2.0
    p1_denom = 2.0
    for i in range(training_element_count):
        if training_category[i] == 1:
            p1_num += training_matrix[i]
            p1_denom += sum(training_matrix[i])
        else:
            p0_num += training_matrix[i]
            p0_denom += sum(training_matrix[i])
    # take log() to help with numerical underflow
    p1_vector = log(p1_num / p1_denom)
    p0_vector = log(p0_num / p0_denom)
    return p0_vector, p1_vector, prob_1
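A hedged sketch of how the returned log-probability vectors are typically used to classify a new word vector (this companion function is not part of the snippet above):

from numpy import array, log

def classify_naive_bayes(vec_to_classify, p0_vector, p1_vector, prob_1):
    # sum the log word probabilities for the observed words and add the log class prior
    vec = array(vec_to_classify)
    p1 = (vec * p1_vector).sum() + log(prob_1)
    p0 = (vec * p0_vector).sum() + log(1.0 - prob_1)
    return 1 if p1 > p0 else 0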
Python
def apriori_generate(Lk, k):
    """Take a list of frequent itemsets, Lk, and the size k of the candidate
    sets to produce, and return the candidate itemsets of size k. Two sets are
    joined only when their first k-2 elements match, which keeps each candidate
    from being generated more than once."""
    return_list = []
    len_Lk = len(Lk)
    for i in range(len_Lk):
        for j in range(i + 1, len_Lk):
            L1 = list(Lk[i])[:k - 2]
            L2 = list(Lk[j])[:k - 2]
            L1.sort()
            L2.sort()
            if L1 == L2:
                return_list.append(Lk[i] | Lk[j])  # set union
    return return_list
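An illustrative run joining frequent 1-itemsets into candidate 2-itemsets (itemsets invented for the sketch):

L1 = [frozenset([0]), frozenset([1]), frozenset([2])]
print(apriori_generate(L1, 2))
# [frozenset({0, 1}), frozenset({0, 2}), frozenset({1, 2})]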
Python
def create_tree(dataset, minimum_support=1):
    """Build an FP-tree in two passes over the dataset. The first pass counts
    the frequency of each item; the counts are stored in the header table and
    every item whose count is below the minimum support is pruned. The header
    table is then expanded to hold a count and a pointer to the first node of
    each item type. Next the base node, which contains the null set, is
    created. Finally the dataset is scanned again: for each transaction only
    the frequent items are kept, they are ordered, and update_tree is called."""
    header_table = initialize_header(dataset)
    prune_infrequents(header_table, minimum_support)
    frequent_item_set = set(header_table.keys())
    if len(frequent_item_set) == 0:
        return None, None
    for k in header_table:
        header_table[k] = [header_table[k], None]
    return_tree = TreeNode('Null Set', 1, None)
    for transaction_set, count in dataset.items():
        local_d = {}
        for item in transaction_set:
            if item in frequent_item_set:
                local_d[item] = header_table[item][0]
        if len(local_d) > 0:
            update_tree(order_items(local_d), return_tree, header_table, count)
    return return_tree, header_table
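create_tree iterates over dataset.items(), so the expected input is a dict mapping frozenset transactions to their counts; a hedged sketch of building that dict (the create_initial_set name is an assumption, not part of the original code):

def create_initial_set(transactions):
    # Hypothetical helper: wrap a list of transactions into the
    # {frozenset(transaction): count} form that create_tree consumes.
    ret_dict = {}
    for transaction in transactions:
        key = frozenset(transaction)
        ret_dict[key] = ret_dict.get(key, 0) + 1
    return ret_dict

# create_initial_set([['r', 'z'], ['z', 'x', 'y'], ['r', 'z']])
# -> {frozenset({'r', 'z'}): 2, frozenset({'x', 'y', 'z'}): 1}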
Python
def update_tree(items, input_tree, header_table, count):
    """First test whether the first item exists as a child node. If so, update
    that child's count. If not, create a new TreeNode, add it as a child, and
    update the header table to point at the new node. Then call itself
    recursively with the rest (the cdr) of the list."""
    if items[0] in input_tree.children:
        input_tree.children[items[0]].inc(count)
    else:
        input_tree.children[items[0]] = TreeNode(items[0], count, input_tree)
        # link the new node into the header table's node-link chain
        if header_table[items[0]][1] is None:
            header_table[items[0]][1] = input_tree.children[items[0]]
        else:
            update_header(header_table[items[0]][1],
                          input_tree.children[items[0]])
    # recursively call update_tree on the remaining items
    if len(items) > 1:
        # logging.info("recursively calling update_tree on {i}".format(i=items))
        update_tree(items[1:], input_tree.children[items[0]],
                    header_table, count)
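These routines assume a TreeNode class and an update_header helper roughly like the following; this is a sketch inferred from how the attributes are used here, not the original definitions:

class TreeNode:
    # name, occurrence count, link to the next node with the same name,
    # parent pointer, and a dict of children keyed by item name
    def __init__(self, name, count, parent):
        self.name = name
        self.count = count
        self.node_link = None
        self.parent = parent
        self.children = {}

    def inc(self, count):
        self.count += count


def update_header(node_to_test, target_node):
    # walk the node-link chain to its end and attach target_node there
    while node_to_test.node_link is not None:
        node_to_test = node_to_test.node_link
    node_to_test.node_link = target_node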
Python
def find_prefix_path(tree_node):
    """Generate a conditional pattern base for a single item: visit every node
    in the tree that contains that item by following the node_link chain, and
    record each prefix path leading to it together with that node's count."""
    conditional_patterns = {}
    while tree_node is not None:
        prefix_path = []
        ascend_tree(tree_node, prefix_path)
        if len(prefix_path) > 1:
            conditional_patterns[frozenset(prefix_path[1:])] = tree_node.count
        tree_node = tree_node.node_link
    return conditional_patterns
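ascend_tree is referenced but not included in this snippet; a minimal sketch of what it is assumed to do (climb the parent pointers, collecting node names into prefix_path):

def ascend_tree(leaf_node, prefix_path):
    # append this node's name and keep climbing until the root is reached
    if leaf_node.parent is not None:
        prefix_path.append(leaf_node.name)
        ascend_tree(leaf_node.parent, prefix_path)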
Python
def create_dataset(self):
    """Sample data for the Shannon entropy test."""
    data_set = [[1, 1, 'yes'],
                [1, 1, 'yes'],
                [1, 0, 'no'],
                [0, 1, 'no'],
                [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']
    return data_set, labels
Python
from numpy import zeros


def img_to_vector(filename):
    """Convert a 32x32 text image to a 1x1024 vector: create a 1x1024 NumPy
    array, open the file, loop over the first 32 lines, and store the integer
    value of each line's first 32 characters."""
    return_vector = zeros((1, 1024))
    with open(filename) as fr:
        for i in range(32):
            line_str = fr.readline()
            for j in range(32):
                return_vector[0, 32 * i + j] = int(line_str[j])
    return return_vector
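Illustrative usage with a synthetic file (the filename is invented for the sketch); the input is expected to be 32 lines of 32 digit characters:

with open('synthetic_digit.txt', 'w') as out:
    for _ in range(32):
        out.write('0' * 32 + '\n')

vec = img_to_vector('synthetic_digit.txt')
print(vec.shape)       # (1, 1024)
print(int(vec.sum()))  # 0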
Python
def load_tsv_datafile(filename):
    """Load a TSV data file into a list of feature rows and the list of
    corresponding labels; the label sits in the last (rightmost) column
    (i.e. a 1 or a 0)."""
    with open(filename) as fr:
        number_of_features = len(fr.readline().split('\t'))
    data_matrix = []
    label_matrix = []
    with open(filename) as fr:
        for line in fr.readlines():
            pieces = []
            current_line = line.strip().split('\t')
            for i in range(number_of_features - 1):
                pieces.append(float(current_line[i]))
            data_matrix.append(pieces)
            label_matrix.append(float(current_line[-1]))
    return data_matrix, label_matrix
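Illustrative usage (file name and contents invented for the sketch):

with open('sample_data.tsv', 'w') as out:
    out.write('1.0\t2.0\t1\n')
    out.write('3.0\t4.0\t0\n')

data, labels = load_tsv_datafile('sample_data.tsv')
print(data)    # [[1.0, 2.0], [3.0, 4.0]]
print(labels)  # [1.0, 0.0]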