repo (string, lengths 2-99) | file (string, lengths 13-225) | code (string, lengths 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (stringclasses, 1 value) |
---|---|---|---|---|---|---|
apicarver | apicarver-main/testCarver/pythonCode/toolExecutionStats.py | import glob
import os
from constants import APPS
from plotCoverage import getAppData
from runCarver import getExistingCarverRun
from utilsRun import importJson, writeCSV, writeCSV_Dict
from urllib.parse import urlparse
def findResultResponses():
allOutputs = {}
for appName in APPS:
carverOutput = None
outputs = {}
allOutputs[appName] = outputs
try:
existingCarverData = getExistingCarverRun(appName)
carverOutput, fileName = os.path.split(existingCarverData['existingValidCrawls'][0])
outputs["carver"] = carverOutput
outputs["success"] = True
except Exception as ex:
outputs["success"] = False
outputs["message"] = "error finding outputs"
print(ex)
continue
if carverOutput is None:
outputs["success"] = False
outputs["message"] = "error finding outputs"
print("carver output not found for {}".format(appName))
continue
try:
outputs["recordedRequests"] = glob.glob(carverOutput+ "/gen/*/combined_generatedEvents.json")[0]
outputs["filteredRequests"] = glob.glob(carverOutput+ "/gen/*/generatedEvents.json")[0]
outputs["carverResultFile"] = glob.glob(carverOutput+ "/run/*/resultResponses.json")[0]
outputs["proberResultFile"] = glob.glob(carverOutput+ "/oas/*/resultResponses.json")[0]
executedProbes = glob.glob(carverOutput+ "/oas/*/crawlEventsLog.json")
if len(executedProbes) == 0:
executedProbes = glob.glob(carverOutput+ "/oas/*/executedprobeEvents.json")
outputs["executedProbes"] = executedProbes[0]
except Exception as ex:
print("Exception getting output files inside carver output directory")
print(ex)
return allOutputs
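# Added note: the dict returned above is keyed by app name; besides "success"/"message", each entry
# records the carver output directory ("carver") and the paths of the recorded and filtered request
# logs, the carver and prober resultResponses.json files, and the executed-probe log when available.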
def parseAPIResults(resultsJson):
succeeded = 0
failed = 0
status2xx = 0
status3xx = 0
status4xx = 0
status5xx = 0
statusOther = 0
triedUrls = []
succeededUrls = []
executionTime = 0
for result in resultsJson:
executionTime += result['duration']
try:
url = urlparse(result['request']['requestUrl'])
path = url.path
if path not in triedUrls:
triedUrls.append(path)
status = result['response']['status']
if status >=200 and status <300:
status2xx += 1
if path not in succeededUrls:
succeededUrls.append(path)
elif status >=300 and status <400:
status3xx +=1
if path not in succeededUrls:
succeededUrls.append(path)
elif status >=400 and status <500:
status4xx +=1
elif status >=500 and status <600:
status5xx += 1
if path not in succeededUrls:
succeededUrls.append(path)
else:
statusOther += 1
succeeded += 1
except:
failed += 1
return {"succeeded": succeeded, "failed": failed,
"status2xx": status2xx, "status3xx":status3xx, "status4xx": status4xx, "status5xx": status5xx, "statusOther": statusOther, "goodStatus": (status2xx + status3xx + status5xx + statusOther),
"triedUrls": len(triedUrls), "succeededUrls": len(succeededUrls),
"duration": executionTime}
def parseProbeEvents(probeEvents, proberResults):
I2L = []
MD2BiP = []
RA = []
MOP = []
other = []
unknown = []
total = []
succeeded = []
opCheckPoints = 0
cookieCheckPoints = 0
bothCheckPoints = 0
for proberResult in proberResults:
if proberResult['request']['clazz'] == 'Probe' and proberResult['status'] == 'SUCCESS' and not (proberResult['response']['status'] >=400 and proberResult['response']['status'] <500):
succeeded.append(proberResult['request']['requestId'])
if proberResult['checkPoint'] == "OPERATION":
opCheckPoints += 1
elif proberResult['checkPoint'] == 'COOKIE':
cookieCheckPoints += 1
elif proberResult['checkPoint'] == 'BOTH':
bothCheckPoints += 1
for probeEvent in probeEvents:
total.append(probeEvent['requestId'])
if "probeType" not in probeEvent:
print(probeEvent)
unknown.append(probeEvent['requestId'])
continue
probeType = probeEvent["probeType"]
if probeType == "MOP":
MOP.append(probeEvent['requestId'])
elif probeType == "RA":
RA.append(probeEvent['requestId'])
elif probeType == "MDI2L":
I2L.append(probeEvent['requestId'])
else:
other.append(probeEvent['requestId'])
return {"exec_I2L": len(I2L), "exec_RA": len(RA), "exec_MOP": len(MOP), "exec_other": len(other), "exec_BiP": len(unknown), "exec_total": len(probeEvents), "exec_succeededProbes": len(succeeded),
"gen_I2L": len(set(I2L)), "gen_RA": len(set(RA)), "gen_MOP": len(set(MOP)), "gen_other": len(set(other)), "gen_BiP": len(set(unknown)),"gen_succeededProbes": len(set(succeeded)), "gen_total": len(set(total)),
"checkPoints_operation": opCheckPoints, "checkPoints_cookie": cookieCheckPoints, "checkPoints_both": bothCheckPoints}
def getToolExecutionData(allResults):
allOutputs = findResultResponses()
for key in allOutputs:
print(key)
recordedRequests = len(importJson(allOutputs[key]["recordedRequests"]))
filteredRequests = len(importJson(allOutputs[key]["filteredRequests"]))
filteringResults = {"recorded": recordedRequests, "filtered": filteredRequests}
filteringResults['app'] = key
carverResults = parseAPIResults(importJson(allOutputs[key]["carverResultFile"]))
carverResults["app"] = key
carverResults["tool"] = "carver"
proberResults = parseAPIResults(importJson(allOutputs[key]["proberResultFile"]))
proberResults["tool"] = "prober"
proberResults["app"] = key
probingResults = parseProbeEvents(importJson(allOutputs[key]["executedProbes"]), importJson(allOutputs[key]["proberResultFile"]))
probingResults["app"] = key
appResult = {"app": key, "filtering": filteringResults, "carver": carverResults, "prober": proberResults, "probingResults": probingResults}
allResults['filtering'].append(filteringResults)
allResults['execution'].append(carverResults)
allResults['execution'].append(proberResults)
allResults['probing'].append(probingResults)
return allResults
def estParseProbes():
probeEvents = importJson("/TestCarving/testCarver/out/ecomm/20220827_151530/oas/20220827_155323/executedprobeEvents.json")
# parseProbeEvents() also requires the prober results; pass an empty list so this smoke test can run.
probingOutput = parseProbeEvents(probeEvents=probeEvents, proberResults=[])
print(probingOutput)
if __name__=="__main__":
# allOutputs = findResultResponses()
# print(allOutputs)
# estParseProbes()
allResults = {'filtering': [], 'execution': [], 'probing': []}
results = getToolExecutionData(allResults)
print(results)
writeCSV_Dict(csvFields=allResults['filtering'][0].keys(), csvRows=allResults['filtering'], dst="../results/filtering.csv")
writeCSV_Dict(csvFields=allResults['execution'][0].keys(), csvRows=allResults['execution'], dst="../results/execution.csv")
writeCSV_Dict(csvFields=allResults['probing'][0].keys(), csvRows=allResults['probing'], dst="../results/probing.csv")
| 7,591 | 40.714286 | 220 | py |
apicarver | apicarver-main/testCarver/pythonCode/coverageStats.py | import datetime
import glob
import os
import xml.etree.ElementTree as ETree
from bs4 import BeautifulSoup
import constants
# import utilsRun
from subprocess import check_call, CalledProcessError
# combine schemathesis(vanilla) coverage with carver and prober coverage
import utilsRun
from runGeneratedTests import getExistingCrawl
def generateCombinedCoverage(stCov, toMerge, mergedFileName, classFolder):
execFile = os.path.splitext(os.path.abspath(stCov))[0] + mergedFileName + ".exec"
if os.path.exists(execFile):
print("generation already done. Skipping {}".format(execFile))
return constants.STATUS_SKIPPED
mergeCommand = constants.JACOCO_MERGE_COMMAND.copy()
mergeCommand.append(os.path.abspath(stCov))
mergeCommand.append(os.path.abspath(toMerge))
mergeCommand.append("--destfile")
mergeCommand.append(execFile)
try:
check_call(mergeCommand)
except Exception as ex:
print("Could not merge files? ")
print(ex)
reportCommand = constants.JACOCO_REPORT_COMMAND.copy()
reportCommand.append('--xml')
xmlFile = os.path.splitext(os.path.abspath(stCov))[0] + mergedFileName + ".xml"
reportCommand.append(xmlFile)
reportCommand.append(execFile)
reportCommand.append('--classfiles')
reportCommand.append(classFolder)
try:
check_call(reportCommand)
except Exception as ex:
print("Could not generate report? ")
print(ex)
return constants.STATUS_ERRORED
return constants.STATUS_SUCCESSFUL
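# Added note: generateCombinedCoverage() assumes constants.JACOCO_MERGE_COMMAND and
# constants.JACOCO_REPORT_COMMAND are argv prefixes for the JaCoCo CLI ("merge" and "report"
# subcommands); it merges the two .exec files into <stCov stem><mergedFileName>.exec and then renders
# an XML report for the merged data against the compiled classes in classFolder.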
def parseJacocoResults(xmlFile):
try:
print(xmlFile)
eTree = ETree.parse(xmlFile)
branchResult = None
instrResult = None
try:
branchNode = eTree.findall("counter[@type='BRANCH']")[0]
print(branchNode)
branchResult = {"type": "branch", "missed": int(branchNode.attrib['missed']),
"covered": int(branchNode.attrib['covered'])
, "total": int(branchNode.attrib['missed']) + int(branchNode.attrib['covered'])}
print(branchResult)
except Exception as ex1:
print("Exception getting branch result for {}".format(xmlFile))
print(ex1)
try:
instNode = eTree.findall("counter[@type='INSTRUCTION']")[0]
instrResult = {"type": "instruction", "missed": int(instNode.attrib['missed']),
"covered": int(instNode.attrib['covered'])
, "total": int(instNode.attrib['missed']) + int(instNode.attrib['covered'])}
print(instrResult)
except Exception as ex2:
print("Exception getting Instruction result for {}".format(xmlFile))
print(ex2)
return {"branch": branchResult, "instruction": instrResult}
except Exception as ex:
print(ex)
print("Error getting coverage for {}".format("xmlFile"))
return None
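# Added sketch (illustrative only): a minimal JaCoCo-style report that parseJacocoResults() can read;
# the counter values are made up and the temp-file round trip is only for demonstration.
def exampleParseJacocoResults():
    import tempfile
    xml = ('<report name="demo">'
           '<counter type="INSTRUCTION" missed="120" covered="380"/>'
           '<counter type="BRANCH" missed="10" covered="30"/>'
           '</report>')
    with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as tmp:
        tmp.write(xml)
    # Expect branch coverage 30/40 and instruction coverage 380/500.
    print(parseJacocoResults(tmp.name))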
def parseNYCResults(htmlFile):
try:
with open(htmlFile) as html:
soup = BeautifulSoup(html, 'html.parser')
try:
rowElem = soup.find_all('td', attrs={"data-value": "app"})[0].parent
except Exception as ex:
print("Unable to get coverage for {}".format(htmlFile))
return None
branchNode = rowElem.find_all('td', 'abs')[1].get_text().split("/")
instNode = rowElem.find_all('td', 'abs')[0].get_text().split("/")
branchResult = {"type": "branch", "missed": int(branchNode[1]) - int(branchNode[0]),
"covered": int(branchNode[0])
, "total": int(branchNode[1])}
instrResult = {"type": "instruction", "missed": int(instNode[1]) - int(instNode[0]),
"covered": int(instNode[0])
, "total": int(instNode[1])}
print(branchResult)
print(instrResult)
return {"branch": branchResult, "instruction": instrResult}
except Exception as ex:
print("Exception getting NYC coverage data for {}", htmlFile)
print(ex)
return None
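# Added note: parseNYCResults() scrapes an nyc/istanbul lcov-report index.html: it locates the row
# whose cell carries data-value="app" and reads the "covered/total" pairs from that row's "abs" cells,
# treating the first pair as statement ("instruction") coverage and the second as branch coverage.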
def getRawCovFiles(appName):
appOutput = os.path.abspath(os.path.join("..", "out", appName))
if not os.path.exists(appOutput):
print("no output folder for {}".format(appName))
return None
if appName == "realworld":
carverCov = glob.glob(appOutput + "/*/run/*/cov*/raw/")
proberCov = glob.glob(appOutput + "/*/oas/*/allPr_cov*/raw/")
stCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/raw/")
emCov = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov/raw/")
stCarverCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_CARVER + "/*/cov/raw/")
stProberCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_PROBER + "/*/cov/raw/")
# elif appName == "jawa":
# carverCov = glob.glob(appOutput + "/*/run/*/cov*/" + constants.COV_EXEC)
# proberCov = glob.glob(appOutput + "/*/oas/*/allPr_cov*/" + constants.COV_EXEC)
# stCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/" + constants.COV_EXEC)
# stCarverCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_CARVER + "/*/cov/" + constants.COV_EXEC)
# stProberCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_PROBER + "/*/cov/" + constants.COV_EXEC)
elif appName == "booker":
carverCov = glob.glob(appOutput + "/*/run/*/cov*/")
proberCov = glob.glob(appOutput + "/*/oas/*/allPr_cov*/")
stCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov*/")
emCov = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov*/")
stCarverCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_CARVER + "/*/cov*/")
stProberCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_PROBER + "/*/cov*/")
else:
carverCov = glob.glob(appOutput + "/*/run/*/cov*/" + constants.COV_EXEC)
proberCov = glob.glob(appOutput + "/*/oas/*/allPr_cov*/" + constants.COV_EXEC)
stCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/" + constants.COV_EXEC)
emCov = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov/" + constants.COV_EXEC)
stCarverCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_CARVER + "/*/cov/" + constants.COV_EXEC)
stProberCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_PROBER + "/*/cov/" + constants.COV_EXEC)
return {"carverCov": carverCov,
"proberCov": proberCov,
"stCov": stCov,
"emCov": emCov,
"stCarver": stCarverCov,
"stProber": stProberCov}
def getCovFiles(appName):
appOutput = os.path.abspath(os.path.join("..", "out", appName))
if not os.path.exists(appOutput):
print("no output folder for {}".format(appName))
return None
try:
print(getExistingCrawl(appName, "HYBRID", -1.0, 30, ALL_CRAWLS = os.path.abspath(os.path.join("..", "crawlOut"))))
crawlOutput, file = os.path.split(
getExistingCrawl(appName, "HYBRID", -1.0, 30, ALL_CRAWLS = os.path.abspath(os.path.join("..", "crawlOut")))
['existingValidCrawls'][0])
except Exception as ex:
print(ex)
print("Cannot get Crawl folder")
crawlOutput = None
uiTestCov = None
if appName == "realworld":
uiCov = glob.glob(appOutput + "/*/cov*/lcov-report/" + constants.NYC_REPORT)
if crawlOutput is not None:
uiTestCov = glob.glob(crawlOutput + "/test-results/0/nyc_output/lcov-report/" + constants.NYC_REPORT)
carverCov = glob.glob(appOutput + "/*/run/*/cov*/lcov-report/" + constants.NYC_REPORT)
proberCov = glob.glob(appOutput + "/*/oas/*/allPr_cov*/lcov-report/" + constants.NYC_REPORT)
stCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/lcov-report/" + constants.NYC_REPORT)
stCarverMerge = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/lcov-report/" + constants.NYC_CARVER_REPORT)
stProberMerge = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/lcov-report/" + constants.NYC_PROBER_REPORT)
emCov = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov/lcov-report/" + constants.NYC_REPORT)
emCarverMerge = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov/lcov-report/" + constants.NYC_CARVER_REPORT)
emProberMerge = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov/lcov-report/" + constants.NYC_PROBER_REPORT)
stCarverCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_CARVER + "/*/cov/lcov-report/" + constants.NYC_REPORT)
stProberCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_PROBER + "/*/cov/lcov-report/" + constants.NYC_REPORT)
elif appName == "jawa" or appName == "medical":
uiCov = glob.glob(appOutput + "/*/cov*/" + constants.COV_JAWA_XML)
if crawlOutput is not None:
uiTestCov = glob.glob(crawlOutput + "/test-results/0/cov/" + constants.COV_JAWA_XML)
carverCov = glob.glob(appOutput + "/*/run/*/cov*/" + constants.COV_JAWA_XML)
proberCov = glob.glob(appOutput + "/*/oas/*/allPr_cov*/" + constants.COV_JAWA_XML)
stCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/" + constants.COV_JAWA_XML)
stCarverMerge = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/" + constants.COV_JAWA_CARVER_XML)
stProberMerge = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/" + constants.COV_JAWA_PROBER_XML)
emCov = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov/" + constants.COV_JAWA_XML)
emCarverMerge = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov/" + constants.COV_JAWA_CARVER_XML)
emProberMerge = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov/" + constants.COV_JAWA_PROBER_XML)
stCarverCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_CARVER + "/*/cov/" + constants.COV_JAWA_XML)
stProberCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_PROBER + "/*/cov/" + constants.COV_JAWA_XML)
elif appName == "parabank":
uiCov = glob.glob(appOutput + "/*/cov*/" + constants.COV_XML)
if crawlOutput is not None:
uiTestCov = glob.glob(crawlOutput + "/test-results/0/cov/" + constants.COV_XML)
carverCov = glob.glob(appOutput + "/*/run/*/cov*/" + constants.COV_XML)
proberCov = glob.glob(appOutput + "/*/oas/*/allPr_cov*/" + constants.COV_XML)
stCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/" + constants.COV_XML)
stCarverMerge = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/" + constants.COV_PARABANK_CARVER_XML)
stProberMerge = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/" + constants.COV_PARABANK_PROBER_XML)
emCov = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov/" + constants.COV_XML)
emCarverMerge = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov/" + constants.COV_PARABANK_CARVER_XML)
emProberMerge = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov/" + constants.COV_PARABANK_PROBER_XML)
stCarverCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_CARVER + "/*/cov/" + constants.COV_XML)
stProberCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_PROBER + "/*/cov/" + constants.COV_XML)
elif appName == "booker":
uiCov = glob.glob(appOutput + "/*/cov*/")
if crawlOutput is not None:
uiTestCov = glob.glob(crawlOutput + "/test-results/0/cov/")
carverCov = glob.glob(appOutput + "/*/run/*/cov*/")
proberCov = glob.glob(appOutput + "/*/oas/*/allPr_cov*/")
stCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov*/")
stCarverMerge = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov*/")
stProberMerge = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov*/")
emCov = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov*/")
emCarverMerge = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov*/")
emProberMerge = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov*/")
stCarverCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_CARVER + "/*/cov*/")
stProberCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_PROBER + "/*/cov*/")
else:
uiCov = glob.glob(appOutput + "/*/cov*/" + constants.COV_XML)
if crawlOutput is not None:
uiTestCov = glob.glob(crawlOutput + "/test-results/0/cov/" + constants.COV_XML)
carverCov = glob.glob(appOutput + "/*/run/*/cov*/" + constants.COV_XML)
proberCov = glob.glob(appOutput + "/*/oas/*/allPr_cov*/" + constants.COV_XML)
stCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/" + constants.COV_XML)
stCarverMerge = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/" + constants.COV_CARVER_XML)
stProberMerge = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_OUTPUT + "/*/cov/" + constants.COV_PROBER_XML)
emCov = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov/" + constants.COV_XML)
emCarverMerge = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov/" + constants.COV_CARVER_XML)
emProberMerge = glob.glob(appOutput + "/" + constants.EVOMASTER_OUTPUT + "/*/cov/" + constants.COV_PROBER_XML)
stCarverCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_CARVER + "/*/cov/" + constants.COV_XML)
stProberCov = glob.glob(appOutput + "/" + constants.SCHEMATHESIS_PROBER + "/*/cov/" + constants.COV_XML)
return {
"uiCov": uiCov,
"uiTestCov": uiTestCov,
"carverCov": carverCov,
"proberCov": proberCov,
"stCov": stCov,
"stCarverMerge": stCarverMerge,
"stProberMerge": stProberMerge,
"emCov": emCov,
"emCarverMerge": emCarverMerge,
"emProberMerge": emProberMerge,
"stCarver": stCarverCov,
"stProber": stProberCov
}
def getResultForCovFile(covRecords, appName, tool):
if appName == "realworld":
result = parseNYCResults(covRecords)
elif appName == "booker":
if tool == "stCarverMerge" or tool == "emCarverMerge":
pattern = "carver"
elif tool == "stProberMerge" or tool == "emProberMerge":
pattern = "prober"
else:
pattern = ""
result = mergeCovFiles(covRecords, pattern)
else:
result = parseJacocoResults(covRecords)
return result
def getAppResults(appName):
results = []
errors = []
covFiles = getCovFiles(appName)
print(covFiles)
if covFiles is None:
print("Ignoring App because no Coverage Data available.")
status = constants.STATUS_SKIPPED
return [{"app": appName, "status": status, "message": "No coverage data available"}]
for key in covFiles.keys():
if key in ["uiCov", "uiTestCov", "carverCov", "proberCov"]:
try:
covRecords = covFiles[key][0]
except Exception as ex:
print(ex)
continue
if covRecords is not None:
result = getResultForCovFile(covRecords=covRecords, appName=appName, tool=key)
if result is None:
result = {}
result['error'] = "error"
result['app'] = appName
result['tool'] = key
result['file'] = covRecords
errors.append(result)
else:
result['branch']['app'] = appName
result['branch']['tool'] = key
result['branch']['file'] = covRecords
results.append(result['branch'])
result['instruction']['app'] = appName
result['instruction']['tool'] = key
result['instruction']['file'] = covRecords
results.append(result['instruction'])
else:
try:
covRecordEntries = covFiles[key]
except Exception as ex:
print(ex)
continue
toolResults = []
avgResult = {"branch": {"type": "branch", "missed": 0,"covered": 0, "total": 0}, "instruction": {"type": "instruction", "missed": 0,"covered": 0, "total": 0}}
for runIndex in range(len(covRecordEntries)):
covRecords = covRecordEntries[runIndex]
result = getResultForCovFile(covRecords=covRecords, appName=appName, tool=key)
if result is None:
result = {}
result['error'] = "error"
result['app'] = appName
result['tool'] = key+str(runIndex)
result['file'] = covRecords
errors.append(result)
else:
avgResult['branch']['covered'] += result['branch']['covered']
avgResult['branch']['missed'] += result['branch']['missed']
avgResult['branch']['total'] += result['branch']['total']
result['branch']['app'] = appName
result['branch']['tool'] = key+str(runIndex)
result['branch']['file'] = covRecords
toolResults.append(result['branch'])
avgResult['instruction']['covered'] += result['instruction']['covered']
avgResult['instruction']['missed'] += result['instruction']['missed']
avgResult['instruction']['total'] += result['instruction']['total']
result['instruction']['app'] = appName
result['instruction']['tool'] = key+str(runIndex)
result['instruction']['file'] = covRecords
toolResults.append(result['instruction'])
if toolResults is not None and len(toolResults) >0:
avgResult['branch']['app'] = appName
avgResult['branch']['tool'] = key
avgResult['branch']['file'] = "Aggregate" + "_" + appName + "_" + key
results.append(avgResult['branch'])
avgResult['instruction']['app'] = appName
avgResult['instruction']['tool'] = key
avgResult['instruction']['file'] = "Aggregate" + "_" + appName + "_" + key
results.append(avgResult['instruction'])
results.extend(toolResults)
return results
def getAllResults():
totalResults = []
for app in constants.APPS:
results = getAppResults(app)
if results is not None:
totalResults.extend(results)
return totalResults
def parseNYCTests():
parseNYCResults("/TestCarving/testCarver/out/realworld/schemathesis_prober/cov/lcov-report/index.html")
def mergeCovFiles(covFolder, pattern=""):
# covFiles = glob.glob(covFolder + pattern)
# print(covFiles)
finalResult = {"branch": {"type": "branch", "missed": 0, "covered":0, "total":0},
"instruction": {"type": "instruction", "missed": 0, "covered":0, "total":0}}
# for covFile in covFiles:
for module in constants.BOOKER_MODULES:
covFile = os.path.join(os.path.abspath(covFolder), module + pattern + ".xml")
results = parseJacocoResults(covFile)
if results is not None:
if "branch" in results and results["branch"] is not None:
finalResult["branch"]["missed"] += results["branch"]["missed"]
finalResult["branch"]["covered"] += results["branch"]["covered"]
finalResult["branch"]["total"] += results["branch"]["total"]
if "instruction" in results and results["instruction"] is not None:
finalResult["instruction"]["missed"] += results["instruction"]["missed"]
finalResult["instruction"]["covered"] += results["instruction"]["covered"]
finalResult["instruction"]["total"] += results["instruction"]["total"]
return finalResult
def mergeCovFilesTests():
finalResult = mergeCovFiles("/TestCarving/testCarver/out/booker")
print(finalResult)
def generateCombinedCoverageForApp(appName, TOOLS):
succeeded = []
errored = []
skipped = []
rawCovFiles = getRawCovFiles(appName)
if appName == "petclinic" or appName == "parabank" or appName == "jawa" or appName == "ecomm" or appName == "medical" or appName == "shopizer":
# These apps have a single module
classFiles = os.path.abspath(os.path.join("..", "src", "main", "resources", "webapps", appName, "target", "classes"))
carverCov = rawCovFiles["carverCov"]
proberCov = rawCovFiles["proberCov"]
for tool in rawCovFiles.keys():
if (tool in ["carverCov", "proberCov", "uiCov", "uiTestCov", "stProberCov"]) or (tool not in TOOLS):
print("Skipping merge {} : {}".format(appName, tool))
continue
for rawCov in rawCovFiles[tool]:
stCov = rawCov
carverMerge = carverCov[0]
proberMerge = proberCov[0]
status = generateCombinedCoverage(stCov=stCov, toMerge=carverMerge, mergedFileName="carver", classFolder=classFiles)
if status == constants.STATUS_SKIPPED:
skipped.append({"st": stCov, "toMerge": "carver"})
elif status == constants.STATUS_ERRORED:
errored.append({"st": stCov, "toMerge": "carver"})
elif status == constants.STATUS_SUCCESSFUL:
succeeded.append({"st": stCov, "toMerge": "carver"})
status = generateCombinedCoverage(stCov=stCov, toMerge=proberMerge, mergedFileName="prober", classFolder=classFiles)
if status == constants.STATUS_SKIPPED:
skipped.append({"st": stCov, "toMerge": "prober"})
elif status == constants.STATUS_ERRORED:
errored.append({"st": stCov, "toMerge": "prober"})
elif status == constants.STATUS_SUCCESSFUL:
succeeded.append({"st": stCov, "toMerge": "prober"})
if appName == "booker":
modules = constants.BOOKER_MODULES
for module in modules:
classFiles = os.path.abspath(os.path.join("..", "src", "main", "resources", "webapps", appName, "target", module))
carverCov = os.path.join(rawCovFiles["carverCov"][0], module + ".exec")
proberCov = os.path.join(rawCovFiles["proberCov"][0], module + ".exec")
for tool in rawCovFiles.keys():
if (tool in ["carverCov", "proberCov", "uiCov", "uiTestCov", "stProberCov"]) or (tool not in TOOLS):
continue
for rawCov in rawCovFiles[tool]:
stCov = os.path.join(rawCov, module + ".exec")
carverMerge = carverCov
proberMerge = proberCov
status = generateCombinedCoverage(stCov=stCov, toMerge=carverMerge, mergedFileName="carver", classFolder=classFiles)
if status == constants.STATUS_SKIPPED:
skipped.append({"st": stCov, "toMerge": "carver"})
elif status == constants.STATUS_ERRORED:
errored.append({"st": stCov, "toMerge": "carver"})
elif status == constants.STATUS_SUCCESSFUL:
succeeded.append({"st": stCov, "toMerge": "carver"})
status = generateCombinedCoverage(stCov=stCov, toMerge=proberMerge, mergedFileName="prober", classFolder=classFiles)
if status == constants.STATUS_SKIPPED:
skipped.append({"st": stCov, "toMerge": "prober"})
elif status == constants.STATUS_ERRORED:
errored.append({"st": stCov, "toMerge": "prober"})
elif status == constants.STATUS_SUCCESSFUL:
succeeded.append({"st": stCov, "toMerge": "prober"})
print(succeeded)
print(skipped)
print(errored)
print("succeeded {}, errored {}, skipped {}".format(len(succeeded), len(errored), len(skipped)))
return succeeded, errored, skipped
def combineCovTest():
stCov = "/TestCarving/testCarver/out/ecomm/schemathesis/0/cov/cov.exec"
toMerge = "/TestCarving/testCarver/out/ecomm/20220824_195922/oas/20220824_200840/allPr_cov_20220824_201447/cov.exec"
mergedFileName = "Prober"
classFiles = "/TestCarving/testCarver/src/main/resources/webapps/ecomm/target/classes"
generateCombinedCoverage(stCov=stCov, toMerge=toMerge, mergedFileName=mergedFileName, classFolder=classFiles)
ALL_TOOLS = ["uiCov", "uiTestCov", "carverCov", "proberCov", "stCov",
"stCarverMerge", "stProberMerge", "emCov", "emCarverMerge", "emProberMerge", "stCarver", "stProber"]
if __name__ == "__main__":
# parseJacocoResults("coverage/cov/cov.xml")
# results = getAppResults("booker")
# print(results)
# parseNYCTests()
# mergeCovFilesTests()
# combineCovTest()
# generateCombinedCoverageForApp("petclinic", TOOLS=["emCov"])
# generateCombinedCoverageForApp("parabank", TOOLS=["emCov"])
# generateCombinedCoverageForApp("booker", TOOLS=["emCov"])
# generateCombinedCoverageForApp("medical", TOOLS=["emCov"])
# generateCombinedCoverageForApp("ecomm", TOOLS=["emCov"])
# # generateCombinedCoverageForApp("realworld", TOOLS=["emCov"])
# generateCombinedCoverageForApp("jawa", TOOLS=["emCov"])
# generateCombinedCoverageForApp("medical")
#
totalResults = getAllResults()
print(totalResults)
totalResults = [result for result in totalResults if 'status' not in result.keys()]
# utilsRun.exportJson(jsonData=totalResults, file="../results/cov_"+datetime.datetime.now().strftime("%Y%m%d-%H%M%S")+".json")
utilsRun.writeCSV_Dict(totalResults[0].keys(), csvRows=totalResults,dst="../results/cov_"+datetime.datetime.now().strftime("%Y%m%d-%H%M%S")+".csv")
| 26,599 | 52.846154 | 170 | py |
apicarver | apicarver-main/testCarver/pythonCode/runCarver.py | import os
import shutil
from datetime import datetime
# from globalNames import FILTER, THRESHOLD_SETS, DB_SETS, APPS, isDockerized, DOCKER_LOCATION, isNd3App, getHostNames, \
# ALGOS, getDockerName, getDockerList, getURLList
import glob
from constants import APPS, RUN_CARVER_COMMAND, STATUS_SUCCESSFUL, STATUS_SKIPPED, STATUS_ERRORED
from utilsRun import restartDocker, cleanup, monitorProcess, changeDirectory, startProcess
######################## REGRESSION UTILS ##################
######################## ######## ##################
def runAllApps(RUNTIME=30):
successful = []
unsuccessful = []
skipped = []
for app in APPS:
if app in excludeApps:
continue
status, command = runAlgo(app, RUNTIME)
if status == STATUS_SUCCESSFUL:
successful.append(command)
elif status == STATUS_SKIPPED:
skipped.append(command)
elif status == STATUS_ERRORED:
unsuccessful.append(command)
print("successful : {0}".format(str(len(successful))))
print(successful)
print("skipped : {0}".format(str(len(skipped))))
print(skipped)
print("unsuccessful : {0}".format(str(len(unsuccessful))))
print(unsuccessful)
if DRY_RUN:
print("Predicted run time : " + str(RUNTIME * len(successful)))
def getExistingCarverRun(appName, ALL_CRAWLS=os.path.join(os.path.abspath(".."), "out")):
existingValidCrawls = []
crawljaxOutputPath = os.path.abspath(os.path.join(ALL_CRAWLS, appName))
if os.path.exists(crawljaxOutputPath):
existingValidCrawls = glob.glob(crawljaxOutputPath + "/*/uiTest_runResult.json")
return {"path": crawljaxOutputPath, "existingValidCrawls": existingValidCrawls}
return {"path": None, "existingValidCrawls": existingValidCrawls}
def runAlgo(appName, runtime,
logFile=os.path.join("logs", "carverLog_" + str(datetime.now().strftime("%Y%m%d-%H%M%S")) + ".log"),
rerun=False):
command = RUN_CARVER_COMMAND.copy()
command.append(appName)
command.append(str(runtime))
# host = "localhost"
# if(isDockerized(appName)):
# host = "192.168.99.101"
existingCrawlData = getExistingCarverRun(appName)
existingValidCrawls = existingCrawlData['existingValidCrawls']
crawljaxOutputPath = existingCrawlData['path']
if (not rerun):
if crawljaxOutputPath is not None and os.path.exists(crawljaxOutputPath):
if len(existingValidCrawls) == 0:
# shutil.rmtree(crawljaxOutputPath)
print("No existing output. Continuing to run")
else:
print("Ignoring run because a crawl already exists.")
print("Call with rerun=True for creating a new crawl with the same configuration")
status = STATUS_SKIPPED
return status, command
if DRY_RUN:
status = STATUS_SUCCESSFUL
return status, command
#
# if isDockerized(appName):
# # restartDocker(appName)
# restartDocker(getDockerName(appName))
print("sending command {0}".format(command))
proc = startProcess(command, logFile, changeDir="..")
if proc == None:
print("Ignoring error command.")
status = STATUS_ERRORED
return status, command
timeout = 200
status = monitorProcess(proc, runtime, timeout=timeout, crawljaxOutputPath=crawljaxOutputPath,
existing=len(existingValidCrawls))
print("Done : {0}".format(command))
cleanup(appName)
return status, command
###########################################################################
##Tests ############
###########################################################################
def CleanupTest():
cleanup()
print("cleanup tested")
def RestartDockerTest():
restartDocker("dimeshift")
def ChangeDirTest():
current = os.getcwd()
print(os.getcwd())
changeDirectory("..")
print(os.getcwd())
changeDirectory(current)
print(os.getcwd())
def GetExistingTest():
for app in APPS:
print(getExistingCarverRun(app))
###########################################################################
## Main Code ############
###########################################################################
DRY_RUN = False
excludeApps = ['tmf', 'mdh']
if __name__ == "__main__":
print("hello")
# GetExistingTest()
runAllApps(30)
| 4,439 | 29.62069 | 121 | py |
apicarver | apicarver-main/testCarver/pythonCode/plotCoverage.py | import json
import os
from urllib.parse import urlsplit, parse_qs
import ruamel.yaml
import constants
import runRestats
import matplotlib.pyplot as plt
def parsePostData(postData):
params = {}
if postData is None:
print("Cannot parse None")
return None
if type(postData) is dict and "string" in postData.keys():
postData = postData["string"]
try:
split = postData.split(sep="&")
for splitItem in split:
if splitItem is None or len(splitItem.strip()) == 0:
continue
paramItemSplit = splitItem.split(sep="=")
if len(paramItemSplit) >= 2:
name = paramItemSplit[0]
value = "".join(paramItemSplit[1:])
elif len(paramItemSplit) == 1:
name = paramItemSplit[0]
value = ''
else:
continue
params[name] = value
except:
print("cannot parse {}".format(postData))
return params
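# Added examples (illustrative): parsePostData("a=1&b=2&c") returns {'a': '1', 'b': '2', 'c': ''}, and
# a value containing '=' is re-joined without the separator, e.g. "tok=x=y" yields {'tok': 'xy'}.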
def yamlResponse2Dict(yamlResult):
request = yamlResult["request"]
response = yamlResult["response"]
if 'body' in request:
postData = parsePostData(request['body'])
else:
postData = {}
parameters = []
path = urlsplit(request['uri'])[2]
queryParam = parse_qs(urlsplit(request["uri"])[3])
# Add the query parameters to the parameters list.
for k, v in queryParam.items():
parameters.append({'in': 'query', 'name': k, 'value': v[0]})
for header in request["headers"]:
parameters.append({'in': 'header', 'name': header, 'value': request["headers"][header]})
requestDict = {
'method': request["method"].lower(),
'url': request["uri"],
'version': "HTTP 1.0",
'path': path,
'parameters': parameters,
'body': postData
}
if response is not None:
status = response["status"]["code"]
message = response["status"]["message"]
responseParams = []
for header in response["headers"]:
parameters.append({'in': 'header', 'name': header, 'value': response["headers"][header]})
# body = response["body"]
body = ''
responseDict = {
'status': status,
'message': message,
'parameters': responseParams,
'body': body
}
else:
responseDict = {}
return {"request": requestDict, "response":responseDict}
def addCassetteEntries(yamlResponses):
requestResponses = []
statusList = []
for yamlResponse in yamlResponses:
# print(yamlResponse)
requestResponse = yamlResponse2Dict(yamlResponse)
if requestResponse is not None:
requestResponses.append(requestResponse)
for requestResponse in requestResponses:
request = requestResponse['request']
response = requestResponse['response']
if response is not None and "status" in response.keys():
statusEntry = response["status"]
if isinstance(statusEntry, str):
status = int(statusEntry)
elif type(statusEntry) is dict and "code" in statusEntry.keys():
status = int(statusEntry["code"])
statusList.append(status)
print("Total" + str(len(statusList)))
print("Error client (4xx) - " + str(len([status for status in statusList if status>=400 and status <500])))
print("Success (2xx) - " + str(len([status for status in statusList if status>=200 and status <400])))
print("Error server (5xx) - " + str(len([status for status in statusList if status>=500])))
# print()
return statusList
def getStatsListFromCassette(yamlResponsesPath):
try:
with open(yamlResponsesPath) as yamlFile:
yaml = ruamel.yaml.YAML(typ='safe')
data = yaml.load(yamlFile)
yamlResponses = json.loads(json.dumps(data))
statusList = addCassetteEntries(yamlResponses["http_interactions"])
return statusList
except Exception as ex:
print(ex)
print("Unable to parse yaml from {}".format(yamlResponsesPath))
return None
def getAppData(appName):
print(appName)
statusDict = {}
toolOutputs = runRestats.getExistingOutput(appName)
stOutput = toolOutputs["stOutput"][0]
if stOutput is not None:
statusList = getStatsListFromCassette(stOutput)
if statusList is not None:
statusDict["stOutput"] = statusList
stCarver = toolOutputs["stCarver"][0]
if stCarver is not None:
statusList = getStatsListFromCassette(stCarver)
if statusList is not None:
statusDict["stCarver"] = statusList
stProber = toolOutputs["stProber"][0]
if stProber is not None:
statusList = getStatsListFromCassette(stProber)
if statusList is not None:
statusDict["stProber"] = statusList
return statusDict
def getGraphData(statusList):
graphData = []
dataType = []
y = 0
for status in statusList:
if status>=400 and status<500:
graphData.append(y)
dataType.append(0)
elif status>=200 and status<400:
y = y+1
graphData.append(y)
dataType.append(1)
elif status>=500:
y = y+1
graphData.append(y)
dataType.append(2)
return {"y_data" : graphData, "type": dataType}
def plotApp(appName, ax):
graphDataList = []
statuses = getAppData(appName)
print(statuses.keys())
for tool in statuses.keys():
statusList = statuses[tool]
graphData = getGraphData(statusList)
graphData["tool"] = tool
graphDataList.append(graphData)
markers_on = [i for i, j in enumerate(graphData["type"]) if j == 2]
ax.plot(graphData["y_data"], '-D', markevery=markers_on, label=tool)
ax.legend()
def exampleFigure():
fig, ax = plt.subplots(nrows=1, ncols=len(constants.APPS), squeeze=False)
fig.set_size_inches(20,4)
for index in range(len(constants.APPS)):
ax[0][index].plot(range(10), '-D', markevery=[2,4], label=str(index))
ax[0][index].set_title(constants.APPS[index])
plt.show()
def plotAllApps():
fig, ax = plt.subplots(ncols=len(constants.APPS), nrows=1, squeeze=False)
fig.set_size_inches(20,4)
for index in range(len(constants.APPS)):
plotApp(constants.APPS[index], ax[0][index])
ax[0][index].set_title(constants.APPS[index])
plt.show()
print('hello')
if __name__ == "__main__":
# exampleFigure()
plotAllApps() | 6,614 | 29.767442 | 111 | py |
a2dr | a2dr-master/setup.py | from setuptools import setup, find_packages
import codecs
import os.path
# code for single sourcing versions
# reference: https://packaging.python.org/guides/single-sourcing-package-version/
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
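# Added note: get_version() expects the target file to contain a line such as
# __version__ = "<version string>" (single or double quotes); it returns the quoted value and
# raises a RuntimeError if no such line is found.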
# read the contents of your README file
# reference: https://packaging.python.org/guides/making-a-pypi-friendly-readme/
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='a2dr',
version=get_version("a2dr/__init__.py"),
description='A Python package for type-II Anderson accelerated Douglas-Rachford splitting (A2DR).',
url='https://github.com/cvxgrp/a2dr',
author='Anqi Fu, Junzi Zhang, Stephen Boyd',
author_email='[email protected]',
license='Apache License, Version 2.0',
packages=find_packages(),
install_requires=['matplotlib',
'cvxpy >= 1.0.25',
'numpy >= 1.16',
'scipy >= 1.2.1'],
zip_safe=False,
test_suite='nose.collector',
tests_require=['nose'],
long_description=long_description,
long_description_content_type='text/markdown')
| 1,673 | 37.045455 | 105 | py |
a2dr | a2dr-master/examples/other_examples/simple_logistic.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
from cvxpy import *
from scipy import sparse
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
from a2dr import a2dr
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
class TestOther(BaseTest):
"""Unit tests for A2DR paper experiments."""
def setUp(self):
np.random.seed(1)
self.eps_rel = 1e-8 # Specify these in all examples?
self.eps_abs = 1e-6
self.MAX_ITER = 2000
def test_simple_logistic(self):
# minimize \sum_j log(1 + exp(-y_j*Z_j)) subject to Z = X\beta with variables (Z,\beta).
# Problem data.
m = 100
p = 80
X = np.random.randn(m, p)
beta_true = np.random.randn(p)
Z_true = X.dot(beta_true)
y = 2 * (Z_true > 0) - 1 # y_i = 1 or -1.
# Solve with CVXPY.
beta = Variable(p)
obj = sum(logistic(multiply(-y, X*beta)))
prob = Problem(Minimize(obj))
prob.solve()
cvxpy_beta = beta.value
cvxpy_obj = obj.value
print('CVXPY finished.')
# Split problem by row.
# minimize \sum_{i,j} log(1 + exp(-y_{ij}*Z_{ij})) subject to Z_i = X_i\beta with variables (Z_1,...,Z_K,\beta),
# where y_i is the i-th (m/N) subvector and X_i is the i-th (m/N) x p submatrix for i = 1,...,K.
K = 4 # Number of splits.
m_split = int(m/K) # Rows in each split.
y_split = np.split(y, K)
# Convert problem to standard form.
# f_i(Z_i) = \sum_j log(1 + exp(-y_{ij}*Z_{ij})) for i = 1,...,K.
# f_{K+1}(\beta) = 0.
# A_1 = [I; 0; ...; 0], A_2 = [0; I; 0; ...; 0], ..., A_K = [0; ...; 0; I], A_{K+1} = -X, b = 0.
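# Added illustration: with K = 2 the constraint Z = X*beta reads
# A_1*Z_1 + A_2*Z_2 + A_3*beta = 0 with A_1 = [I; 0], A_2 = [0; I], A_3 = -X,
# which is exactly what the A_list construction below stacks out of sparse blocks.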
prox_list = [lambda v, t, i=i: prox_logistic(v, t, y=y_split[i]) for i in range(K)] + \
[prox_constant]
A_list = []
for i in range(K):
mat_top = sparse.csr_matrix((i*m_split, m_split))
mat_bot = sparse.csr_matrix((m-(i+1)*m_split, m_split))
A_list += [sparse.vstack([mat_top, sparse.eye(m_split), mat_bot])]
A_list += [-X]
b = np.zeros(K*m_split)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
drs_beta = drs_result["x_vals"][-1]
drs_obj = np.sum(-np.log(sp.special.expit(np.multiply(y, X.dot(drs_beta)))))
print('DRS finished.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
a2dr_beta = a2dr_result["x_vals"][-1]
a2dr_obj = np.sum(-np.log(sp.special.expit(np.multiply(y, X.dot(a2dr_beta)))))
print('A2DR finished.')
# Compare results.
self.compare_total(drs_result, a2dr_result)
# self.assertItemsAlmostEqual(a2dr_beta, cvxpy_beta, places=3)
# self.assertItemsAlmostEqual(a2dr_obj, cvxpy_obj, places=4)
if __name__ == '__main__':
tests = TestOther()
tests.setUp()
tests.test_simple_logistic()
| 3,761 | 35.524272 | 120 | py |
a2dr | a2dr-master/examples/other_examples/stratified_model.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
from scipy import sparse
import numpy.linalg as LA
import copy
import time
import scipy.sparse.linalg
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
from cvxpy import *
from scipy import sparse
from scipy.optimize import nnls
from sklearn.datasets import make_sparse_spd_matrix
from a2dr import a2dr
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
import networkx as nx
class TestOther(BaseTest):
"""Unit tests for A2DR paper experiments."""
def setUp(self):
np.random.seed(1)
self.eps_rel = 1e-8 # specify these in all examples?
self.eps_abs = 1e-6
self.MAX_ITER = 1000
def test_strat_model(self):
# http://web.stanford.edu/~boyd/papers/strat_models.html
# minimize \sum_i \|A_i x_i - b_i\|_2^2 + lam \|x_i\|_2^2 +
# \sum_{i, j} W_{ij} \|x_i - x_j\|_2^2
# Problem data.
m, n, K = 30, 20, 5
As = [np.random.randn(m, n) for _ in range(K)]
bs = [np.random.randn(m) for _ in range(K)]
G = nx.cycle_graph(K)
L = nx.laplacian_matrix(G)
lam = 1e-4
# Convert problem to standard form.
# f_1(x_1) = \sum_i \|A_i (x_1)_i - b_i\|_2^2
# f_2(x_2) = \sum_i lam \|(x_2)_i\|_2^2
# f_3(x_3) = \sum_{i, j} W_{ij} \|(x_3)_i - (x_3)_j\|_2^2
# f_4(x_4) = 0
# A_1 = [I 0 0]^T
# A_2 = [0 I 0]^T
# A_3 = [0 0 I]^T
# A_4 = [-I -I -I]^T
# b = 0
def loss_prox(v, t):
result = np.empty(0)
for i in range(K):
result = np.append(
result,
prox_sum_squares_affine(v[i*n:(i+1)*n], t, F=As[i], g=bs[i], method="lstsq")
)
return result
def reg_prox(v, t):
return prox_sum_squares(v, t, scale=lam)
Q = sparse.kron(sparse.eye(n), L)
Q = Q + 1e-12 * sparse.eye(n*K) # ensure positive-semi-definite-ness
def laplacian_prox(v, t):
return prox_quad_form(v, t, Q=Q, method="lsqr")
def identity_prox(v, t):
return v
prox_list = [loss_prox, reg_prox, laplacian_prox, identity_prox]
eye = sparse.eye(K*n)
zero = sparse.csc_matrix((K*n, K*n))
A_list = [
sparse.vstack([eye, zero, zero]),
sparse.vstack([zero, eye, zero]),
sparse.vstack([zero, zero, eye]),
sparse.vstack([-eye, -eye, -eye])
]
b = np.zeros(3*K*n)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('DRS finished.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
print('A2DR finished.')
self.compare_total(drs_result, a2dr_result)
if __name__ == '__main__':
tests = TestOther()
tests.setUp()
tests.test_strat_model()
| 3,750 | 30 | 101 | py |
a2dr | a2dr-master/examples/paper_examples/coupled_qp.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
import numpy.linalg as LA
import copy
import time
import scipy.sparse.linalg
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
from cvxpy import *
from scipy import sparse
from scipy.optimize import nnls
from sklearn.datasets import make_sparse_spd_matrix
from a2dr import a2dr
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
class TestPaper(BaseTest):
"""Unit tests for A2DR paper experiments."""
def setUp(self):
np.random.seed(1)
self.eps_rel = 1e-8 # specify these in all examples?
self.eps_abs = 1e-6
self.MAX_ITER = 1000
def test_coupled_qp(self):
# Problem data.
L = 4 # number of blocks
s = 10 # number of coupling constraints
ql = 30 # variable dimension of each subproblem QP
pl = 50 # constraint dimension of each subproblem QP
G_list = [np.random.randn(s,ql) for l in range(L)]
F_list = [np.random.randn(pl,ql) for l in range(L)]
c_list = [np.random.randn(ql) for l in range(L)]
z_tld_list = [np.random.randn(ql) for l in range(L)]
d_list = [F_list[l].dot(z_tld_list[l])+0.1 for l in range(L)]
G = np.hstack(G_list)
z_tld = np.hstack(z_tld_list)
h = G.dot(z_tld)
H_list = [np.random.randn(ql,ql) for l in range(L)]
Q_list = [H_list[l].T.dot(H_list[l]) for l in range(L)]
# Convert problem to standard form.
def prox_qp_wrapper(l, Q_list, c_list, F_list, d_list):
return lambda v, t: prox_qp(v, t, Q_list[l], c_list[l], F_list[l], d_list[l])
# Use "map" method to avoid implicit overriding, which would make all the proximal operators the same
prox_list = list(map(lambda l: prox_qp_wrapper(l, Q_list, c_list, F_list, d_list), range(L)))
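# Added note: the wrapper above works around Python's late-binding closures. A plain
# [lambda v, t: prox_qp(v, t, Q_list[l], c_list[l], F_list[l], d_list[l]) for l in range(L)]
# would capture the loop variable l by reference, so every lambda would end up using l == L-1;
# binding l through the wrapper's parameter (or a default argument l=l) freezes the intended index.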
A_list = G_list
b = h
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False, lam_accel=0)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False, lam_accel=1e-12)
print('DRS finished.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
#a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, lam_accel=1e-12)
print('A2DR finished.')
self.compare_total(drs_result, a2dr_result)
# Check solution correctness.
a2dr_z = a2dr_result['x_vals']
a2dr_obj = np.sum([a2dr_z[l].dot(Q_list[l]).dot(a2dr_z[l])
+ c_list[l].dot(a2dr_z[l]) for l in range(L)])
a2dr_constr_vio = [np.linalg.norm(np.maximum(F_list[l].dot(a2dr_z[l])-d_list[l],0))**2
for l in range(L)]
a2dr_constr_vio += [np.linalg.norm(G.dot(np.hstack(a2dr_z))-h)**2]
a2dr_constr_vio_val = np.sqrt(np.sum(a2dr_constr_vio))
print('objective value of A2DR = {}'.format(a2dr_obj))
print('constraint violation of A2DR = {}'.format(a2dr_constr_vio_val))
if __name__ == '__main__':
tests = TestPaper()
tests.setUp()
tests.test_coupled_qp()
| 4,224 | 39.625 | 133 | py |
a2dr | a2dr-master/examples/paper_examples/single_commodity_flow.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
import numpy.linalg as LA
import copy
import time
import scipy.sparse.linalg
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
from cvxpy import *
from scipy import sparse
from scipy.optimize import nnls
from sklearn.datasets import make_sparse_spd_matrix
from a2dr import a2dr
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
class TestPaper(BaseTest):
"""Unit tests for A2DR paper experiments."""
def setUp(self):
np.random.seed(1)
self.eps_rel = 1e-8 # specify these in all examples?
self.eps_abs = 1e-6
self.MAX_ITER = 1000
def test_commodity_flow(self):
# Problem data.
p = 300 # Number of sources.
q = 800 # Number of flows.
# Construct a random incidence matrix.
B = sparse.lil_matrix((p,q))
for i in range(q-p+1):
idxs = np.random.choice(p, size=2, replace=False)
tmp = np.random.rand()
if tmp > 0.5:
B[idxs[0],i] = 1
B[idxs[1],i] = -1
else:
B[idxs[0],i] = -1
B[idxs[1],i] = 1
for j in range(q-p+1,q):
B[j-(q-p+1),j] = 1
B[j-(q-p),j] = -1
B = sparse.csr_matrix(B)
# Generate source and flow range data
s_tilde = np.random.randn(p)
p1, p2, p3 = int(p/3), int(p/3*2), int(p/6*5)
s_tilde[:p1] = 0
s_tilde[p1:p2] = -np.abs(s_tilde[p1:p2])
s_tilde[p2:] = np.sum(np.abs(s_tilde[p1:p2])) / (p-p2)
L = s_tilde[p1:p2]
s_max = np.hstack([s_tilde[p2:p3]+0.001, 2*(s_tilde[p3:]+0.001)])
res = sparse.linalg.lsqr(B, -s_tilde, atol=1e-16, btol=1e-16)
z_tilde = res[0]
q1 = int(q/2)
z_max = np.abs(z_tilde)+0.001
z_max[q1:] = 2*z_max[q1:]
# Generate cost coefficients
c = np.random.rand(q)
d = np.random.rand(p)
# Solve by CVXPY
z = Variable(q)
s = Variable(p)
C = sparse.diags(c)
D = sparse.diags(d)
obj = quad_form(z, C) + quad_form(s, D)
constr = [-z_max<=z, z<=z_max, s[:p1]==0, s[p1:p2]==L, 0<=s[p2:], s[p2:]<=s_max, B*z+s==0]
prob = Problem(Minimize(obj), constr)
prob.solve(solver='SCS', eps=self.eps_abs, verbose=True) # 'OSQP'
cvxpy_z = z.value
cvxpy_s = s.value
# Convert problem to standard form.
# f_1(z) = \sum_j c_j*z_j^2 + I(-z_max <= z_j <= z_max),
# f_2(s) = \sum_i d_i*s_i^(source)^2 + I(0 <= s_i^(source) <= s_max)
# + \sum_{i'} I(s_{i'}^{transfer}=0) + \sum_{i''}
# + \sum_{i"} I(s_{i"}^{sink}=L_{i"}).
# A = [B, I], b = 0
zeros = np.zeros(p1)
def prox_sat(v, t, c, v_lo = -np.inf, v_hi = np.inf):
return prox_box_constr(prox_sum_squares(v, t*c), t, v_lo, v_hi)
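# Added note: composing the box projection with the quadratic prox above is exact here because each
# coordinate's objective is a one-dimensional convex quadratic, whose minimum over an interval is the
# unconstrained minimizer clipped to that interval; so prox_box_constr(prox_sum_squares(...)) is the
# true proximal operator of the sum.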
prox_list = [lambda v, t: prox_sat(v, t, c, -z_max, z_max),
lambda v, t: np.hstack([zeros, L, prox_sat(v[p2:], t, d[p2:], 0, s_max)])]
A_list = [B, sparse.eye(p)]
b = np.zeros(p)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=2*self.MAX_ITER)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False, lam_accel=0)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False, lam_accel=1e-12)
print('DRS finished.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=2*self.MAX_ITER)
#a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=2*self.MAX_ITER, lam_accel=1e-12)
print('A2DR finished.')
self.compare_total(drs_result, a2dr_result)
# Check solution correctness
a2dr_z = a2dr_result['x_vals'][0]
a2dr_s = a2dr_result['x_vals'][1]
cvxpy_obj_raw = np.sum(c*cvxpy_z**2) + np.sum(d*cvxpy_s**2)
a2dr_obj = np.sum(c*a2dr_z**2) + np.sum(d*a2dr_s**2)
cvxpy_constr_vio = [np.maximum(np.abs(cvxpy_z) - z_max, 0),
cvxpy_s[:p1],
np.abs(cvxpy_s[p1:p2]-L),
np.maximum(-cvxpy_s[p2:],0),
np.maximum(cvxpy_s[p2:]-s_max,0),
B.dot(cvxpy_z)+cvxpy_s]
cvxpy_constr_vio_val = np.linalg.norm(np.hstack(cvxpy_constr_vio))
a2dr_constr_vio = [np.maximum(np.abs(a2dr_z) - z_max, 0),
a2dr_s[:p1],
np.abs(a2dr_s[p1:p2]-L),
np.maximum(-a2dr_s[p2:],0),
np.maximum(a2dr_s[p2:]-s_max,0),
B.dot(a2dr_z)+a2dr_s]
a2dr_constr_vio_val = np.linalg.norm(np.hstack(a2dr_constr_vio))
print('objective cvxpy raw = {}, objective a2dr = {}'.format(cvxpy_obj_raw, a2dr_obj))
print('constraint violation cvxpy = {}, constraint violation a2dr = {}'.format(
cvxpy_constr_vio_val, a2dr_constr_vio_val))
if __name__ == '__main__':
tests = TestPaper()
tests.setUp()
tests.test_commodity_flow()
| 6,153 | 38.961039 | 133 | py |
a2dr | a2dr-master/examples/paper_examples/l1_trend_filtering.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
import numpy.linalg as LA
import copy
import time
import scipy.sparse.linalg
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
from cvxpy import *
from scipy import sparse
from scipy.optimize import nnls
from sklearn.datasets import make_sparse_spd_matrix
from a2dr import a2dr
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
class TestPaper(BaseTest):
"""Unit tests for A2DR paper experiments."""
def setUp(self):
np.random.seed(1)
self.eps_rel = 1e-8 # specify these in all examples?
self.eps_abs = 1e-6
self.MAX_ITER = 1000
def test_l1_trend_filtering(self):
# minimize (1/2)||y - z||_2^2 + \alpha*||Dz||_1,
# where (Dz)_{t-1} = z_{t-1} - 2*z_t + z_{t+1} for t = 2,...,q-1.
# Reference: https://web.stanford.edu/~boyd/papers/l1_trend_filter.html
# Problem data.
q = 1000
y = np.random.randn(q)
alpha = 0.01*np.linalg.norm(y, np.inf)
# Form second difference matrix.
D = sparse.lil_matrix(sparse.eye(q))
D.setdiag(-2, k = 1)
D.setdiag(1, k = 2)
D = D[:(q-2),:]
# Convert problem to standard form.
# f_1(x_1) = (1/2)||y - x_1||_2^2, f_2(x_2) = \alpha*||x_2||_1.
# A_1 = D, A_2 = -I_{n-2}, b = 0.
prox_list = [lambda v, t: prox_sum_squares(v, t = 0.5*t, offset = y),
lambda v, t: prox_norm1(v, t = alpha*t)]
A_list = [D, -sparse.eye(q-2)]
b = np.zeros(q-2)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False, lam_accel=0)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False, lam_accel=1e-12)
print('Finished DRS.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
#a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, lam_accel=1e-12)
self.compare_total(drs_result, a2dr_result)
print('Finished A2DR.')
if __name__ == '__main__':
tests = TestPaper()
tests.setUp()
tests.test_l1_trend_filtering()
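# Illustrative sketch (not part of the original example): a quick check that the
# second-difference matrix D built above satisfies (Dz)_t = z_t - 2*z_{t+1} + z_{t+2} in
# 0-based indexing, i.e. (Dz)_{t-1} = z_{t-1} - 2*z_t + z_{t+1} as stated in the comment.
# The helper name below is made up for this sketch.
def _check_second_difference(q=6):
    D = sparse.lil_matrix(sparse.eye(q))
    D.setdiag(-2, k=1)
    D.setdiag(1, k=2)
    D = D[:(q-2), :]
    z = np.arange(q, dtype=float)**2
    assert np.allclose(D.dot(z), z[:-2] - 2*z[1:-1] + z[2:])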
| 3,203 | 34.6 | 133 | py |
a2dr | a2dr-master/examples/paper_examples/nnls_reg.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
import numpy.linalg as LA
import copy
import time
import scipy.sparse.linalg
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
from cvxpy import *
from scipy import sparse
from scipy.optimize import nnls
from sklearn.datasets import make_sparse_spd_matrix
from a2dr import a2dr
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
class TestPaper(BaseTest):
"""Unit tests for A2DR paper experiments."""
def setUp(self):
np.random.seed(1)
self.eps_rel = 1e-8 # specify these in all examples?
self.eps_abs = 1e-6
self.MAX_ITER = 1000
def test_nnls_reg(self):
# minimize ||Fz - g||_2^2 subject to z >= 0.
# Problem data.
p, q = 300, 200
density = 0.001
F = sparse.random(p, q, density=density, data_rvs=np.random.randn)
g = np.random.randn(p)
# Convert problem to standard form.
# f_1(x_1) = ||Fx_1 - g||_2^2, f_2(x_2) = I(x_2 >= 0).
# A_1 = I_n, A_2 = -I_n, b = 0.
prox_list = [lambda v, t: prox_sum_squares_affine(v, t, F, g), prox_nonneg_constr]
A_list = [sparse.eye(q), -sparse.eye(q)]
b = np.zeros(q)
# Solve with no regularization.
a2dr_noreg_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, lam_accel=0, max_iter=self.MAX_ITER)
print('Finish A2DR no regularization.')
# Solve with constant regularization.
a2dr_consreg_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, ada_reg=False, max_iter=self.MAX_ITER)
print('Finish A2DR constant regularization.')
# Solve with adaptive regularization.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, ada_reg=True, max_iter=self.MAX_ITER)
print('Finish A2DR adaptive regularization.')
self.compare_total_all([a2dr_noreg_result, a2dr_consreg_result, a2dr_result],
['no-reg', 'constant-reg', 'ada-reg'])
if __name__ == '__main__':
tests = TestPaper()
tests.setUp()
tests.test_nnls_reg()
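# Illustrative note (not part of the original example): with A_1 = I, A_2 = -I and b = 0, the
# constraint A_1*x_1 + A_2*x_2 = b reads x_1 - x_2 = 0, so the splitting above minimizes
# ||F x - g||_2^2 + I(x >= 0) over a single consensus variable x = x_1 = x_2.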
| 2,825 | 32.642857 | 124 | py |
a2dr | a2dr-master/examples/paper_examples/optimal_control.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
import numpy.linalg as LA
import copy
import time
import scipy.sparse.linalg
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
from cvxpy import *
from scipy import sparse
from scipy.optimize import nnls
from sklearn.datasets import make_sparse_spd_matrix
from a2dr import a2dr
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
class TestPaper(BaseTest):
"""Unit tests for A2DR paper experiments."""
def setUp(self):
np.random.seed(1)
self.eps_rel = 1e-8 # specify these in all examples?
self.eps_abs = 1e-6
self.MAX_ITER = 1000
def test_optimal_control(self):
# Problem data.
p = 20
q = 40
L = 5
F = np.random.randn(q,q)
G = np.random.randn(q,p)
h = np.random.randn(q)
z_init = np.random.randn(q)
F = F / np.max(np.abs(LA.eigvals(F)))
z_hat = z_init
for l in range(L-1):
u_hat = np.random.randn(p)
u_hat = u_hat / np.max(np.abs(u_hat))
z_hat = F.dot(z_hat) + G.dot(u_hat) + h
z_term = z_hat
# no normalization of u_hat actually leads to more significant improvement of A2DR over DRS, and also happens to be feasible
# z_term = 0 also happens to be feasible
# Convert problem to standard form.
def prox_sat(v, t, v_lo = -np.inf, v_hi = np.inf):
return prox_box_constr(prox_sum_squares(v, t), t, v_lo, v_hi)
prox_list = [prox_sum_squares, lambda v, t: prox_sat(v, t, -1, 1)]
A1 = sparse.lil_matrix(((L+1)*q,L*q))
A1[q:L*q,:(L-1)*q] = -sparse.block_diag((L-1)*[F])
A1.setdiag(1)
A1[L*q:,(L-1)*q:] = sparse.eye(q)
A2 = sparse.lil_matrix(((L+1)*q,L*p))
A2[q:L*q,:(L-1)*p] = -sparse.block_diag((L-1)*[G])
A_list = [sparse.csr_matrix(A1), sparse.csr_matrix(A2)]
b_list = [z_init]
b_list.extend((L-1)*[h])
b_list.extend([z_term])
b = np.concatenate(b_list)
# Solve with CVXPY
z = Variable((L,q))
u = Variable((L,p))
obj = sum([sum_squares(z[l]) + sum_squares(u[l]) for l in range(L)])
constr = [z[0] == z_init, norm_inf(u) <= 1]
constr += [z[l+1] == F*z[l] + G*u[l] + h for l in range(L-1)]
constr += [z[L-1] == z_term]
prob = Problem(Minimize(obj), constr)
prob.solve(solver='SCS', eps=self.eps_abs, verbose=True)
# OSQP fails for p=50, q=100, L=30, and also for p=100, q=200, L=30
# SCS also fails to converge
cvxpy_obj = prob.value
cvxpy_z = z.value.ravel(order='C')
cvxpy_u = u.value.ravel(order='C')
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False, lam_accel=0)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False, lam_accel=1e-12)
print('Finished DRS.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
#a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, lam_accel=1e-12)
self.compare_total(drs_result, a2dr_result)
print('Finished A2DR.')
# check solution correctness
a2dr_z = a2dr_result['x_vals'][0]
a2dr_u = a2dr_result['x_vals'][1]
a2dr_obj = np.sum(a2dr_z**2) + np.sum(a2dr_u**2)
cvxpy_obj_raw = np.sum(cvxpy_z**2) + np.sum(cvxpy_u**2)
cvxpy_Z = cvxpy_z.reshape([L,q], order='C')
cvxpy_U = cvxpy_u.reshape([L,p], order='C')
a2dr_Z = a2dr_z.reshape([L,q], order='C')
a2dr_U = a2dr_u.reshape([L,p], order='C')
cvxpy_constr_vio = [np.linalg.norm(cvxpy_Z[0]-z_init), np.linalg.norm(cvxpy_Z[L-1]-z_term)]
a2dr_constr_vio = [np.linalg.norm(a2dr_Z[0]-z_init), np.linalg.norm(a2dr_Z[L-1]-z_term)]
for l in range(L-1):
cvxpy_constr_vio.append(np.linalg.norm(cvxpy_Z[l+1]-F.dot(cvxpy_Z[l])-G.dot(cvxpy_U[l])-h))
a2dr_constr_vio.append(np.linalg.norm(a2dr_Z[l+1]-F.dot(a2dr_Z[l])-G.dot(a2dr_U[l])-h))
print('linear constr vio cvxpy = {}, linear constr vio a2dr = {}'.format(
np.mean(cvxpy_constr_vio), np.mean(a2dr_constr_vio)))
print('norm constr vio cvxpy = {}, norm constr vio a2dr = {}'.format(np.max(np.abs(cvxpy_u)),
np.max(np.abs(a2dr_u))))
print('objective cvxpy = {}, objective cvxpy raw = {}, objective a2dr = {}'.format(cvxpy_obj,
cvxpy_obj_raw,
a2dr_obj))
if __name__ == '__main__':
tests = TestPaper()
tests.setUp()
tests.test_optimal_control()
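# Illustrative sketch (not part of the original example): a tiny consistency check, with
# made-up dimensions, that the stacked constraint A1*z + A2*u = b built above encodes
# z_0 = z_init, z_{l+1} = F*z_l + G*u_l + h for l = 0,...,L-2, and z_{L-1} = z_term.
def _check_stacked_dynamics(L=3, q=2, p=1, seed=0):
    rng = np.random.RandomState(seed)
    F, G, h = rng.randn(q, q), rng.randn(q, p), rng.randn(q)
    z_blocks = [rng.randn(q)]
    u_blocks = [rng.randn(p) for _ in range(L)]
    for l in range(L - 1):
        z_blocks.append(F.dot(z_blocks[l]) + G.dot(u_blocks[l]) + h)
    A1 = sparse.lil_matrix(((L + 1)*q, L*q))
    A1[q:L*q, :(L - 1)*q] = -sparse.block_diag((L - 1)*[F])
    A1.setdiag(1)
    A1[L*q:, (L - 1)*q:] = sparse.eye(q)
    A2 = sparse.lil_matrix(((L + 1)*q, L*p))
    A2[q:L*q, :(L - 1)*p] = -sparse.block_diag((L - 1)*[G])
    b = np.concatenate([z_blocks[0]] + (L - 1)*[h] + [z_blocks[-1]])
    resid = A1.dot(np.hstack(z_blocks)) + A2.dot(np.hstack(u_blocks)) - b
    assert np.linalg.norm(resid) < 1e-10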
| 5,945 | 41.471429 | 133 | py |
a2dr | a2dr-master/examples/paper_examples/sparse_inv_cov_est.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
import numpy.linalg as LA
import copy
import time
import scipy.sparse.linalg
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
from cvxpy import *
from scipy import sparse
from scipy.optimize import nnls
from sklearn.datasets import make_sparse_spd_matrix
from a2dr import a2dr
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
class TestPaper(BaseTest):
"""Unit tests for A2DR paper experiments."""
def setUp(self):
np.random.seed(1)
self.eps_rel = 1e-8 # specify these in all examples?
self.eps_abs = 1e-6
self.MAX_ITER = 1000
def test_sparse_inv_covariance(self, q, alpha_ratio):
# minimize -log(det(S)) + trace(S*Q) + \alpha*||S||_1 subject to S is symmetric PSD.
# Problem data.
# q: Dimension of matrix.
p = 1000 # Number of samples.
ratio = 0.9 # Fraction of zeros in S.
S_true = sparse.csc_matrix(make_sparse_spd_matrix(q, ratio))
Sigma = sparse.linalg.inv(S_true).todense()
        z_sample = np.real(sp.linalg.sqrtm(Sigma)).dot(np.random.randn(q,p))  # take the real part: sqrtm can return a negligible imaginary component
Q = np.cov(z_sample)
mask = np.ones(Q.shape, dtype=bool)
np.fill_diagonal(mask, 0)
alpha_max = np.max(np.abs(Q)[mask])
alpha = alpha_ratio*alpha_max # 0.001 for q = 100, 0.01 for q = 50
# Convert problem to standard form.
# f_1(S) = -log(det(S)) + trace(S*Q) on symmetric PSD matrices, f_2(S) = \alpha*||S||_1.
# A_1 = I, A_2 = -I, b = 0.
prox_list = [lambda v, t: prox_neg_log_det(v.reshape((q,q), order='C'), t, lin_term=t*Q).ravel(order='C'),
lambda v, t: prox_norm1(v, t*alpha)]
A_list = [sparse.eye(q*q), -sparse.eye(q*q)]
b = np.zeros(q*q)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False, lam_accel=0)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False, lam_accel=1e-12)
print('Finished DRS.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
#a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, lam_accel=1e-12)
# lam_accel = 0 seems to work well sometimes, although it oscillates a lot.
a2dr_S = a2dr_result["x_vals"][-1].reshape((q,q), order='C')
self.compare_total(drs_result, a2dr_result)
print('Finished A2DR.')
print('recovered sparsity = {}'.format(np.sum(a2dr_S != 0)*1.0/a2dr_S.shape[0]**2))
if __name__ == '__main__':
tests = TestPaper()
tests.setUp()
tests.test_sparse_inv_covariance(80, 0.001)
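# Illustrative sketch (not part of the original example): the boolean mask built above selects
# exactly the off-diagonal entries of Q, so alpha_max is the largest off-diagonal magnitude.
# The helper name and matrix below are made up for this sketch.
def _check_offdiag_mask():
    Q = np.array([[10.0, -2.0], [3.0, 10.0]])
    mask = np.ones(Q.shape, dtype=bool)
    np.fill_diagonal(mask, 0)
    assert np.max(np.abs(Q)[mask]) == 3.0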
| 3,718 | 38.147368 | 133 | py |
a2dr | a2dr-master/examples/paper_examples/nnls.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
import numpy.linalg as LA
import copy
import time
import scipy.sparse.linalg
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
from cvxpy import *
from scipy import sparse
from scipy.optimize import nnls
from sklearn.datasets import make_sparse_spd_matrix
from a2dr import a2dr
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
class TestPaper(BaseTest):
"""Unit tests for A2DR paper experiments."""
def setUp(self):
np.random.seed(1)
self.eps_rel = 1e-8 # specify these in all examples?
self.eps_abs = 1e-6
self.MAX_ITER = 1000
def test_nnls(self):
# minimize ||Fz - g||_2^2 subject to z >= 0.
# Problem data.
p, q = 150, 300
density = 0.001
F = sparse.random(p, q, density=density, data_rvs=np.random.randn)
g = np.random.randn(p)
# Convert problem to standard form.
# f_1(x_1) = ||Fx_1 - g||_2^2, f_2(x_2) = I(x_2 >= 0).
# A_1 = I_n, A_2 = -I_n, b = 0.
prox_list = [lambda v, t: prox_sum_squares_affine(v, t, F, g), prox_nonneg_constr]
A_list = [sparse.eye(q), -sparse.eye(q)]
b = np.zeros(q)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('Finish DRS.')
# Solve with A2DR.
t0 = time.time()
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
t1 = time.time()
a2dr_beta = a2dr_result["x_vals"][-1]
print('nonzero entries proportion = {}'.format(np.sum(a2dr_beta > 0)*1.0/len(a2dr_beta)))
print('Finish A2DR.')
self.compare_total(drs_result, a2dr_result)
# Check solution correctness.
print('run time of A2DR = {}'.format(t1-t0))
print('constraint violation of A2DR = {}'.format(np.min(a2dr_beta)))
print('objective value of A2DR = {}'.format(np.linalg.norm(F.dot(a2dr_beta)-g)))
if __name__ == '__main__':
tests = TestPaper()
tests.setUp()
tests.test_nnls()
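# Illustrative sketch (not part of the original example): on a tiny dense instance the same
# problem can be cross-checked against scipy.optimize.nnls (already imported above). The
# helper name and data are made up for this sketch.
def _tiny_nnls_check():
    F = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    g = np.array([1.0, -2.0, 3.0])
    beta, rnorm = nnls(F, g)
    assert np.all(beta >= 0) and rnorm >= 0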
| 2,824 | 31.471264 | 101 | py |
a2dr | a2dr-master/examples/paper_examples/multitask_reg_logistic.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
import numpy.linalg as LA
import copy
import time
import scipy.sparse.linalg
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
from cvxpy import *
from scipy import sparse
from scipy.optimize import nnls
from sklearn.datasets import make_sparse_spd_matrix
from a2dr import a2dr
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
class TestPaper(BaseTest):
"""Unit tests for A2DR paper experiments."""
def setUp(self):
np.random.seed(1)
self.eps_rel = 1e-8 # specify these in all examples?
self.eps_abs = 1e-6
self.MAX_ITER = 1000
def test_multi_task_logistic(self):
# minimize \sum_{il} log(1 + exp(-Y_{il}*Z_{il})) + \alpha*||\theta||_{2,1} + \beta*||\theta||_*
# subject to Z = W\theta, ||.||_{2,1} = group lasso, ||.||_* = nuclear norm.
# Problem data.
L = 3 # Number of tasks.
s = 80 # Number of features.
p = 100 # Number of samples.
alpha = 0.1
beta = 0.1
W = np.random.randn(p,s)
theta_true = np.random.randn(s,L)
Z_true = W.dot(theta_true)
Y = 2*(Z_true > 0) - 1 # Y_{ij} = 1 or -1.
def calc_obj(theta):
obj = np.sum(-np.log(sp.special.expit(np.multiply(Y, W.dot(theta)))))
reg = alpha*np.sum([LA.norm(theta[:,l], 2) for l in range(L)])
reg += beta*LA.norm(theta, ord='nuc')
return obj + reg
# Convert problem to standard form.
# f_1(Z) = \sum_{il} log(1 + exp(-Y_{il}*Z_{il})),
# f_2(\theta) = \alpha*||\theta||_{2,1},
# f_3(\tilde \theta) = \beta*||\tilde \theta||_*.
# A_1 = [I; 0], A_2 = [-W; I], A_3 = [0; -I], b = 0.
prox_list = [lambda v, t: prox_logistic(v, t, y = Y.ravel(order='F')),
# TODO: Calculate in parallel for l = 1,...L.
lambda v, t: prox_group_lasso(v.reshape((s,L), order='F'), t*alpha).ravel(order='F'),
lambda v, t: prox_norm_nuc(v.reshape((s,L), order='F'), t*beta).ravel(order='F')]
A_list = [sparse.vstack([sparse.eye(p*L), sparse.csr_matrix((s*L,p*L))]),
sparse.vstack([-sparse.block_diag(L*[W]), sparse.eye(s*L)]),
sparse.vstack([sparse.csr_matrix((p*L,s*L)), -sparse.eye(s*L)])]
b = np.zeros(p*L + s*L)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False, lam_accel=0)
#drs_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, ada_reg=False, lam_accel=1e-12)
print('DRS finished.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
#a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER, lam_accel=1e-12)
a2dr_theta = a2dr_result["x_vals"][-1].reshape((s,L), order='F')
print('A2DR finished.')
self.compare_total(drs_result, a2dr_result)
if __name__ == '__main__':
tests = TestPaper()
tests.setUp()
tests.test_multi_task_logistic()
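# Illustrative sketch (not part of the original example): calc_obj above evaluates the logistic
# loss through the identity -log(sigmoid(m)) = log(1 + exp(-m)). A quick numerical check, with
# a made-up helper name:
def _check_logistic_identity():
    from scipy.special import expit
    m = np.linspace(-5, 5, 11)
    assert np.allclose(-np.log(expit(m)), np.log1p(np.exp(-m)))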
| 4,133 | 39.135922 | 133 | py |
a2dr | a2dr-master/examples/paper_examples/paper_plots.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
import numpy.linalg as LA
import copy
import time
import scipy.sparse.linalg
import matplotlib.pyplot as plt
from cvxpy import *
from scipy import sparse
from scipy.optimize import nnls
from sklearn.datasets import make_sparse_spd_matrix
from a2dr import a2dr
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
class TestPaper(BaseTest):
"""Reproducible tests and plots for A2DR paper experiments."""
def setUp(self):
np.random.seed(1)
self.eps_rel = 1e-8 # specify these in all examples?
self.eps_abs = 1e-6
self.MAX_ITER = 1000
def test_nnls(self, figname):
# minimize ||Fz - g||_2^2 subject to z >= 0.
# Problem data.
p, q = 10000, 8000
density = 0.001
F = sparse.random(p, q, density=density, data_rvs=np.random.randn)
g = np.random.randn(p)
# Convert problem to standard form.
# f_1(x_1) = ||Fx_1 - g||_2^2, f_2(x_2) = I(x_2 >= 0).
# A_1 = I_n, A_2 = -I_n, b = 0.
prox_list = [lambda v, t: prox_sum_squares_affine(v, t, F, g), prox_nonneg_constr]
A_list = [sparse.eye(q), -sparse.eye(q)]
b = np.zeros(q)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('Finish DRS.')
# Solve with A2DR.
t0 = time.time()
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
t1 = time.time()
a2dr_beta = a2dr_result["x_vals"][-1]
print('nonzero entries proportion = {}'.format(np.sum(a2dr_beta > 0)*1.0/len(a2dr_beta)))
print('Finish A2DR.')
self.compare_total(drs_result, a2dr_result, figname)
# Check solution correctness.
print('run time of A2DR = {}'.format(t1-t0))
print('constraint violation of A2DR = {}'.format(np.min(a2dr_beta)))
print('objective value of A2DR = {}'.format(np.linalg.norm(F.dot(a2dr_beta)-g)))
def test_nnls_reg(self, figname):
# minimize ||Fz - g||_2^2 subject to z >= 0.
# Problem data.
p, q = 300, 500
density = 0.001
F = sparse.random(p, q, density=density, data_rvs=np.random.randn)
g = np.random.randn(p)
# Convert problem to standard form.
# f_1(x_1) = ||Fx_1 - g||_2^2, f_2(x_2) = I(x_2 >= 0).
# A_1 = I_n, A_2 = -I_n, b = 0.
prox_list = [lambda v, t: prox_sum_squares_affine(v, t, F, g), prox_nonneg_constr]
A_list = [sparse.eye(q), -sparse.eye(q)]
b = np.zeros(q)
# Solve with no regularization.
a2dr_noreg_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, lam_accel=0, max_iter=self.MAX_ITER)
print('Finish A2DR no regularization.')
# Solve with constant regularization.
a2dr_consreg_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, ada_reg=False, max_iter=self.MAX_ITER)
print('Finish A2DR constant regularization.')
# Solve with adaptive regularization.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, ada_reg=True, max_iter=self.MAX_ITER)
print('Finish A2DR adaptive regularization.')
self.compare_total_all([a2dr_noreg_result, a2dr_consreg_result, a2dr_result],
['no-reg', 'constant-reg', 'ada-reg'], figname)
def test_sparse_inv_covariance(self, q, alpha_ratio, figname):
# minimize -log(det(S)) + trace(S*Q) + \alpha*||S||_1 subject to S is symmetric PSD.
# Problem data.
# q: Dimension of matrix.
p = 1000 # Number of samples.
ratio = 0.9 # Fraction of zeros in S.
S_true = sparse.csc_matrix(make_sparse_spd_matrix(q, ratio))
Sigma = sparse.linalg.inv(S_true).todense()
z_sample = np.real(sp.linalg.sqrtm(Sigma)).dot(np.random.randn(q,p)) # make sure it's real matrices.
Q = np.cov(z_sample)
        print('Q is positive definite? {}'.format(bool(np.all(LA.eigvalsh(Q) > 0))))  # sign of the determinant alone does not certify positive definiteness
mask = np.ones(Q.shape, dtype=bool)
np.fill_diagonal(mask, 0)
alpha_max = np.max(np.abs(Q)[mask])
alpha = alpha_ratio*alpha_max # 0.001 for q = 100, 0.01 for q = 50.
# Convert problem to standard form.
# f_1(S_1) = -log(det(S_1)) + trace(S_1*Q) on symmetric PSD matrices, f_2(S_2) = \alpha*||S_2||_1.
# A_1 = I, A_2 = -I, b = 0.
prox_list = [lambda v, t: prox_neg_log_det(v.reshape((q,q), order='C'), t, lin_term=t*Q).ravel(order='C'),
lambda v, t: prox_norm1(v, t*alpha)]
A_list = [sparse.eye(q*q), -sparse.eye(q*q)]
b = np.zeros(q*q)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('Finished DRS.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
# lam_accel = 0 seems to work well sometimes, although it oscillates a lot.
a2dr_S = a2dr_result["x_vals"][-1].reshape((q,q), order='C')
self.compare_total(drs_result, a2dr_result, figname)
print('Finished A2DR.')
print('recovered sparsity = {}'.format(np.sum(a2dr_S != 0)*1.0/a2dr_S.shape[0]**2))
def test_l1_trend_filtering(self, figname):
# minimize (1/2)||y - z||_2^2 + \alpha*||Dz||_1,
# where (Dz)_{t-1} = z_{t-1} - 2*z_t + z_{t+1} for t = 2,...,q-1.
# Reference: https://web.stanford.edu/~boyd/papers/l1_trend_filter.html
# Problem data.
q = int(2*10**4)
y = np.random.randn(q)
alpha = 0.01*np.linalg.norm(y, np.inf)
# Form second difference matrix.
D = sparse.lil_matrix(sparse.eye(q))
D.setdiag(-2, k = 1)
D.setdiag(1, k = 2)
D = D[:(q-2),:]
# Convert problem to standard form.
# f_1(x_1) = (1/2)||y - x_1||_2^2, f_2(x_2) = \alpha*||x_2||_1.
# A_1 = D, A_2 = -I_{n-2}, b = 0.
prox_list = [lambda v, t: prox_sum_squares(v, t = 0.5*t, offset = y),
lambda v, t: prox_norm1(v, t = alpha*t)]
A_list = [D, -sparse.eye(q-2)]
b = np.zeros(q-2)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('Finished DRS.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
self.compare_total(drs_result, a2dr_result, figname)
print('Finished A2DR.')
def test_optimal_control(self, figname):
        # Problem data.
p = 80
q = 150
L = 20
F = np.random.randn(q,q)
G = np.random.randn(q,p)
h = np.random.randn(q)
z_init = np.random.randn(q)
F = F / np.max(np.abs(LA.eigvals(F)))
z_hat = z_init
for l in range(L-1):
u_hat = np.random.randn(p)
u_hat = u_hat / np.max(np.abs(u_hat))
z_hat = F.dot(z_hat) + G.dot(u_hat) + h
z_term = z_hat
# no normalization of u_hat actually leads to more significant improvement of A2DR over DRS, and also happens to be feasible
        # z_term = 0 also happens to be feasible
# Convert problem to standard form.
def prox_sat(v, t, v_lo = -np.inf, v_hi = np.inf):
return prox_box_constr(prox_sum_squares(v, t), t, v_lo, v_hi)
prox_list = [prox_sum_squares, lambda v, t: prox_sat(v, t, -1, 1)]
A1 = sparse.lil_matrix(((L+1)*q,L*q))
A1[q:L*q,:(L-1)*q] = -sparse.block_diag((L-1)*[F])
A1.setdiag(1)
A1[L*q:,(L-1)*q:] = sparse.eye(q)
A2 = sparse.lil_matrix(((L+1)*q,L*p))
A2[q:L*q,:(L-1)*p] = -sparse.block_diag((L-1)*[G])
A_list = [sparse.csr_matrix(A1), sparse.csr_matrix(A2)]
b_list = [z_init]
b_list.extend((L-1)*[h])
b_list.extend([z_term])
b = np.concatenate(b_list)
# Solve with CVXPY
z = Variable((L,q))
u = Variable((L,p))
obj = sum([sum_squares(z[l]) + sum_squares(u[l]) for l in range(L)])
constr = [z[0] == z_init, norm_inf(u) <= 1]
constr += [z[l+1] == F*z[l] + G*u[l] + h for l in range(L-1)]
constr += [z[L-1] == z_term]
prob = Problem(Minimize(obj), constr)
prob.solve(solver='SCS', verbose=True)
# OSQP fails for p=50, q=100, L=30, and also for p=100, q=200, L=30
# SCS also fails to converge
cvxpy_obj = prob.value
cvxpy_z = z.value.ravel(order='C')
cvxpy_u = u.value.ravel(order='C')
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('Finished DRS.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
self.compare_total(drs_result, a2dr_result, figname)
print('Finished A2DR.')
# check solution correctness
a2dr_z = a2dr_result['x_vals'][0]
a2dr_u = a2dr_result['x_vals'][1]
a2dr_obj = np.sum(a2dr_z**2) + np.sum(a2dr_u**2)
cvxpy_obj_raw = np.sum(cvxpy_z**2) + np.sum(cvxpy_u**2)
cvxpy_Z = cvxpy_z.reshape([L,q], order='C')
cvxpy_U = cvxpy_u.reshape([L,p], order='C')
a2dr_Z = a2dr_z.reshape([L,q], order='C')
a2dr_U = a2dr_u.reshape([L,p], order='C')
cvxpy_constr_vio = [np.linalg.norm(cvxpy_Z[0]-z_init), np.linalg.norm(cvxpy_Z[L-1]-z_term)]
a2dr_constr_vio = [np.linalg.norm(a2dr_Z[0]-z_init), np.linalg.norm(a2dr_Z[L-1]-z_term)]
for l in range(L-1):
cvxpy_constr_vio.append(np.linalg.norm(cvxpy_Z[l+1]-F.dot(cvxpy_Z[l])-G.dot(cvxpy_U[l])-h))
a2dr_constr_vio.append(np.linalg.norm(a2dr_Z[l+1]-F.dot(a2dr_Z[l])-G.dot(a2dr_U[l])-h))
        print('linear constr vio cvxpy = {}, linear constr vio a2dr = {}'.format(
np.mean(cvxpy_constr_vio), np.mean(a2dr_constr_vio)))
print('norm constr vio cvxpy = {}, norm constr vio a2dr = {}'.format(np.max(np.abs(cvxpy_u)),
np.max(np.abs(a2dr_u))))
print('objective cvxpy = {}, objective cvxpy raw = {}, objective a2dr = {}'.format(cvxpy_obj,
cvxpy_obj_raw,
a2dr_obj))
def test_coupled_qp(self, figname):
# Problem data.
L = 8 # number of blocks
s = 50 # number of coupling constraints
ql = 300 # variable dimension of each subproblem QP
pl = 200 # constraint dimension of each subproblem QP
G_list = [np.random.randn(s,ql) for l in range(L)]
F_list = [np.random.randn(pl,ql) for l in range(L)]
c_list = [np.random.randn(ql) for l in range(L)]
z_tld_list = [np.random.randn(ql) for l in range(L)]
d_list = [F_list[l].dot(z_tld_list[l])+0.1 for l in range(L)]
G = np.hstack(G_list)
z_tld = np.hstack(z_tld_list)
h = G.dot(z_tld)
H_list = [np.random.randn(ql,ql) for l in range(L)]
Q_list = [H_list[l].T.dot(H_list[l]) for l in range(L)]
# Convert problem to standard form.
def tmp(l, Q_list, c_list, F_list, d_list):
return lambda v, t: prox_qp(v, t, Q_list[l], c_list[l], F_list[l], d_list[l])
# Use "map" method to avoid implicit overriding, which would make all the proximal operators the same
prox_list = list(map(lambda l: tmp(l, Q_list, c_list, F_list, d_list), range(L)))
A_list = G_list
b = h
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('DRS finished.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
print('A2DR finished.')
self.compare_total(drs_result, a2dr_result, figname)
# Check solution correctness.
a2dr_z = a2dr_result['x_vals']
a2dr_obj = np.sum([a2dr_z[l].dot(Q_list[l]).dot(a2dr_z[l])
+ c_list[l].dot(a2dr_z[l]) for l in range(L)])
a2dr_constr_vio = [np.linalg.norm(np.maximum(F_list[l].dot(a2dr_z[l])-d_list[l],0))**2
for l in range(L)]
a2dr_constr_vio += [np.linalg.norm(G.dot(np.hstack(a2dr_z))-h)**2]
a2dr_constr_vio_val = np.sqrt(np.sum(a2dr_constr_vio))
print('objective value of A2DR = {}'.format(a2dr_obj))
print('constraint violation of A2DR = {}'.format(a2dr_constr_vio_val))
def test_commodity_flow(self, figname):
# Problem data.
p = 4000 # Number of sources.
q = 7000 # Number of flows.
# Construct a random incidence matrix.
B = sparse.lil_matrix((p,q))
for i in range(q-p+1):
idxs = np.random.choice(p, size=2, replace=False)
tmp = np.random.rand()
if tmp > 0.5:
B[idxs[0],i] = 1
B[idxs[1],i] = -1
else:
B[idxs[0],i] = -1
B[idxs[1],i] = 1
for j in range(q-p+1,q):
B[j-(q-p+1),j] = 1
B[j-(q-p),j] = -1
B = sparse.csr_matrix(B)
# Generate source and flow range data
s_tilde = np.random.randn(p)
p1, p2, p3 = int(p/3), int(p/3*2), int(p/6*5)
s_tilde[:p1] = 0
s_tilde[p1:p2] = -np.abs(s_tilde[p1:p2])
s_tilde[p2:] = np.sum(np.abs(s_tilde[p1:p2])) / (p-p2)
L = s_tilde[p1:p2]
s_max = np.hstack([s_tilde[p2:p3]+0.001, 2*(s_tilde[p3:]+0.001)])
res = sparse.linalg.lsqr(B, -s_tilde, atol=1e-16, btol=1e-16)
z_tilde = res[0]
q1 = int(q/2)
z_max = np.abs(z_tilde)+0.001
z_max[q1:] = 2*z_max[q1:]
# Generate cost coefficients
c = np.random.rand(q)
d = np.random.rand(p)
# Solve by CVXPY
z = Variable(q)
s = Variable(p)
C = sparse.diags(c)
D = sparse.diags(d)
obj = quad_form(z, C) + quad_form(s, D)
constr = [-z_max<=z, z<=z_max, s[:p1]==0, s[p1:p2]==L, 0<=s[p2:], s[p2:]<=s_max, B*z+s==0]
prob = Problem(Minimize(obj), constr)
prob.solve(solver='SCS', verbose=True) # 'OSQP'
cvxpy_z = z.value
cvxpy_s = s.value
# Convert problem to standard form.
# f_1(z) = \sum_j c_j*z_j^2 + I(-z_max <= z_j <= z_max),
# f_2(s) = \sum_i d_i*s_i^(source)^2 + I(0 <= s_i^(source) <= s_max)
        #          + \sum_{i'} I(s_{i'}^{transfer}=0)
        #          + \sum_{i''} I(s_{i''}^{sink}=L_{i''}).
# A = [B, I], b = 0
zeros = np.zeros(p1)
def prox_sat(v, t, c, v_lo = -np.inf, v_hi = np.inf):
return prox_box_constr(prox_sum_squares(v, t*c), t, v_lo, v_hi)
prox_list = [lambda v, t: prox_sat(v, t, c, -z_max, z_max),
lambda v, t: np.hstack([zeros, L, prox_sat(v[p2:], t, d[p2:], 0, s_max)])]
A_list = [B, sparse.eye(p)]
b = np.zeros(p)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=2*self.MAX_ITER)
print('DRS finished.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=2*self.MAX_ITER)
print('A2DR finished.')
self.compare_total(drs_result, a2dr_result, figname)
# Check solution correctness
a2dr_z = a2dr_result['x_vals'][0]
a2dr_s = a2dr_result['x_vals'][1]
cvxpy_obj_raw = np.sum(c*cvxpy_z**2) + np.sum(d*cvxpy_s**2)
a2dr_obj = np.sum(c*a2dr_z**2) + np.sum(d*a2dr_s**2)
cvxpy_constr_vio = [np.maximum(np.abs(cvxpy_z) - z_max, 0),
cvxpy_s[:p1],
np.abs(cvxpy_s[p1:p2]-L),
np.maximum(-cvxpy_s[p2:],0),
np.maximum(cvxpy_s[p2:]-s_max,0),
B.dot(cvxpy_z)+cvxpy_s]
cvxpy_constr_vio_val = np.linalg.norm(np.hstack(cvxpy_constr_vio))
a2dr_constr_vio = [np.maximum(np.abs(a2dr_z) - z_max, 0),
a2dr_s[:p1],
np.abs(a2dr_s[p1:p2]-L),
np.maximum(-a2dr_s[p2:],0),
np.maximum(a2dr_s[p2:]-s_max,0),
B.dot(a2dr_z)+a2dr_s]
a2dr_constr_vio_val = np.linalg.norm(np.hstack(a2dr_constr_vio))
print('objective cvxpy raw = {}, objective a2dr = {}'.format(cvxpy_obj_raw, a2dr_obj))
print('constraint violation cvxpy = {}, constraint violation a2dr = {}'.format(
cvxpy_constr_vio_val, a2dr_constr_vio_val))
def test_multi_task_logistic(self, figname):
# minimize \sum_{il} log(1 + exp(-Y_{il}*Z_{il})) + \alpha*||\theta||_{2,1} + \beta*||\theta||_*
# subject to Z = W\theta, ||.||_{2,1} = group lasso, ||.||_* = nuclear norm.
# Problem data.
L = 10 # Number of tasks.
s = 500 # Number of features.
p = 300 # Number of samples.
alpha = 0.1
beta = 0.1
W = np.random.randn(p,s)
theta_true = np.random.randn(s,L)
Z_true = W.dot(theta_true)
Y = 2*(Z_true > 0) - 1 # Y_{ij} = 1 or -1.
def calc_obj(theta):
obj = np.sum(-np.log(sp.special.expit(np.multiply(Y, W.dot(theta)))))
reg = alpha*np.sum([LA.norm(theta[:,l], 2) for l in range(L)])
reg += beta*LA.norm(theta, ord='nuc')
return obj + reg
# Convert problem to standard form.
# f_1(Z) = \sum_{il} log(1 + exp(-Y_{il}*Z_{il})),
# f_2(\theta) = \alpha*||\theta||_{2,1},
# f_3(\tilde \theta) = \beta*||\tilde \theta||_*.
# A_1 = [I; 0], A_2 = [-W; I], A_3 = [0; -I], b = 0.
prox_list = [lambda v, t: prox_logistic(v, t, y = Y.ravel(order='F')),
# TODO: Calculate in parallel for l = 1,...L.
lambda v, t: prox_group_lasso(v.reshape((s,L), order='F'), t*alpha).ravel(order='F'),
lambda v, t: prox_norm_nuc(v.reshape((s,L), order='F'), t*beta).ravel(order='F')]
A_list = [sparse.vstack([sparse.eye(p*L), sparse.csr_matrix((s*L,p*L))]),
sparse.vstack([-sparse.block_diag(L*[W]), sparse.eye(s*L)]),
sparse.vstack([sparse.csr_matrix((p*L,s*L)), -sparse.eye(s*L)])]
b = np.zeros(p*L + s*L)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('DRS finished.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
a2dr_theta = a2dr_result["x_vals"][-1].reshape((s,L), order='F')
print('A2DR finished.')
self.compare_total(drs_result, a2dr_result, figname)
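# Illustrative note (not part of the original module): unlike the standalone example scripts,
# this module defines no __main__ driver. A possible way to reproduce a single experiment,
# with a hypothetical figure file name:
#     tests = TestPaper()
#     tests.setUp()
#     tests.test_nnls(figname='nnls.pdf')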
| 20,198 | 43.393407 | 132 | py |
a2dr | a2dr-master/a2dr/acceleration.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy.sparse as sp
def aa_weights(Y, g, reg = 0, type = "lstsq", *args, **kwargs):
""" Solve the constrained least-squares problem
Minimize sum_squares(\\sum_{j=0}^m w_j * G^(k-m+j))
subject to \\sum_{j=0}^m w_j = 1.
with respect to w \\in \\reals^{m+1}.
This can be transformed via a change of variables
w_0 = c_0, w_j = c_j - c_{j-1} for j = 1,...,m-1, and w_m = 1 - c_{m-1}
into the unconstrained problem
Minimize sum_squares(g - Y*c)
with respect to c \\in \\reals^m, where g_i = G^(i) and Y_k = [y_{k-m},...,y_{k-1}]
for y_i = g_{i+1} - g_i.
We add a regularization term for stability, so the final problem we solve is
Minimize sum_squares(g - Y*c) + \\lambda*sum_squares(c)
and return w as defined above.
"""
if type == "lstsq":
if reg != 0:
m = Y.shape[1]
Y = np.vstack([Y, np.sqrt(reg)*np.eye(m)])
g = np.concatenate([g, np.zeros(m)])
gamma = np.linalg.lstsq(Y, g, *args, **kwargs)[0]
elif type == "lsqr":
if reg != 0:
m = Y.shape[1]
Y = sp.csc_matrix(Y)
Y = sp.vstack([Y, np.sqrt(reg)*sp.eye(m)])
g = np.concatenate([g, np.zeros(m)])
gamma = sp.linalg.lsqr(Y, g, *args, **kwargs)[0]
else:
raise ValueError("Algorithm type not supported:", type)
gamma_diff = np.diff(gamma, n=1)
alpha = np.concatenate(([gamma[0]], gamma_diff, [1-gamma[-1]]))
return alpha
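# Illustrative sketch (not part of the original module): by the change of variables documented
# in the docstring above, the returned weights always sum to one regardless of the
# least-squares solution. The helper name and data below are made up for this sketch.
def _check_weights_sum_to_one(k=20, m=5, seed=0):
    rng = np.random.RandomState(seed)
    Y, g = rng.randn(k, m), rng.randn(k)
    w = aa_weights(Y, g, reg=1e-8, type="lstsq", rcond=None)
    assert w.shape == (m + 1,) and abs(np.sum(w) - 1.0) < 1e-10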
| 2,012 | 33.706897 | 84 | py |
a2dr | a2dr-master/a2dr/utilities.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import codecs
import os.path
# code for single sourcing versions
# reference: https://packaging.python.org/guides/single-sourcing-package-version/
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
def solve_vec(x, var_type='vec'):
# reshape to vector format for input to a2dr
if var_type == 'vec':
return x, x.shape
elif var_type == 'mat_C':
return x.ravel(order='C'), x.shape
elif var_type == 'mat_F':
return x.ravel(order='F'), x.shape
elif var_type == 'mat_symm':
# lower triangular part, row-wise stacking
if x.shape[0] != x.shape[1] or np.linalg.norm(x-x.T) != 0:
raise ValueError("input must be square and symmetric")
        mask = np.ones(x.shape, dtype=bool)
np.fill_diagonal(mask, 0)
x[mask] *= np.sqrt(2)
ind = np.tril_indices(x.shape[0])
return x[ind], x.shape
else:
raise ValueError("var_type = must be vec, mat_C, mat_F or mat_symm")
def solve_mat(x, shape=None, var_type='vec'):
# reshape back to the original format after running a2dr
if var_type == 'vec':
return x
elif var_type == 'mat_C':
        if shape is None:
raise ValueError("shape must be provided for var_type = mat_C")
return x.reshape(shape, order='C')
elif var_type == 'mat_F':
        if shape is None:
raise ValueError("shape must be provided for var_type = mat_F")
return x.reshape(shape, order='F')
elif var_type == 'mat_symm':
        # lower triangular part, row-wise stacking
        # (x is already the stacked lower-triangular vector here, so no symmetry check applies)
        if shape is None:
raise ValueError("shape must be provided for var_type = mat_symm")
ind = np.tril_indices(shape[0])
ind_u = np.triu_indices(shape[0])
y = np.zeros(shape)
y[ind] = x
y[ind_u] = y.T[ind_u]
return y
else:
raise ValueError("var_type = must be vec, mat_C, mat_F or mat_symm")
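# Illustrative sketch (not part of the original module): solve_vec/solve_mat round trip for the
# row-major ('mat_C') case. The helper name below is made up for this sketch.
def _check_mat_C_roundtrip():
    x = np.arange(6, dtype=float).reshape(2, 3)
    v, shape = solve_vec(x, var_type='mat_C')
    assert np.array_equal(solve_mat(v, shape=shape, var_type='mat_C'), x)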
| 2,883 | 32.929412 | 81 | py |
a2dr | a2dr-master/a2dr/__init__.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
from a2dr.solver import a2dr
__version__ = "0.2.3.post3"
| 719 | 30.304348 | 68 | py |
a2dr | a2dr-master/a2dr/solver.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import numpy.linalg as LA
import scipy.sparse as sp
from scipy.stats.mstats import gmean
from time import time
from multiprocessing import Process, Pipe
import sys, os, warnings
from a2dr.precondition import precondition
from a2dr.acceleration import aa_weights
from a2dr.utilities import get_version
sys_stdout_origin = sys.stdout
def a2dr_worker(pipe, prox, v_init, A, t, anderson, m_accel):
# Initialize AA-II parameters.
if anderson: # TODO: Store and update these efficiently as arrays.
F_hist = [] # History of F(v^(k)).
v_vec = v_init.copy()
v_res = np.zeros(v_init.shape[0])
# A2DR loop.
while True:
# Proximal step for x^(k+1/2).
warnings.filterwarnings("ignore")
sys.stdout = open(os.devnull, 'w')
x_half = prox(v_vec, t)
sys.stdout.close()
sys.stdout = sys_stdout_origin
warnings.filterwarnings("default")
# Calculate v^(k+1/2) = 2*x^(k+1/2) - v^(k).
v_half = 2*x_half - v_vec
# Project to obtain x^(k+1) = v^(k+1/2) - A^T(AA^T)^{-1}(Av^(k+1/2) - b).
pipe.send(v_half)
dk, k = pipe.recv() # dk = A^\dagger(Av^(k+1/2) - b)[i] for node i.
x_new = v_half - dk
if anderson and k > 0: # for k = 0, always do the vanilla DRS update
m_k = min(m_accel, k) # Keep F(v^(j)) for iterations (k-m_k) through k.
# Save history of F(v^(k)).
F_hist.append(v_vec + x_new - x_half)
if len(F_hist) > m_k + 1:
F_hist.pop(0)
# Send s^(k-1) = v^(k) - v^(k-1) and g^(k) = v^(k) - F(v^(k)) = x^(k+1/2) - x^(k+1).
pipe.send((v_res, x_half - x_new))
# Receive safeguarding decision.
AA_update = pipe.recv()
if AA_update:
# Receive AA-II weights for v^(k+1).
alpha = pipe.recv()
# Weighted update of v^(k+1).
v_new = np.column_stack(F_hist).dot(alpha) #.dot(alpha[:(k + 1)]) ### Why truncate to (k+1)???
else:
# Revert to DRS update of v^(k+1).
v_new = v_vec + x_new - x_half
# Save v^(k+1) - v^(k) for next iteration.
v_res = v_new - v_vec
elif anderson and k == 0:
# Update v^(k+1) = v^(k) + x^(k+1) - x^(k+1/2).
v_new = v_vec + x_new - x_half
## only useful when anderson = True but k == 0
# Store v_res in case anderson = True
v_res = v_new - v_vec
# Update F_hist in case anderson = True
F_hist.append(v_vec + x_new - x_half)
# Send g^(k) = v^(k) - F(v^(k)) = x^(k+1/2) - x^(k+1).
pipe.send(x_half - x_new)
else:
# Update v^(k+1) = v^(k) + x^(k+1) - x^(k+1/2).
v_new = v_vec + x_new - x_half
# Send x^(k+1/2) along with A*x^(k+1/2) and x^(k+1/2) - v^(k) for computing residuals.
Ax_half = A.dot(x_half)
xv_diff = x_half - v_vec
pipe.send((x_half, Ax_half, xv_diff))
v_vec = v_new
def a2dr(p_list, A_list = [], b = np.array([]), v_init = None, n_list = None, *args, **kwargs):
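    """Solve a prox-affine problem,
        minimize    sum_i f_i(x_i)
        subject to  sum_i A_i x_i = b,
    given only proximal oracles p_list[i](v, t) for each f_i.
    Interface sketch only; see the A2DR paper/README for the authoritative description.
    Keyword options (defaults read from kwargs below): max_iter, t_init, eps_abs, eps_rel,
    precond, ada_reg, anderson, m_accel, lam_accel, aa_method, D_safe, eps_safe, M_safe,
    verbose.
    Returns a dict with keys "x_vals", "primal", "dual", "num_iters", "solve_time".
    """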
start = time()
# Problem parameters.
max_iter = kwargs.pop("max_iter", 1000)
t_init = kwargs.pop("t_init", 1/10) # Step size.
eps_abs = kwargs.pop("eps_abs", 1e-6) # Absolute stopping tolerance.
eps_rel = kwargs.pop("eps_rel", 1e-8) # Relative stopping tolerance.
precond = kwargs.pop("precond", True) # Precondition A and b?
ada_reg = kwargs.pop("ada_reg", True) # Adaptive regularization?
# AA-II parameters.
anderson = kwargs.pop("anderson", True)
m_accel = int(kwargs.pop("m_accel", 10)) # Maximum past iterations to keep (>= 0).
lam_accel = kwargs.pop("lam_accel", 1e-8) # AA-II regularization weight.
aa_method = kwargs.pop("aa_method", "lstsq") # Algorithm for solving AA LS problem.
# Safeguarding parameters.
D_safe = kwargs.pop("D_safe", 1e6)
eps_safe = kwargs.pop("eps_safe", 1e-6)
M_safe = kwargs.pop("M_safe", int(max_iter/100))
# Printout parameters
verbose = kwargs.pop("verbose", True)
# Validate parameters.
if max_iter <= 0:
raise ValueError("max_iter must be a positive integer.")
if t_init <= 0:
raise ValueError("t_init must be a positive scalar.")
if eps_abs < 0:
raise ValueError("eps_abs must be a non-negative scalar.")
if eps_rel < 0:
raise ValueError("eps_rel must be a non-negative scalar.")
if m_accel <= 0:
raise ValueError("m_accel must be a positive integer.")
if lam_accel < 0:
raise ValueError("lam_accel must be a non-negative scalar.")
if not aa_method in ["lstsq", "lsqr"]:
raise ValueError("aa_method must be either 'lstsq' or 'lsqr'.")
if D_safe < 0:
raise ValueError("D_safe must be a non-negative scalar.")
if eps_safe < 0:
raise ValueError("eps_safe must be a non-negative scalar.")
if M_safe <= 0:
raise ValueError("M_safe must be a positive integer.")
# DRS parameters.
N = len(p_list) # Number of subproblems.
has_constr = len(A_list) != 0
if len(A_list) == 0:
if b.size != 0:
raise ValueError("Dimension mismatch: nrow(A_i) != nrow(b)")
if n_list is not None:
if len(n_list) != N:
raise ValueError("n_list must have exactly {} entries".format(N))
A_list = [sp.csr_matrix((0, ni)) for ni in n_list]
elif v_init is not None:
if len(v_init) != N:
raise ValueError("v_init must be None or contain exactly {} entries".format(N))
A_list = [sp.csr_matrix((0, vi.shape[0])) for vi in v_init]
else:
raise ValueError("n_list or v_init must be defined if A_list and b are empty")
if len(A_list) != N:
raise ValueError("A_list must be empty or contain exactly {} entries".format(N))
if v_init is None:
# v_init = [np.random.randn(A.shape[1]) for A in A_list]
v_init = [np.zeros(A.shape[1]) for A in A_list]
# v_init = [sp.csc_matrix((A.shape[1],1)) for A in A_list]
if len(v_init) != N:
raise ValueError("v_init must be None or contain exactly {} entries".format(N))
# Variable size list.
if n_list is None:
n_list = [A_list[i].shape[1] for i in range(N)]
if len(n_list) != N:
raise ValueError("n_list must be None or contain exactly {} entries".format(N))
n_list_cumsum = np.insert(np.cumsum(n_list), 0, 0)
for i in range(N):
if A_list[i].shape[0] != b.shape[0]:
raise ValueError("Dimension mismatch: nrow(A_i) != nrow(b)")
elif A_list[i].shape[1] != v_init[i].shape[0]:
raise ValueError("Dimension mismatch: ncol(A_i) != nrow(v_i)")
elif A_list[i].shape[1] != n_list[i]:
raise ValueError("Dimension mismatch: ncol(A_i) != n_i")
if not sp.issparse(A_list[i]):
A_list[i] = sp.csr_matrix(A_list[i])
if verbose:
version = get_version("__init__.py")
line_solver = "a2dr v" + version + " - Prox-Affine Distributed Convex Optimization Solver"
dashes = "-" * len(line_solver)
ddashes = "=" * len(line_solver)
line_authors = "(c) Anqi Fu, Junzi Zhang"
num_spaces_authors = (len(line_solver) - len(line_authors)) // 2
line_affil = "Stanford University 2019"
num_spaces_affil = (len(line_solver) - len(line_affil)) // 2
print(dashes)
print(line_solver)
print(" " * num_spaces_authors + line_authors)
print(" " * num_spaces_affil + line_affil)
print(dashes)
# Precondition data.
if precond and has_constr:
if verbose:
print('### Preconditioning starts ... ###')
p_list, A_list, b, e_pre = precondition(p_list, A_list, b)
t_init = 1/gmean(e_pre)**2/10
if verbose:
print('### Preconditioning finished. ###')
if verbose:
print("max_iter = {}, t_init (after preconditioning) = {:.2f}".format(
max_iter, t_init))
print("eps_abs = {:.2e}, eps_rel = {:.2e}, precond = {!r}".format(
eps_abs, eps_rel, precond))
print("ada_reg = {!r}, anderson = {!r}, m_accel = {}".format(
ada_reg, anderson, m_accel))
print("lam_accel = {:.2e}, aa_method = {}, D_safe = {:.2e}".format(
lam_accel, aa_method, D_safe))
print("eps_safe = {:.2e}, M_safe = {:d}".format(
eps_safe, M_safe))
# Store constraint matrix for projection step.
A = sp.csr_matrix(sp.hstack(A_list))
if verbose:
print("variables n = {}, constraints m = {}".format(A.shape[1], A.shape[0]))
print("nnz(A) = {}".format(A.nnz))
print("Setup time: {:.2e}".format(time() - start))
# Check linear feasibility
sys.stdout = open(os.devnull, 'w')
r1norm = sp.linalg.lsqr(A, b)[3]
sys.stdout.close()
sys.stdout = sys_stdout_origin
if r1norm >= np.sqrt(eps_abs): # infeasible
if verbose:
print('Infeasible linear equality constraint: minimum constraint violation = {:.2e}'.format(r1norm))
print('Status: Terminated due to linear infeasibility')
print("Solve time: {:.2e}".format(time() - start))
return {"x_vals": None, "primal": None, "dual": None, "num_iters": None, "solve_time": None}
if verbose:
print("----------------------------------------------------")
print(" iter | total res | primal res | dual res | time (s)")
print("----------------------------------------------------")
# Set up the workers.
pipes = []
procs = []
for i in range(N):
local, remote = Pipe()
pipes += [local]
procs += [Process(target=a2dr_worker, args=(remote, p_list[i], v_init[i], A_list[i], \
t_init, anderson, m_accel) + args)]
procs[-1].start()
# Initialize AA-II variables.
if anderson: # TODO: Store and update these efficiently as arrays.
n_sum = np.sum([np.prod(v.shape) for v in v_init])
g_vec = np.zeros(n_sum) # g^(k) = v^(k) - F(v^(k)).
s_hist = [] # History of s^(j) = v^(j+1) - v^(j), kept in S^(k) = [s^(k-m_k) ... s^(k-1)].
y_hist = [] # History of y^(j) = g^(j+1) - g^(j), kept in Y^(k) = [y^(k-m_k) ... y^(k-1)].
n_AA = M_AA = 0 # Safeguarding counters.
# A2DR loop.
k = 0
finished = False
safeguard = True
r_primal = np.zeros(max_iter)
r_dual = np.zeros(max_iter)
r_best = np.inf
# Warm start terms.
dk = np.zeros(A.shape[1])
sol = np.zeros(A.shape[0])
while not finished:
# Gather v_i^(k+1/2) from nodes.
v_halves = [pipe.recv() for pipe in pipes]
# Projection step for x^(k+1).
v_half = np.concatenate(v_halves, axis=0)
sys.stdout = open(os.devnull, 'w')
dk = sp.linalg.lsqr(A, A.dot(v_half) - b, atol=1e-10, btol=1e-10, x0=dk)[0]
sys.stdout.close()
sys.stdout = sys_stdout_origin
# Scatter d^k = A^\dagger(Av^(k+1/2) - b).
for i in range(N):
pipes[i].send((dk[n_list_cumsum[i]:n_list_cumsum[i+1]], k))
if anderson and k > 0: # for k = 0, always do the vanilla DRS update
m_k = min(m_accel, k) # Keep (y^(j), s^(j)) for iterations (k-m_k) through (k-1).
# Gather s_i^(k-1) and g_i^(k) from nodes.
sg_update = [pipe.recv() for pipe in pipes]
s_new, g_new = map(list, zip(*sg_update))
s_new = np.concatenate(s_new, axis=0) # s_i^(k-1) = v_i^(k) - v_i^(k-1).
g_new = np.concatenate(g_new, axis=0) # g_i^(k) = v_i^(k) - F(v_i^(k)) = x_i^(k+1/2) - x_i^(k+1).
# Save newest column y^(k-1) = g^(k) - g^(k-1) of matrix Y^(k).
y_hist.append(g_new - g_vec)
if len(y_hist) > m_k:
y_hist.pop(0)
g_vec = g_new
# Save newest column s^(k-1) = v^(k) - v^(k-1) of matrix S^(k).
s_hist.append(s_new)
if len(s_hist) > m_k:
s_hist.pop(0)
# Safeguard update.
if safeguard or M_AA >= M_safe:
if LA.norm(g_vec) <= D_safe*g0_norm*(n_AA/M_safe + 1)**(-(1 + eps_safe)):
AA_update = True
n_AA = n_AA + 1
M_AA = 1
safeguard = False
else:
AA_update = False
M_AA = 0
safeguard = True
else:
AA_update = True
M_AA = M_AA + 1
n_AA = n_AA + 1
# Scatter safeguarding decision.
for pipe in pipes:
pipe.send(AA_update)
if AA_update:
# Compute and scatter AA-II weights.
Y_mat = np.column_stack(y_hist)
S_mat = np.column_stack(s_hist)
if ada_reg:
reg = lam_accel * (LA.norm(Y_mat)**2 + LA.norm(S_mat)**2) # AA-II regularization.
else:
reg = lam_accel
alpha = aa_weights(Y_mat, g_new, reg, type=aa_method, rcond=None)
for pipe in pipes:
pipe.send(alpha)
elif anderson and k == 0:
AA_update = False # Initial step is always DRS.
g_new = [pipe.recv() for pipe in pipes]
g_vec = np.concatenate(g_new, axis=0)
g0_norm = LA.norm(g_vec)
# Compute l2-norm of primal and dual residuals.
r_update = [pipe.recv() for pipe in pipes]
x_halves, Ax_halves, xv_diffs = map(list, zip(*r_update))
r_primal_vec = sum(Ax_halves) - b
r_primal[k] = LA.norm(r_primal_vec, ord=2)
subgrad = np.concatenate(xv_diffs)/t_init
# sol = LA.lstsq(A.T, subgrad, rcond=None)[0]
sys.stdout = open(os.devnull, 'w')
sol = sp.linalg.lsqr(A.T, subgrad, atol=1e-10, btol=1e-10, x0=sol)[0]
sys.stdout.close()
sys.stdout = sys_stdout_origin
r_dual_vec = A.T.dot(sol) - subgrad
r_dual[k] = LA.norm(r_dual_vec, ord=2)
# Save x_i^(k+1/2) if residual norm is smallest so far.
r_all = LA.norm(np.concatenate([r_primal_vec, r_dual_vec]), ord=2)
if k == 0: # Store ||r^0||_2 for stopping criterion.
r_all_0 = r_all
if k == 0 or r_all < r_best:
x_final = x_halves
r_best = r_all
k_best = k
if (k % 100 == 0 or k == max_iter-1) and verbose:
# print every 100 iterations or reaching maximum
print("{}| {} {} {} {}".format(str(k).rjust(6),
format(r_all, ".2e").ljust(10),
format(r_primal[k], ".2e").ljust(11),
format(r_dual[k], ".2e").ljust(9),
format(time() - start, ".2e").ljust(8)))
# Stop when residual norm falls below tolerance.
k = k + 1
finished = k >= max_iter or (r_all <= eps_abs + eps_rel * r_all_0)
if r_all <= eps_abs + eps_rel * r_all_0 and k % 100 != 0 and verbose:
# print the best iterate
print("{}| {} {} {} {}".format(str(k-1).rjust(6),
format(r_all, ".2e").ljust(10),
format(r_primal[k-1], ".2e").ljust(11),
format(r_dual[k-1], ".2e").ljust(9),
format(time() - start, ".2e").ljust(8)))
# Unscale and return x_i^(k+1/2).
[p.terminate() for p in procs]
if precond and has_constr:
x_final = [ei*x for x, ei in zip(x_final, e_pre)]
end = time()
if verbose:
print("----------------------------------------------------")
if k < max_iter:
print("Status: Solved")
else:
print("Status: Reach maximum iterations")
print("Solve time: {:.2e}".format(end - start))
print("Total number of iterations: {}".format(k))
print("Best total residual: {:.2e}; reached at iteration {}".format(r_best, k_best))
print(ddashes)
return {"x_vals": x_final, "primal": np.array(r_primal[:k]), "dual": np.array(r_dual[:k]), \
"num_iters": k, "solve_time": (end - start)}
| 17,229 | 40.518072 | 112 | py |
a2dr | a2dr-master/a2dr/precondition.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
from scipy import sparse
from scipy.sparse import block_diag, issparse, csr_matrix, csc_matrix, diags
from scipy.sparse.linalg import norm
import scipy.linalg as sLA
from scipy.stats.mstats import gmean
def precondition(p_list, A_list, b, tol = 1e-3, max_iter = 1000):
# print('### Preconditioning starts ...')
if all([Ai.size == 0 for Ai in A_list]):
return p_list, A_list, b, np.ones(len(A_list))
n_list = [A.shape[1] for A in A_list]
sparse_check = [sparse.issparse(A) for A in A_list]
# Enforce csr format for better matrix operation efficiency.
if np.sum(sparse_check) == 0: # all dense
A = csr_matrix(np.hstack(A_list))
elif np.prod(sparse_check) == 1: # all sparse
A = csr_matrix(sparse.hstack(A_list))
else:
A_list_csr = [csr_matrix(A) for A in A_list]
        A = csr_matrix(sparse.hstack(A_list_csr))
d, e, A_hat, k = mat_equil(A, n_list, tol, max_iter)
split_idx = np.cumsum(n_list)
split_idx = np.hstack([0, split_idx])
A_hat = csc_matrix(A_hat) # faster column slicing
A_eq_list = [A_hat[:,split_idx[i]:split_idx[i+1]] for i in range(len(n_list))]
A_eq_list = [csr_matrix(A_eq_list[i]) for i in range(len(A_eq_list))] # change back to csr format
# Note: We must do it this way because the standard pythonic list comprehension, i.e., [f(x) for x in iterable]
# will create *duplicate* function handles, leading to incorrect results! This is due to late binding:
# https://stackoverflow.com/questions/3431676/creating-functions-in-a-loop
# https://docs.python-guide.org/writing/gotchas/#late-binding-closures
def proto(i, p_list, e):
return lambda v, t: p_list[i](e[i]*v, t*e[i]**2)/e[i]
p_eq_list = list(map(lambda i: proto(i,p_list,e), range(len(p_list))))
return p_eq_list, A_eq_list, d*b, e
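# Small illustration (not part of the original module) of the late-binding gotcha noted above:
# each handle must freeze its own index, otherwise every handle ends up using the last index.
def _late_binding_demo():
    scales = [1.0, 2.0, 3.0]
    bad = [lambda v: scales[i] * v for i in range(3)]                     # all capture the final i
    good = [(lambda j: (lambda v: scales[j] * v))(i) for i in range(3)]   # j is frozen per handle
    assert [f(1.0) for f in bad] == [3.0, 3.0, 3.0]
    assert [f(1.0) for f in good] == [1.0, 2.0, 3.0]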
def mat_equil(A, n_list, tol, max_iter):
# Block matrix equilibration using regularized Sinkhorn-Knopp
# Reference: [POGS] http://stanford.edu/~boyd/papers/pdf/pogs.pdf
'''
1. Input
A: a numpy 2d-array or matrix of size m-by-n to be equilibrated
n_list: a list of size N containing size n_i of each variable block i (n_1+...+n_N = n)
tol: tolerance for termination
max_iter: maximum number of iterations
2. Output
d: left scaling vector (of size m)
e: right scaling vector (of size N)
B: equilibrated matrix B = diag(d) A diag(e_1I_{n_1},...,e_NI_{n_N})
k: number of iterations terminated
3. Requirement:
import numpy as np
        from scipy.sparse import block_diag
'''
N = len(n_list)
m = A.shape[0]
em, eN = np.ones(m), np.ones(N)
d, e = em, eN
# Form the size m-by-N matrix A_block, whose (i,j)-th entry is \sum_{k=n_1+...+n_{j-1}}}^{n_1+...+n_j} A_{ik}^2
gamma = (m + N)/m/N * np.sqrt(np.finfo(float).eps)
A2 = A.power(2) if issparse(A) else np.power(A,2)
ave_list = [np.ones([n_i,1]) for n_i in n_list]
A_block = A2.dot(block_diag(ave_list))
A_block_T = A_block.transpose()
# print('Block matrix shape = {}'.format(A_block.shape))
# print('gamma={}'.format(gamma))
# Apply regularized Sinkhorn-Knopp on A_block
for k in range(max_iter):
d1 = N / (A_block.dot(e) + N * gamma * em)
e1 = m / (A_block_T.dot(d1) + m * gamma * eN)
err_d = np.linalg.norm(d1 - d)
err_e = np.linalg.norm(e1 - e)
d = d1
e = e1
# print('k={}, err_d={}, err_e={}'.format(k, err_d/np.sqrt(m), err_e/np.sqrt(N)))
if err_d/np.sqrt(m) <= tol and err_e/np.sqrt(N) <= tol:
break
d = np.sqrt(d)
e = np.sqrt(e)
I_list = [sparse.eye(n_list[i]) * e[i] for i in range(N)]
E = csr_matrix(block_diag(I_list))
D = csr_matrix(diags(d))
# print('generate D, E')
B = D.dot(csr_matrix(A).dot(E))
# print('compute scaled matrix')
# Rescale to have \|DAE\|_2 close to 1
scale = norm(B, 'fro') / np.sqrt(np.min([m,N]))
d_mean = gmean(d)
e_mean = gmean(e)
q = np.log(d_mean / e_mean * scale) / 2 / np.log(scale)
d = d * (scale ** (-q))
e = e * (scale ** (q-1))
B = B / scale
return d, e, B, k
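if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). It equilibrates a tiny two-block
    # system A_1 x_1 + A_2 x_2 = b with identity proximal operators; shapes, density, and the
    # seed are illustrative only.
    np.random.seed(0)
    m_demo, n1_demo, n2_demo = 6, 3, 4
    A_list_demo = [np.random.randn(m_demo, n1_demo),
                   sparse.random(m_demo, n2_demo, density=0.5, format='csr')]
    b_demo = np.random.randn(m_demo)
    p_list_demo = [lambda v, t: v, lambda v, t: v]   # identity proximal operators
    p_eq_demo, A_eq_demo, b_eq_demo, e_demo = precondition(p_list_demo, A_list_demo, b_demo)
    print("right scaling e:", e_demo)
    print("scaled block norms:", [norm(Ai, 'fro') for Ai in A_eq_demo])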
| 4,872 | 37.984 | 115 | py |
a2dr | a2dr-master/a2dr/tests/test_proximal.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
from scipy import sparse
from cvxpy import *
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
class TestProximal(BaseTest):
"""Unit tests for proximal operators"""
def setUp(self):
np.random.seed(1)
self.TOLERANCE = 1e-6
self.SCS_TOLERANCE = 1e-6 #1e-8
self.SCS_MAXITER = 10000
self.t = 5*np.abs(np.random.randn()) + self.TOLERANCE
self.c = np.random.randn()
self.v = np.random.randn(100)
self.v_small = np.random.randn(10)
self.B = np.random.randn(50,10)
self.B_small = np.random.randn(10,5)
self.B_square = np.random.randn(10,10)
self.B_symm = np.random.randn(10,10)
self.B_symm = (self.B_symm + self.B_symm.T) / 2.0
self.B_psd = np.random.randn(10,10)
self.B_psd = self.B_psd.T.dot(self.B_psd)
self.u_sparse = sparse.random(100,1)
self.u_dense = self.u_sparse.todense()
self.C_sparse = sparse.random(50,10)
self.C_dense = self.C_sparse.todense()
self.C_square_sparse = sparse.random(50,50)
self.C_square_dense = self.C_square_sparse.todense()
def prox_cvxpy(self, v, fun, constr_fun = None, t = 1, scale = 1, offset = 0, lin_term = 0, quad_term = 0, *args, **kwargs):
x_var = Variable() if np.isscalar(v) else Variable(v.shape)
expr = t * fun(scale * x_var - offset) + sum(multiply(lin_term, x_var)) + quad_term * sum_squares(x_var)
constrs = [] if constr_fun is None else constr_fun(scale * x_var - offset)
prob = Problem(Minimize(expr + 0.5 * sum_squares(x_var - v)), constrs)
prob.solve(*args, **kwargs)
return x_var.value
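    # Reference sketch (not part of the original tests): prox_cvxpy numerically evaluates
    # prox_g(v) for g(x) = t*f(scale*x - offset) + <lin_term, x> + quad_term*||x||_2^2 by solving
    # the defining minimization with CVXPY. For example, with f = |.| and the default
    # scale/offset/lin_term/quad_term it should reduce to soft-thresholding, i.e. match
    # np.sign(v)*np.maximum(np.abs(v) - t, 0) elementwise.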
def check_composition(self, prox, fun, v_init, places = 3, symm=False, *args, **kwargs):
x_a2dr = prox(v_init)
x_cvxpy = self.prox_cvxpy(v_init, fun, *args, **kwargs)
self.assertItemsAlmostEqual(x_a2dr, x_cvxpy, places = places)
x_a2dr = prox(v_init, t = self.t)
x_cvxpy = self.prox_cvxpy(v_init, fun, t = self.t, *args, **kwargs)
self.assertItemsAlmostEqual(x_a2dr, x_cvxpy, places = places)
x_a2dr = prox(v_init, scale = -1)
x_cvxpy = self.prox_cvxpy(v_init, fun, scale = -1, *args, **kwargs)
self.assertItemsAlmostEqual(x_a2dr, x_cvxpy, places = places)
x_a2dr = prox(v_init, scale = 2, offset = 0.5)
x_cvxpy = self.prox_cvxpy(v_init, fun, scale = 2, offset = 0.5, *args, **kwargs)
self.assertItemsAlmostEqual(x_a2dr, x_cvxpy, places = places)
x_a2dr = prox(v_init, t = self.t, scale = 2, offset = 0.5, lin_term = 1.5, quad_term = 2.5)
x_cvxpy = self.prox_cvxpy(v_init, fun, t = self.t, scale = 2, offset = 0.5, lin_term = 1.5, quad_term = 2.5,
*args, **kwargs)
self.assertItemsAlmostEqual(x_a2dr, x_cvxpy, places = places)
if np.isscalar(v_init):
offset = np.random.randn()
lin_term = np.random.randn()
else:
offset = np.random.randn(*v_init.shape)
lin_term = np.random.randn(*v_init.shape)
if symm:
# symmetrization: useful for -logdet, etc.
offset = (offset + offset.T)/2
lin_term = (lin_term + lin_term.T)/2
x_a2dr = prox(v_init, t = self.t, scale = 0.5, offset = offset, lin_term = lin_term, quad_term = 2.5)
x_cvxpy = self.prox_cvxpy(v_init, fun, t = self.t, scale = 0.5, offset = offset, lin_term = lin_term,
quad_term = 2.5, *args, **kwargs)
self.assertItemsAlmostEqual(x_a2dr, x_cvxpy, places = places)
def check_elementwise(self, prox, places = 4):
# Vector input.
x_vec1 = prox(self.v_small)
x_vec2 = np.array([prox(self.v_small[i]) for i in range(self.v_small.shape[0])])
self.assertItemsAlmostEqual(x_vec1, x_vec2, places = places)
x_vec1 = prox(self.v_small, t = self.t)
x_vec2 = np.array([prox(self.v_small[i], t = self.t) for i in range(self.v_small.shape[0])])
self.assertItemsAlmostEqual(x_vec1, x_vec2, places = places)
offset = np.random.randn(*self.v_small.shape)
lin_term = np.random.randn(*self.v_small.shape)
x_vec1 = prox(self.v_small, t = self.t, scale = 0.5, offset = offset, lin_term = lin_term, quad_term = 2.5)
x_vec2 = np.array([prox(self.v_small[i], t = self.t, scale = 0.5, offset = offset[i], lin_term = lin_term[i], \
quad_term = 2.5) for i in range(self.v_small.shape[0])])
self.assertItemsAlmostEqual(x_vec1, x_vec2, places = places)
# Matrix input.
x_mat1 = prox(self.B_small)
x_mat2 = [[prox(self.B_small[i,j]) for j in range(self.B_small.shape[1])] for i in range(self.B_small.shape[0])]
x_mat2 = np.array(x_mat2)
self.assertItemsAlmostEqual(x_mat1, x_mat2, places = places)
x_mat1 = prox(self.B_small, t = self.t)
x_mat2 = [[prox(self.B_small[i,j], t = self.t) for j in range(self.B_small.shape[1])] \
for i in range(self.B_small.shape[0])]
x_mat2 = np.array(x_mat2)
self.assertItemsAlmostEqual(x_mat1, x_mat2, places = places)
offset = np.random.randn(*self.B_small.shape)
lin_term = np.random.randn(*self.B_small.shape)
x_mat1 = prox(self.B_small, t = self.t, scale = 0.5, offset = offset, lin_term = lin_term, quad_term = 2.5)
x_mat2 = [[prox(self.B_small[i,j], t = self.t, scale = 0.5, offset = offset[i,j], lin_term = lin_term[i,j], \
quad_term = 2.5) for j in range(self.B_small.shape[1])] for i in range(self.B_small.shape[0])]
x_mat2 = np.array(x_mat2)
self.assertItemsAlmostEqual(x_mat1, x_mat2, places = places)
def check_sparsity(self, prox, places = 4, check_vector = True, check_matrix = True, matrix_type = "general"):
if check_vector:
# Vector input.
x_vec1 = prox(self.u_sparse)
x_vec2 = prox(self.u_dense)
self.assertTrue(sparse.issparse(x_vec1))
self.assertItemsAlmostEqual(x_vec1.todense(), x_vec2, places = places)
x_vec1 = prox(self.u_sparse, t=self.t)
x_vec2 = prox(self.u_dense, t=self.t)
self.assertTrue(sparse.issparse(x_vec1))
self.assertItemsAlmostEqual(x_vec1.todense(), x_vec2, places = places)
offset = sparse.random(*self.u_sparse.shape)
lin_term = sparse.random(*self.u_sparse.shape)
x_vec1 = prox(self.u_sparse, t=self.t, scale=0.5, offset=offset, lin_term=lin_term, quad_term=2.5)
x_vec2 = prox(self.u_dense, t=self.t, scale=0.5, offset=offset, lin_term=lin_term, quad_term=2.5)
self.assertTrue(sparse.issparse(x_vec1))
self.assertItemsAlmostEqual(x_vec1.todense(), x_vec2, places = places)
if check_matrix:
if matrix_type == "general":
C_sparse = self.C_sparse
C_dense = self.C_dense
elif matrix_type == "square":
C_sparse = self.C_square_sparse
C_dense = self.C_square_dense
else:
raise ValueError("matrix_type must be 'general' or 'square'")
# Matrix input.
x_mat1 = prox(C_sparse)
x_mat2 = prox(C_dense)
self.assertTrue(sparse.issparse(x_mat1))
self.assertItemsAlmostEqual(x_mat1.todense(), x_mat2, places = places)
x_mat1 = prox(C_sparse, t=self.t)
x_mat2 = prox(C_dense, t=self.t)
self.assertTrue(sparse.issparse(x_mat1))
self.assertItemsAlmostEqual(x_mat1.todense(), x_mat2, places = places)
offset = sparse.random(*C_sparse.shape)
lin_term = sparse.random(*C_sparse.shape)
x_mat1 = prox(C_sparse, t=self.t, scale=0.5, offset=offset, lin_term=lin_term, quad_term=2.5)
x_mat2 = prox(C_dense, t=self.t, scale=0.5, offset=offset, lin_term=lin_term, quad_term=2.5)
self.assertTrue(sparse.issparse(x_mat1))
self.assertItemsAlmostEqual(x_mat1.todense(), x_mat2, places = places)
def test_box_constr(self):
# Projection onto a random interval.
lo = np.random.randn()
hi = lo + 5*np.abs(np.random.randn())
x_a2dr = prox_box_constr(self.v, self.t, v_lo = lo, v_hi = hi)
self.assertTrue(np.all(lo - self.TOLERANCE <= x_a2dr) and np.all(x_a2dr <= hi + self.TOLERANCE))
# Projection onto a random interval with affine composition.
scale = 2 * np.abs(np.random.randn()) + self.TOLERANCE
if np.random.rand() < 0.5:
scale = -scale
offset = np.random.randn(*self.v.shape)
lin_term = np.random.randn(*self.v.shape)
quad_term = np.abs(np.random.randn())
x_a2dr = prox_box_constr(self.v, self.t, v_lo = lo, v_hi = hi, scale = scale, offset = offset, \
lin_term = lin_term, quad_term = quad_term)
x_scaled = scale*x_a2dr - offset
self.assertTrue(np.all(lo - self.TOLERANCE <= x_scaled) and np.all(x_scaled <= hi + self.TOLERANCE))
# Common box intervals.
bounds = [(0, 0), (-1, 1), (0, np.inf), (-np.inf, 0)]
for bound in bounds:
lo, hi = bound
# Elementwise consistency tests.
self.check_elementwise(lambda v, *args, **kwargs: prox_box_constr(v, v_lo = lo, v_hi = hi, *args, **kwargs))
# Sparsity consistency tests.
self.check_sparsity(lambda v, *args, **kwargs: prox_box_constr(v, v_lo = lo, v_hi = hi, *args, **kwargs))
# General composition tests.
self.check_composition(lambda v, *args, **kwargs: prox_box_constr(v, v_lo = lo, v_hi = hi, *args, **kwargs),
lambda x: 0, self.v, constr_fun = lambda x: [lo <= x, x <= hi])
self.check_composition(lambda v, *args, **kwargs: prox_box_constr(v, v_lo = lo, v_hi = hi, *args, **kwargs),
lambda x: 0, self.B, constr_fun = lambda x: [lo <= x, x <= hi])
# Optimal control term: f(x) = I(||x||_{\infty} <= 1) = I(-1 <= x <= 1).
x_a2dr = prox_box_constr(self.v, self.t, v_lo = -1, v_hi = 1)
x_cvxpy = self.prox_cvxpy(self.v, lambda x: 0, constr_fun = lambda x: [norm_inf(x) <= 1], t = self.t)
self.assertTrue(np.all(-1 - self.TOLERANCE <= x_a2dr) and np.all(x_a2dr <= 1 + self.TOLERANCE))
self.assertItemsAlmostEqual(x_a2dr, x_cvxpy, places = 3)
def test_nonneg_constr(self):
x_a2dr = prox_nonneg_constr(self.v, self.t, scale = -2, offset = 0)
self.assertTrue(np.all(-2*x_a2dr >= -self.TOLERANCE))
scale = 2*np.abs(np.random.randn()) + self.TOLERANCE
if np.random.rand() < 0.5:
scale = -scale
offset = np.random.randn(*self.v.shape)
lin_term = np.random.randn(*self.v.shape)
quad_term = np.abs(np.random.randn())
x_a2dr = prox_nonneg_constr(self.v, self.t, scale = scale, offset = offset, lin_term = lin_term, \
quad_term = quad_term)
        self.assertTrue(np.all(scale*x_a2dr - offset >= -self.TOLERANCE))
# Elementwise consistency tests.
self.check_elementwise(prox_nonneg_constr)
# Sparsity consistency tests.
self.check_sparsity(prox_nonneg_constr)
# General composition tests.
self.check_composition(prox_nonneg_constr, lambda x: 0, self.v, constr_fun = lambda x: [x >= 0])
self.check_composition(prox_nonneg_constr, lambda x: 0, self.B, constr_fun = lambda x: [x >= 0])
# Non-negative least squares term: f(x) = I(x >= 0).
x_a2dr = prox_nonneg_constr(self.v, self.t)
x_cvxpy = self.prox_cvxpy(self.v, lambda x: 0, constr_fun = lambda x: [x >= 0], t = self.t)
self.assertTrue(np.all(x_a2dr >= -self.TOLERANCE))
self.assertItemsAlmostEqual(x_a2dr, x_cvxpy, places = 3)
def test_nonpos_constr(self):
x_a2dr = prox_nonpos_constr(self.v, self.t)
self.assertTrue(np.all(x_a2dr <= self.TOLERANCE))
x_a2dr = prox_nonpos_constr(self.v, self.t, scale=-2, offset=0)
self.assertTrue(np.all(-2*x_a2dr <= self.TOLERANCE))
scale = 2 * np.abs(np.random.randn()) + self.TOLERANCE
if np.random.rand() < 0.5:
scale = -scale
offset = np.random.randn(*self.v.shape)
lin_term = np.random.randn(*self.v.shape)
quad_term = np.abs(np.random.randn())
x_a2dr = prox_nonpos_constr(self.v, self.t, scale=scale, offset=offset, lin_term=lin_term, \
quad_term=quad_term)
        self.assertTrue(np.all(scale * x_a2dr - offset <= self.TOLERANCE))
# Elementwise consistency tests.
self.check_elementwise(prox_nonpos_constr)
# Sparsity consistency tests.
self.check_sparsity(prox_nonpos_constr)
# General composition tests.
self.check_composition(prox_nonpos_constr, lambda x: 0, self.v, constr_fun=lambda x: [x <= 0])
self.check_composition(prox_nonpos_constr, lambda x: 0, self.B, constr_fun=lambda x: [x <= 0])
def test_psd_cone(self):
# Projection onto the PSD cone.
B_a2dr = prox_psd_cone(self.B_symm, self.t)
self.assertTrue(np.all(np.linalg.eigvals(B_a2dr) >= -self.TOLERANCE))
# Projection onto the PSD cone with affine composition.
scale = 2 * np.abs(np.random.randn()) + self.TOLERANCE
if np.random.rand() < 0.5:
scale = -scale
offset = np.random.randn(*self.B_symm.shape)
lin_term = np.random.randn(*self.B_symm.shape)
quad_term = np.abs(np.random.randn())
B_a2dr = prox_psd_cone(self.B_symm, self.t, scale = scale, offset = offset, lin_term = lin_term, \
quad_term = quad_term)
B_scaled = scale*B_a2dr - offset
self.assertTrue(np.all(np.linalg.eigvals(B_scaled) >= -self.TOLERANCE))
# Simple composition.
B_a2dr = prox_psd_cone(self.B_symm, t = self.t, scale = 2, offset = 0.5, lin_term = 1.5, quad_term = 2.5)
B_cvxpy = self.prox_cvxpy(self.B_symm, lambda X: 0, constr_fun = lambda X: [X >> 0], t = self.t, scale = 2, \
offset = 0.5, lin_term = 1.5, quad_term = 2.5)
self.assertItemsAlmostEqual(B_a2dr, B_cvxpy)
def test_soc(self):
# Projection onto the SOC.
x_a2dr = prox_soc(self.v, self.t)
self.assertTrue(np.linalg.norm(x_a2dr[:-1],2) <= x_a2dr[-1] + self.TOLERANCE)
# Projection onto the SOC with affine composition.
x_a2dr = prox_soc(self.v, self.t, scale=2, offset=0.5)
x_scaled = 2*x_a2dr - 0.5
self.assertTrue(np.linalg.norm(x_scaled[:-1],2) <= x_scaled[-1] + self.TOLERANCE)
scale = 2 * np.abs(np.random.randn()) + self.TOLERANCE
if np.random.rand() < 0.5:
scale = -scale
offset = np.random.randn(*self.v.shape)
lin_term = np.random.randn(*self.v.shape)
quad_term = np.abs(np.random.randn())
x_a2dr = prox_soc(self.v, self.t, scale=scale, offset=offset, lin_term=lin_term, quad_term=quad_term)
x_scaled = scale*x_a2dr - offset
self.assertTrue(np.linalg.norm(x_scaled[:-1], 2) <= x_scaled[-1] + self.TOLERANCE)
# Sparsity consistency tests.
self.check_sparsity(prox_soc, check_matrix = False)
# General composition tests.
self.check_composition(prox_soc, lambda x: 0, self.v, constr_fun = lambda x: [SOC(x[-1], x[:-1])], \
solver = "SCS", eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
def test_abs(self):
# Elementwise consistency tests.
self.check_elementwise(prox_abs)
# General composition tests.
self.check_composition(prox_abs, cvxpy.abs, self.c)
self.check_composition(prox_abs, lambda x: sum(abs(x)), self.v)
self.check_composition(prox_abs, lambda x: sum(abs(x)), self.B)
def test_constant(self):
# Elementwise consistency tests.
self.check_elementwise(prox_constant)
# Sparsity consistency tests.
self.check_sparsity(prox_constant)
# General composition tests.
self.check_composition(prox_constant, lambda x: 0, self.c)
self.check_composition(prox_constant, lambda x: 0, self.v)
self.check_composition(prox_constant, lambda x: 0, self.B)
def test_exp(self):
# Elementwise consistency tests.
self.check_elementwise(prox_exp)
# General composition tests.
self.check_composition(prox_exp, cvxpy.exp, self.c, solver = "SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.check_composition(prox_exp, lambda x: sum(exp(x)), self.v, solver = "SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.check_composition(prox_exp, lambda x: sum(exp(x)), self.B, places=2, solver = "SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
def test_huber(self):
for M in [0, 0.5, 1, 2]:
# Elementwise consistency tests.
self.check_elementwise(lambda v, *args, **kwargs: prox_huber(v, *args, **kwargs, M = M))
# Sparsity consistency tests.
self.check_sparsity(lambda v, *args, **kwargs: prox_huber(v, *args, **kwargs, M = M))
# Scalar input.
self.check_composition(lambda v, *args, **kwargs: prox_huber(v, M = M, *args, **kwargs),
lambda x: huber(x, M = M), self.c)
# Vector input.
self.check_composition(lambda v, *args, **kwargs: prox_huber(v, M = M, *args, **kwargs),
lambda x: sum(huber(x, M = M)), self.v)
# Matrix input.
self.check_composition(lambda v, *args, **kwargs: prox_huber(v, M = M, *args, **kwargs),
lambda x: sum(huber(x, M = M)), self.B)
def test_identity(self):
# Elementwise consistency tests.
self.check_elementwise(prox_identity)
# General composition tests.
self.check_composition(prox_identity, lambda x: x, self.c)
self.check_composition(prox_identity, lambda x: sum(x), self.v)
self.check_composition(prox_identity, lambda x: sum(x), self.B)
def test_logistic(self):
# General composition tests.
# self.check_composition(prox_logistic, lambda x: logistic(x), self.c, solver='ECOS')
self.check_composition(prox_logistic, lambda x: logistic(x), self.c, solver = 'SCS',
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.check_composition(prox_logistic, lambda x: sum(logistic(x)), self.v, solver = 'SCS',
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.check_composition(prox_logistic, lambda x: sum(logistic(x)), self.B, solver = "SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
# Simple logistic function: f(x) = \sum_i log(1 + exp(-y_i*x_i)).
y = np.random.randn(*self.v.shape)
self.check_composition(lambda v, *args, **kwargs: prox_logistic(v, y = y, *args, **kwargs),
lambda x: sum(logistic(-multiply(y,x))), self.v, places = 2, solver = "SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
# Multi-task logistic regression term: f(B) = \sum_i log(1 + exp(-Y_{ij}*B_{ij}).
Y_mat = np.random.randn(*self.B.shape)
B_a2dr = prox_logistic(self.B, t = self.t, y = Y_mat)
B_cvxpy = self.prox_cvxpy(self.B, lambda B: sum(logistic(-multiply(Y_mat,B))), t = self.t, solver = "SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.assertItemsAlmostEqual(B_a2dr, B_cvxpy, places = 2)
self.check_composition(lambda v, *args, **kwargs: prox_logistic(v, y = Y_mat, *args, **kwargs),
lambda B: sum(logistic(-multiply(Y_mat,B))), self.B, places = 2, solver = "SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
def test_pos(self):
# Elementwise consistency tests.
self.check_elementwise(prox_pos)
# Sparsity consistency tests.
self.check_sparsity(prox_pos)
# General composition tests.
self.check_composition(prox_pos, cvxpy.pos, self.c)
self.check_composition(prox_pos, lambda x: sum(pos(x)), self.v)
self.check_composition(prox_pos, lambda x: sum(pos(x)), self.B)
def test_neg(self):
# Elementwise consistency tests.
self.check_elementwise(prox_neg)
# Sparsity consistency tests.
self.check_sparsity(prox_neg)
# General composition tests.
self.check_composition(prox_neg, cvxpy.neg, self.c)
self.check_composition(prox_neg, lambda x: sum(neg(x)), self.v)
self.check_composition(prox_neg, lambda x: sum(neg(x)), self.B)
def test_neg_entr(self):
# Elementwise consistency tests.
self.check_elementwise(prox_neg_entr)
# General composition tests.
self.check_composition(prox_neg_entr, lambda x: -entr(x), self.c, solver="SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.check_composition(prox_neg_entr, lambda x: sum(-entr(x)), self.v, solver="SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.check_composition(prox_neg_entr, lambda x: sum(-entr(x)), self.B, places=2, solver="SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
def test_neg_log(self):
# Elementwise consistency tests.
self.check_elementwise(prox_neg_log)
# General composition tests.
self.check_composition(prox_neg_log, lambda x: -log(x), self.c, solver='SCS',
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.check_composition(prox_neg_log, lambda x: sum(-log(x)), self.v, solver='SCS',
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.check_composition(prox_neg_log, lambda x: sum(-log(x)), self.B, places=2, solver="SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
def test_neg_log_det(self):
# General composition tests.
self.check_composition(prox_neg_log_det, lambda X: -log_det(X), self.B_symm, places=2, symm=True, solver="SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.check_composition(prox_neg_log_det, lambda X: -log_det(X), self.B_psd, places=2, symm=True, solver="SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
# Sparse inverse covariance estimation term: f(B) = -log(det(B)) for symmetric positive definite B.
B_spd = self.B_psd + np.eye(self.B_psd.shape[0])
B_a2dr = prox_neg_log_det(B_spd, self.t)
B_cvxpy = self.prox_cvxpy(B_spd, lambda X: -log_det(X), t=self.t, solver="SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.assertItemsAlmostEqual(B_a2dr, B_cvxpy, places=2)
# Sparse inverse covariance estimation term: f(B) = -log(det(B)) + tr(BQ) for symmetric positive definite B
# and given matrix Q.
Q = np.random.randn(*B_spd.shape)
Q = (Q + Q.T)/2 # keep Q symmetric to be valid input for -logdet proximal
B_a2dr = prox_neg_log_det(B_spd, self.t, lin_term = self.t*Q.T) # tr(A^TB) = \sum_{ij} A_{ij}B_{ij}
B_cvxpy = self.prox_cvxpy(B_spd, lambda X: -log_det(X) + trace(Q*X), t=self.t, solver="SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.assertItemsAlmostEqual(B_a2dr, B_cvxpy, places=2)
def test_max(self):
# General composition tests.
self.check_composition(prox_max, cvxpy.max, self.c)
self.check_composition(prox_max, cvxpy.max, self.v)
# self.check_composition(prox_max, cvxpy.max, self.B, solver = "SCS",
# eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.check_composition(prox_max, cvxpy.max, self.B)
def test_kl(self):
# General composition tests.
u_c = np.random.rand() + 1e-8
u_v = np.random.rand(*self.v.shape) + 1e-8
u_B = np.random.rand(*self.B.shape) + 1e-8
self.check_composition(lambda v, *args, **kwargs: prox_kl(v, u = u_c, *args, **kwargs),
lambda x: sum(-entr(x)-x*log(u_c)), self.c, solver='SCS',
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.check_composition(lambda v, *args, **kwargs: prox_kl(v, u = u_v, *args, **kwargs),
lambda x: sum(-entr(x)-multiply(x, log(u_v))), self.v, solver='SCS',
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.check_composition(lambda v, *args, **kwargs: prox_kl(v, u = u_B, *args, **kwargs),
lambda x: sum(-entr(x)-multiply(x, log(u_B))), self.B, solver='SCS',
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
def test_norm1(self):
# Sparsity consistency tests.
self.check_sparsity(prox_norm1)
# General composition tests.
self.check_composition(prox_norm1, norm1, self.c)
self.check_composition(prox_norm1, norm1, self.v)
self.check_composition(prox_norm1, norm1, self.B)
# l1 trend filtering term: f(x) = \alpha*||x||_1.
alpha = 0.5 + np.abs(np.random.randn())
x_a2dr = prox_norm1(self.v, t = alpha*self.t)
x_cvxpy = self.prox_cvxpy(self.v, norm1, t = alpha*self.t)
self.assertItemsAlmostEqual(x_a2dr, x_cvxpy, places = 4)
# Sparse inverse covariance estimation term: f(B) = \alpha*||B||_1.
B_symm_a2dr = prox_norm1(self.B_symm, t = alpha*self.t)
B_symm_cvxpy = self.prox_cvxpy(self.B_symm, norm1, t = alpha*self.t)
self.assertItemsAlmostEqual(B_symm_a2dr, B_symm_cvxpy, places = 4)
def test_norm2(self):
# Sparsity consistency tests.
self.check_sparsity(prox_norm2)
# General composition tests.
self.check_composition(prox_norm2, norm2, self.c, solver ="SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.check_composition(prox_norm2, norm2, self.v, solver ="SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.check_composition(prox_norm2, lambda B: cvxpy.norm(B, 'fro'), self.B, solver ="SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
# f(x) = \alpha*||x||_2
alpha = 0.5 + np.abs(np.random.randn())
x_a2dr = prox_norm2(self.v, t = alpha*self.t)
x_cvxpy = self.prox_cvxpy(self.v, norm2, t = alpha*self.t)
self.assertItemsAlmostEqual(x_a2dr, x_cvxpy, places = 4)
def test_norm_inf(self):
# General composition tests.
self.check_composition(prox_norm_inf, norm_inf, self.c)
self.check_composition(prox_norm_inf, norm_inf, self.v)
# self.check_composition(prox_norm_inf, norm_inf, self.B, solver='ECOS')
self.check_composition(prox_norm_inf, norm_inf, self.B, solver ="SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
# f(x) = \alpha*||x||_{\infty}
alpha = 0.5 + np.abs(np.random.randn())
x_a2dr = prox_norm_inf(self.v, t = alpha*self.t)
x_cvxpy = self.prox_cvxpy(self.v, norm_inf, t = alpha*self.t)
self.assertItemsAlmostEqual(x_a2dr, x_cvxpy, places = 4)
def test_norm_fro(self):
# General composition tests.
self.check_composition(prox_norm_fro, lambda X: cvxpy.norm(X,'fro'), self.B, solver='SCS',
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
def test_norm_nuc(self):
# General composition tests.
self.check_composition(prox_norm_nuc, normNuc, self.B, solver='SCS',
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
# Multi-task logistic regression term: f(B) = \beta*||B||_*.
beta = 1.5 + np.abs(np.random.randn())
B_a2dr = prox_norm_nuc(self.B, t = beta*self.t)
B_cvxpy = self.prox_cvxpy(self.B, normNuc, t = beta*self.t, solver='SCS',
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.assertItemsAlmostEqual(B_a2dr, B_cvxpy, places = 3)
def test_group_lasso(self):
# Sparsity consistency tests.
self.check_sparsity(prox_group_lasso)
# General composition tests.
groupLasso = lambda B: sum([norm2(B[:,j]) for j in range(B.shape[1])])
self.check_composition(prox_group_lasso, groupLasso, self.B, solver="SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
# Multi-task logistic regression term: f(B) = \alpha*||B||_{2,1}.
alpha = 1.5 + np.abs(np.random.randn())
B_a2dr = prox_group_lasso(self.B, t = alpha*self.t)
B_cvxpy = self.prox_cvxpy(self.B, groupLasso, t = alpha*self.t, solver="SCS",
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
self.assertItemsAlmostEqual(B_a2dr, B_cvxpy, places = 3)
# Compare with taking l2-norm separately on each column.
B_norm2 = [prox_norm2(self.B[:,j], t = alpha*self.t) for j in range(self.B.shape[1])]
B_norm2 = np.vstack(B_norm2)
self.assertItemsAlmostEqual(B_a2dr, B_norm2, places = 3)
def test_sigma_max(self):
# General composition tests.
self.check_composition(prox_sigma_max, sigma_max, self.B, solver='SCS',
eps=self.SCS_TOLERANCE, max_iters=self.SCS_MAXITER)
def test_sum_squares(self):
# Sparsity consistency tests.
self.check_sparsity(prox_sum_squares)
# General composition tests.
self.check_composition(prox_sum_squares, sum_squares, self.v)
self.check_composition(prox_sum_squares, sum_squares, self.B)
# Optimal control term: f(x) = ||x||_2^2.
x_a2dr = prox_sum_squares(self.v, t = self.t)
x_cvxpy = self.prox_cvxpy(self.v, sum_squares, t = self.t)
self.assertItemsAlmostEqual(x_a2dr, x_cvxpy, places = 4)
# l1 trend filtering term: f(x) = (1/2)*||x - y||_2^2 for given y.
y = np.random.randn(*self.v.shape)
x_a2dr = prox_sum_squares(self.v, t = 0.5*self.t, offset = y)
x_cvxpy = self.prox_cvxpy(self.v, sum_squares, t = 0.5*self.t, offset = y)
self.assertItemsAlmostEqual(x_a2dr, x_cvxpy, places = 4)
def test_sum_squares_affine(self):
# Scalar terms.
F = np.random.randn()
g = np.random.randn()
v = np.random.randn()
self.check_composition(lambda v, *args, **kwargs: prox_sum_squares_affine(v, F = F, g = g, method ="lsqr",
*args, **kwargs), lambda x: sum_squares(F*x - g), v)
self.check_composition(lambda v, *args, **kwargs: prox_sum_squares_affine(v, F = F, g = g, method ="lstsq",
*args, **kwargs), lambda x: sum_squares(F*x - g), v)
# Simple sum of squares: f(x) = ||x||_2^2.
n = 100
F = np.eye(n)
g = np.zeros(n)
v = np.random.randn(n)
F_sparse = sparse.eye(n)
g_sparse = sparse.csr_matrix((n,1))
for method in ["lsqr", "lstsq"]:
self.check_composition(lambda v, *args, **kwargs: prox_sum_squares_affine(v, F = F, g = g, \
method = method, *args, **kwargs), lambda x: sum_squares(F*x - g), v)
self.check_composition(lambda v, *args, **kwargs: prox_sum_squares_affine(v, F = F_sparse, g = g_sparse, \
method = method, *args, **kwargs), lambda x: sum_squares(F*x - g), v)
# Non-negative least squares term: f(x) = ||Fx - g||_2^2.
m = 1000
n = 100
F = 10 + 5*np.random.randn(m,n)
x = 2*np.random.randn(n)
g = F.dot(x) + 0.01*np.random.randn(m)
v = np.random.randn(n)
for method in ["lsqr", "lstsq"]:
x_a2dr = prox_sum_squares_affine(self.v, t = self.t, F = F, g = g, method = method)
x_cvxpy = self.prox_cvxpy(self.v, lambda x: sum_squares(F*x - g), t = self.t)
self.assertItemsAlmostEqual(x_a2dr, x_cvxpy, places = 4)
# General composition tests.
self.check_composition(lambda v, *args, **kwargs: prox_sum_squares_affine(v, F = F, g = g, method ="lsqr",
*args, **kwargs), lambda x: sum_squares(F*x - g), v)
self.check_composition(lambda v, *args, **kwargs: prox_sum_squares_affine(v, F = F, g = g, method ="lstsq",
*args, **kwargs), lambda x: sum_squares(F*x - g), v)
def test_quad_form(self):
# Simple quadratic.
v = np.random.randn(1)
Q = np.array([[5]])
self.check_composition(lambda v, *args, **kwargs: prox_quad_form(v, Q = Q, method = "lsqr", *args, **kwargs),
lambda x: quad_form(x, P = Q), v)
self.check_composition(lambda v, *args, **kwargs: prox_quad_form(v, Q = Q, method = "lstsq", *args, **kwargs),
lambda x: quad_form(x, P = Q), v)
# General composition tests.
n = 10
v = np.random.randn(n)
Q = np.random.randn(n,n)
Q = Q.T.dot(Q) + 0.5*np.eye(n)
self.check_composition(lambda v, *args, **kwargs: prox_quad_form(v, Q = Q, method = "lsqr", *args, **kwargs),
lambda x: quad_form(x, P = Q), v)
self.check_composition(lambda v, *args, **kwargs: prox_quad_form(v, Q = Q, method = "lstsq", *args, **kwargs),
lambda x: quad_form(x, P = Q), v)
def test_trace(self):
# Sparsity consistency tests.
C = sparse.random(*self.C_square_sparse.shape)
self.check_sparsity(prox_trace, check_vector = False, matrix_type = "square")
self.check_sparsity(lambda B, *args, **kwargs: prox_trace(B, C = C, *args, **kwargs), check_vector = False, \
matrix_type = "square")
# General composition tests.
C = np.random.randn(*self.B.shape)
self.check_composition(prox_trace, cvxpy.trace, self.B_square)
self.check_composition(lambda B, *args, **kwargs: prox_trace(B, C = C, *args, **kwargs),
lambda X: cvxpy.trace(C.T * X), self.B)
# Sparse inverse covariance estimation term: f(B) = tr(BQ) for given symmetric positive semidefinite Q.
Q = np.random.randn(*self.B_square.shape)
Q = Q.T.dot(Q)
B_a2dr = prox_trace(self.B_square, t = self.t, C = Q.T) # tr(BQ) = tr(QB) = tr((Q^T)^TB).
B_cvxpy = self.prox_cvxpy(self.B_square, lambda X: cvxpy.trace(X * Q), t = self.t)
self.assertItemsAlmostEqual(B_a2dr, B_cvxpy, places = 4)
| 36,504 | 50.127451 | 134 | py |
a2dr | a2dr-master/a2dr/tests/test_basic.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import numpy.linalg as LA
from scipy import sparse
from a2dr import a2dr
from a2dr.proximal import prox_sum_squares_affine, prox_nonneg_constr
from a2dr.tests.base_test import BaseTest
class TestBasic(BaseTest):
"""Unit tests for A2DR paper experiments."""
def setUp(self):
np.random.seed(1)
        self.eps_rel = 1e-8  # solver tolerances shared by the tests below
self.eps_abs = 1e-6
self.MAX_ITER = 1000
def test_unconstrained(self):
# minimize ||y - X\beta||_2^2.
# Problem data.
m, n = 100, 80
density = 0.1
X = sparse.random(m, n, density=density, data_rvs=np.random.randn)
y = np.random.randn(m)
prox_list = [lambda v, t: prox_sum_squares_affine(v, t, F=X, g=y, method="lstsq")]
# Solve with NumPy.
np_result = LA.lstsq(X.todense(), y, rcond=None)
np_beta = np_result[0]
np_obj = np.sum(np_result[1])
# Solve with DRS.
drs_result = a2dr(prox_list, n_list=[n], anderson=False, max_iter=self.MAX_ITER)
drs_beta = drs_result["x_vals"][-1]
drs_obj = np.sum((y - X.dot(drs_beta))**2)
print("Finish DRS.")
# Solve with A2DR.
a2dr_result = a2dr(prox_list, n_list=[n], anderson=True, max_iter=self.MAX_ITER)
a2dr_beta = a2dr_result["x_vals"][-1]
a2dr_obj = np.sum((y - X.dot(a2dr_beta))**2)
print("Finish A2DR.")
self.assertAlmostEqual(np_obj, drs_obj)
self.assertAlmostEqual(np_obj, a2dr_obj)
def test_ols(self):
# minimize ||y - X\beta||_2^2.
m = 100
n = 10
N = 4 # Number of splits. (split X row-wise)
beta_true = np.array(np.arange(-n / 2, n / 2) + 1)
X = np.random.randn(m, n)
y = X.dot(beta_true) + np.random.randn(m)
# Split problem.
X_split = np.split(X, N)
y_split = np.split(y, N)
# Construct list of proximal operators.
# Note: We must do it this way to avoid problems caused by late binding:
# https://docs.python-guide.org/writing/gotchas/#late-binding-closures
prox_list = [lambda v, t, i=i: prox_sum_squares_affine(v, t, F=X_split[i], g=y_split[i], method="lstsq") \
for i in range(N)]
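        # The default argument i=i freezes the loop index in each lambda, so block i always
        # uses X_split[i] and y_split[i] rather than the final value of i.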
v_init = N * [np.random.randn(n)]
# Solve with NumPy.
np_beta = []
np_obj = 0
for i in range(N):
np_result = LA.lstsq(X_split[i], y_split[i], rcond=None)
np_beta += [np_result[0]]
np_obj += np.sum(np_result[1])
print("NumPy Objective:", np_obj)
print("NumPy Solution:", np_beta)
# Solve with DRS (proximal point method).
drs_result = a2dr(prox_list, v_init=v_init, max_iter=self.MAX_ITER, eps_abs=self.eps_abs, \
eps_rel=self.eps_rel, anderson=False)
drs_beta = drs_result["x_vals"]
drs_obj = np.sum([(yi - Xi.dot(beta)) ** 2 for yi, Xi, beta in zip(y_split, X_split, drs_beta)])
print("DRS Objective:", drs_obj)
print("DRS Solution:", drs_beta)
# Solve with A2DR (proximal point method with Anderson acceleration).
a2dr_result = a2dr(prox_list, v_init=v_init, max_iter=self.MAX_ITER, eps_abs=self.eps_abs, \
eps_rel=self.eps_rel, anderson=True)
a2dr_beta = a2dr_result["x_vals"]
        a2dr_obj = np.sum([(yi - Xi.dot(beta)) ** 2 for yi, Xi, beta in zip(y_split, X_split, a2dr_beta)])
print("A2DR Objective:", a2dr_obj)
print("A2DR Solution:", a2dr_beta)
# Compare results.
self.assertAlmostEqual(np_obj, drs_obj)
self.assertAlmostEqual(np_obj, a2dr_obj)
for i in range(N):
self.assertItemsAlmostEqual(np_beta[i], drs_beta[i])
self.assertItemsAlmostEqual(np_beta[i], a2dr_beta[i])
def test_infeas(self):
# a modified non-negative least squares example with infeasible linear constraints
m, n = 150, 300
density = 0.001
X = sparse.random(m, n, density=density, data_rvs=np.random.randn)
y = np.random.randn(m)
# Convert problem to standard form.
prox_list = [lambda v, t: prox_sum_squares_affine(v, t, F=X, g=y),
prox_nonneg_constr]
        Z = sparse.eye(n)
e1 = sparse.lil_matrix((1,n))
e1[0,0] = 1
A1 = sparse.bmat([[Z], [e1]])
A2 = sparse.bmat([[-Z], [-e1]])
A_list = [A1, A2]
b = np.zeros(n+1)
b[0] = 1
b[-1] = -1
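        # The first n rows force x_1 - x_2 = (1, 0, ..., 0), while the appended last row forces
        # x_1[0] - x_2[0] = -1, so the system A_1 x_1 + A_2 x_2 = b has no solution.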
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False)
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True)
self.assertTrue(drs_result ==
{"x_vals": None, "primal": None, "dual": None, "num_iters": None, "solve_time": None}
and a2dr_result ==
{"x_vals": None, "primal": None, "dual": None, "num_iters": None, "solve_time": None})
| 5,654 | 37.209459 | 114 | py |
a2dr | a2dr-master/a2dr/tests/test_precondition.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
import scipy.optimize  # makes sp.optimize available for the NNLS reference solve below
from scipy import sparse
from a2dr import a2dr
from a2dr.proximal import prox_norm1, prox_sum_squares_affine
from a2dr.precondition import precondition
from a2dr.tests.base_test import BaseTest
class TestPrecondition(BaseTest):
"""Unit tests for preconditioning data before S-DRS"""
def setUp(self):
np.random.seed(1)
self.MAX_ITERS = 1000
def test_precond_l1_trend_filter(self):
# Problem data.
N = 2
n0 = 2*10**4
n = 2*n0-2
m = n0-2
y = np.random.randn(n)
alpha = 0.1*np.linalg.norm(y, np.inf)
# Form second difference matrix.
D = sparse.lil_matrix(sparse.eye(n0))
D.setdiag(-2, k = 1)
D.setdiag(1, k = 2)
D = D[:(n0-2),:]
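        # For example, with n0 = 5 this yields the 3x5 second-difference matrix
        # [[ 1 -2  1  0  0]
        #  [ 0  1 -2  1  0]
        #  [ 0  0  1 -2  1]]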
# Convert problem to standard form.
# f_1(x_1) = (1/2)||y - x_1||_2^2, f_2(x_2) = \alpha*||x_2||_1.
# A_1 = D, A_2 = -I_{n-2}, b = 0.
        # The actual l1 trend filtering problem would use b = 0 and the proximal operators
        #     [lambda v, t: (t*y + v)/(t + 1.0), lambda v, t: prox_norm1(v, t = alpha*t)].
        # This test only checks the equilibration itself, so a generic right-hand side and
        # proximal list suffice.
        A_list = [D, -sparse.eye(n0-2)]
        b = np.random.randn(m)
        prox_list = [prox_norm1] * N
A = sparse.csr_matrix(sparse.hstack(A_list))
p_eq_list, A_eq_list, db, e = precondition(prox_list, A_list, b)
A_eq = sparse.csr_matrix(sparse.hstack(A_eq_list))
print(r'[Sanity Check]')
print(r'\|A\|_2 = {}, \|DAE\|_2 = {}'.format(sparse.linalg.norm(A), sparse.linalg.norm(A_eq)))
print(r'min(|A|) = {}, max(|A|) = {}, mean(|A|) = {}'.format(np.min(np.abs(A)),
np.max(np.abs(A)), sparse.csr_matrix.mean(np.abs(A))))
print(r'min(|DAE|) = {}, max(|DAE|) = {}, mean(|DAE|) = {}'.format(np.min(np.abs(A_eq)),
np.max(np.abs(A_eq)), sparse.csr_matrix.mean(np.abs(A_eq))))
def test_nnls(self):
# Solve the non-negative least squares problem
# Minimize (1/2)*||A*x - b||_2^2 subject to x >= 0.
m = 100
n = 10
N = 1 # Number of nodes (split A row-wise)
# Problem data.
mu = 100
sigma = 10
X = mu + sigma*np.random.randn(m,n)
y = mu + sigma*np.random.randn(m)
# Solve with SciPy.
sp_result = sp.optimize.nnls(X, y)
sp_beta = sp_result[0]
sp_obj = sp_result[1] ** 2 # SciPy objective is ||y - X\beta||_2.
print("Scipy Objective:", sp_obj)
print("SciPy Solution:", sp_beta)
X_split = np.split(X, N)
y_split = np.split(y, N)
p_list = [lambda v, t: prox_sum_squares_affine(v, t, F=X_sub, g=y_sub, method="lsqr") \
for X_sub, y_sub in zip(X_split, y_split)]
p_list += [lambda u, rho: np.maximum(u, 0)] # Projection onto non-negative orthant.
A_list = np.hsplit(np.eye(N*n), N) + [-np.vstack(N*(np.eye(n),))]
b = np.zeros(N*n)
# Solve with A2DR.
a2dr_result = a2dr(p_list, A_list, b, anderson=True, precond=False, max_iter=self.MAX_ITERS)
a2dr_beta = a2dr_result["x_vals"][-1]
a2dr_obj = np.sum((y - X.dot(a2dr_beta))**2)
print("A2DR Objective:", a2dr_obj)
print("A2DR Solution:", a2dr_beta)
self.assertAlmostEqual(sp_obj, a2dr_obj)
self.assertItemsAlmostEqual(sp_beta, a2dr_beta, places=3)
# Solve with preconditioned A2DR.
cond_result = a2dr(p_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITERS)
cond_beta = cond_result["x_vals"][-1]
cond_obj = np.sum((y - X.dot(cond_beta))**2)
print("Preconditioned A2DR Objective:", cond_obj)
print("Preconditioned A2DR Solution:", cond_beta)
self.assertAlmostEqual(sp_obj, cond_obj)
self.assertItemsAlmostEqual(sp_beta, cond_beta, places=3)
| 4,577 | 38.465517 | 128 | py |
a2dr | a2dr-master/a2dr/tests/base_test.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
# Base class for unit tests.
from unittest import TestCase
import numpy as np
import matplotlib.pyplot as plt
class BaseTest(TestCase):
# AssertAlmostEqual for lists.
def assertItemsAlmostEqual(self, a, b, places=4):
if np.isscalar(a):
a = [a]
else:
a = self.mat_to_list(a)
if np.isscalar(b):
b = [b]
else:
b = self.mat_to_list(b)
for i in range(len(a)):
self.assertAlmostEqual(a[i], b[i], places)
# Overridden method to assume lower accuracy.
def assertAlmostEqual(self, a, b, places=4):
super(BaseTest, self).assertAlmostEqual(a.real, b.real, places=places)
super(BaseTest, self).assertAlmostEqual(a.imag, b.imag, places=places)
def mat_to_list(self, mat):
"""Convert a numpy matrix to a list.
"""
if isinstance(mat, (np.matrix, np.ndarray)):
return np.asarray(mat).flatten('F').tolist()
else:
return mat
def plot_residuals(self, r_primal, r_dual, normalize = False, show = True, title = None, semilogy = False, savefig = None, *args, **kwargs):
if normalize:
r_primal = r_primal / r_primal[0] if r_primal[0] != 0 else r_primal
r_dual = r_dual / r_dual[0] if r_dual[0] != 0 else r_dual
if semilogy:
plt.semilogy(range(len(r_primal)), r_primal, label = "Primal", *args, **kwargs)
plt.semilogy(range(len(r_dual)), r_dual, label = "Dual", *args, **kwargs)
else:
plt.plot(range(len(r_primal)), r_primal, label = "Primal", *args, **kwargs)
plt.plot(range(len(r_dual)), r_dual, label = "Dual", *args, **kwargs)
plt.legend()
plt.xlabel("Iteration")
plt.ylabel("Residual")
if title:
plt.title(title)
if show:
plt.show()
if savefig:
plt.savefig(savefig, bbox_inches="tight")
def compare_results(self, probs, obj_a2dr, obj_comb, x_a2dr, x_comb):
N = len(probs.variables())
for i in range(N):
print("\nA2DR Solution:\n", x_a2dr[i])
print("Base Solution:\n", x_comb[i])
print("MSE: ", np.mean(np.square(x_a2dr[i] - x_comb[i])), "\n")
print("A2DR Objective: %f" % obj_a2dr)
print("Base Objective: %f" % obj_comb)
print("Iterations: %d" % probs.solver_stats["num_iters"])
print("Elapsed Time: %f" % probs.solver_stats["solve_time"])
def compare_residuals(self, res_drs, res_a2dr, m_vals):
if not isinstance(res_a2dr, list):
res_a2dr = [res_a2dr]
if not isinstance(m_vals, list):
m_vals = [m_vals]
if len(m_vals) != len(res_a2dr):
raise ValueError("Must have same number of AA-II residuals as memory parameter values")
plt.semilogy(range(res_drs.shape[0]), res_drs, label="DRS")
for i in range(len(m_vals)):
label = "A2DR (m = {})".format(m_vals[i])
plt.semilogy(range(res_a2dr[i].shape[0]), res_a2dr[i], linestyle="--", label=label)
plt.legend()
plt.xlabel("Iteration")
plt.ylabel("Residual")
plt.show()
def compare_primal_dual(self, drs_result, a2dr_result, savefig = None):
# Compare residuals
plt.semilogy(range(drs_result["num_iters"]), drs_result["primal"], color="blue", linestyle="--",
label="Primal (DRS)")
plt.semilogy(range(a2dr_result["num_iters"]), a2dr_result["primal"], color="blue", label="Primal (A2DR)")
plt.semilogy(range(drs_result["num_iters"]), drs_result["dual"], color="darkorange", linestyle="--",
label="Dual (DRS)")
plt.semilogy(range(a2dr_result["num_iters"]), a2dr_result["dual"], color="darkorange", label="Dual (A2DR) ")
# plt.title("Residuals")
plt.legend()
if savefig:
plt.savefig(savefig, bbox_inches="tight")
plt.show()
def compare_total(self, drs_result, a2dr_result, savefig = None):
# Compare residuals
plt.semilogy(range(drs_result["num_iters"]), np.sqrt(drs_result["primal"]**2+drs_result["dual"]**2), color="blue", label="Residuals (DRS)")
plt.semilogy(range(a2dr_result["num_iters"]), np.sqrt(a2dr_result["primal"]**2+a2dr_result["dual"]**2), color="darkorange", label="Residuals (A2DR)")
# plt.title("Residuals")
plt.legend()
if savefig:
plt.savefig(savefig, bbox_inches="tight")
plt.show()
def compare_total_all(self, results, names, savefig = None):
# Compare residuals in the results list
# len(names) must be equal to len(results)
for i in range(len(names)):
result = results[i]
name = names[i]
plt.semilogy(range(result["num_iters"]), np.sqrt(result["primal"]**2+result["dual"]**2),
label="Residuals (" + name + ")")
# plt.title("Residuals")
plt.legend()
if savefig:
plt.savefig(savefig, bbox_inches="tight")
plt.show()
| 5,778 | 40.876812 | 157 | py |
a2dr | a2dr-master/a2dr/tests/test_projection.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
from cvxpy import *
from a2dr.proximal.projection import *
from a2dr.tests.base_test import BaseTest
class TestProjection(BaseTest):
"""Unit tests for projections"""
def setUp(self):
np.random.seed(1)
self.TOLERANCE = 1e-6
self.c = np.random.randn()
self.v = np.random.randn(100)
self.B = np.random.randn(50,10)
self.radii = [0, 1, 2.5]
self.radii += [np.abs(self.c) + self.TOLERANCE]
self.radii += [np.max(np.abs(self.v)) + self.TOLERANCE,
(np.max(np.abs(self.v)) + np.min(np.abs(self.v)))/2]
self.radii += [np.max(np.abs(self.B)) + self.TOLERANCE,
(np.max(np.abs(self.B)) + np.min(np.abs(self.B)))/2]
def check_simplex(self, val, radius=1, method="bisection", places=4, *args, **kwargs):
# Solve with CVXPY.
x_var = Variable() if np.isscalar(val) else Variable(val.shape)
obj = Minimize(sum_squares(x_var - val))
constr = [x_var >= 0, sum(x_var) == radius]
Problem(obj, constr).solve(*args, **kwargs)
# Solve with projection algorithm.
x_proj = proj_simplex(val, r=radius, method=method)
self.assertItemsAlmostEqual(x_var.value, x_proj, places=places)
def check_l1_ball(self, val, radius=1, method="bisection", places=4, *args, **kwargs):
# Solve with CVXPY.
x_var = Variable() if np.isscalar(val) else Variable(val.shape)
obj = Minimize(sum_squares(x_var - val))
constr = [norm1(x_var) <= radius]
Problem(obj, constr).solve(*args, **kwargs)
# Solve with projection algorithm.
x_proj = proj_l1(val, r=radius, method=method)
self.assertItemsAlmostEqual(x_var.value, x_proj, places=places)
def test_simplex(self):
for radius in self.radii:
for method in ["bisection", "sorted"]:
self.check_simplex(self.c, radius, method)
self.check_simplex(self.v, radius, method)
self.check_simplex(self.B, radius, method)
def test_l1_ball(self):
for radius in self.radii:
for method in ["bisection", "sorted"]:
self.check_l1_ball(self.c, radius, method, solver='ECOS')
self.check_l1_ball(self.v, radius, method)
self.check_l1_ball(self.B, radius, method)
| 3,037 | 37.948718 | 90 | py |
a2dr | a2dr-master/a2dr/tests/__init__.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
| 660 | 33.789474 | 68 | py |
a2dr | a2dr-master/a2dr/proximal/norm.py | import numpy as np
from scipy import sparse
from a2dr.proximal.interface import NUMPY_FUNS, SPARSE_FUNS, apply_to_nonzeros
from a2dr.proximal.projection import proj_l1
from a2dr.proximal.composition import prox_scale
def prox_norm1(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = \\|x\\|_1`
for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and d = quad_term.
We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
if np.isscalar(v):
v = np.array([v])
return prox_scale(prox_norm1_base, *args, **kwargs)(v, t)
def prox_norm2(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = \\|x\\|_2`
for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and d = quad_term.
We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
if np.isscalar(v):
v = np.array([v])
return prox_scale(prox_norm2_base, *args, **kwargs)(v, t)
def prox_norm_inf(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = \\|x\\|_{\\infty}`
for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and d = quad_term.
We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
if np.isscalar(v):
v = np.array([v])
return prox_scale(prox_norm_inf_base, *args, **kwargs)(v, t)
def prox_norm_nuc(B, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(aB-b) + cB + d\\|B\\|_F^2`, where :math:`f(B) = \\|B\\|_*`
for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and d = quad_term.
We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
if np.isscalar(B):
B = np.array([[B]])
return prox_scale(prox_norm_nuc_base, *args, **kwargs)(B, t)
def prox_norm_fro(B, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(aB-b) + cB + d\\|B\\|_F^2`, where :math:`f(B) = \\|B\\|_2`
for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and d = quad_term.
We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
if np.isscalar(B):
B = np.array([[B]])
return prox_scale(prox_norm_fro_base, *args, **kwargs)(B, t)
def prox_group_lasso(B, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(aB-b) + cB + d\\|B\\|_F^2`, where :math:`f(B) = \\|B\\|_{2,1}` is the
group lasso of :math:`B`, for scalar t > 0, and the optional arguments are a = scale, b = offset,
c = lin_term, and d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1,
b = 0, c = 0, and d = 0.
"""
if np.isscalar(B):
B = np.array([[B]])
return prox_scale(prox_group_lasso_base, *args, **kwargs)(B, t)
def prox_norm1_base(v, t):
"""Proximal operator of :math:`f(x) = \\|x\\|_1`.
"""
return apply_to_nonzeros(lambda y: np.maximum(y - t, 0) - np.maximum(-y - t, 0), v)
def prox_norm2_base(v, t):
"""Proximal operator of :math:`f(x) = \\|x\\|_2`.
"""
FUNS = SPARSE_FUNS if sparse.issparse(v) else NUMPY_FUNS
norm, zeros = FUNS["norm"], FUNS["zeros"]
if np.isscalar(v):
v_norm = abs(v)
elif len(v.shape) == 1:
v_norm = norm(v,2)
elif len(v.shape) == 2:
v_norm = norm(v,'fro')
if v_norm == 0:
return zeros(v.shape)
else:
return np.maximum(1 - t/v_norm, 0) * v
def prox_norm_inf_base(v, t):
"""Proximal operator of :math:`f(x) = \\|x\\|_{\\infty}`.
"""
return v - t * proj_l1(v/t)
def prox_norm_fro_base(B, t):
"""Proximal operator of :math:`f(B) = \\|B\\|_2`, the Frobenius norm of :math:`B`.
"""
U, s, Vt = np.linalg.svd(B, full_matrices=False)
s_new = prox_norm2_base(s, t)
# s_norm = np.linalg.norm(s, 2)
# s_new = np.zeros(s.shape) if s_norm == 0 else np.maximum(1 - t/s_norm, 0) * s
return U.dot(np.diag(s_new)).dot(Vt)
def prox_norm_nuc_base(B, t):
"""Proximal operator of :math:`f(B) = \\|B\\|_*`, the nuclear norm of :math:`B`.
"""
U, s, Vt = np.linalg.svd(B, full_matrices=False)
s_new = prox_norm1_base(s, t)
# s_new = np.maximum(s - t, 0)
return U.dot(np.diag(s_new)).dot(Vt)
def prox_group_lasso_base(B, t):
"""Proximal operator of :math:`f(B) = \\|B\\|_{2,1} = \\sum_j \\|B_j\\|_2`, the group lasso of :math:`B`,
where :math:`B_j` is the j-th column of :math:`B`.
"""
# FUNS = SPARSE_FUNS if sparse.issparse(B) else NUMPY_FUNS
# vstack, hstack = FUNS["vstack"], FUNS["hstack"]
# prox_cols = [prox_norm2(B[:, j], t) for j in range(B.shape[1])]
# return vstack(prox_cols).T if prox_cols[0].ndim == 1 else hstack(prox_cols)
if sparse.issparse(B):
B = B.tocsc()
prox_cols = [prox_norm2(B[:, j], t) for j in range(B.shape[1])]
return sparse.hstack(prox_cols)
else:
prox_cols = [prox_norm2(B[:, j], t) for j in range(B.shape[1])]
return np.column_stack(prox_cols)
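if __name__ == "__main__":
    # Quick sanity sketch (not part of the original module): prox_norm1 should reduce to
    # elementwise soft-thresholding, and prox_group_lasso should match prox_norm2 applied
    # to each column of a matrix separately.
    np.random.seed(0)
    t_demo = 0.3
    v_demo = np.random.randn(5)
    assert np.allclose(prox_norm1(v_demo, t_demo),
                       np.sign(v_demo) * np.maximum(np.abs(v_demo) - t_demo, 0))
    B_demo = np.random.randn(4, 3)
    col_wise = np.column_stack([prox_norm2(B_demo[:, j], t_demo) for j in range(B_demo.shape[1])])
    assert np.allclose(prox_group_lasso(B_demo, t_demo), col_wise)
    print("norm proximal sanity checks passed")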
| 5,328 | 42.680328 | 109 | py |
a2dr | a2dr-master/a2dr/proximal/constraint.py | import numpy as np
from scipy import sparse
from a2dr.proximal.interface import NUMPY_FUNS, SPARSE_FUNS
from a2dr.proximal.composition import prox_scale
def prox_box_constr(v, t = 1, v_lo = -np.inf, v_hi = np.inf, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f` is the set indicator that
:math:`\\underline x \\leq x \\leq \\overline x`. Here the lower/upper bounds are (v_lo, v_hi), which default
to (-Inf, Inf). The scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and
d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
return prox_scale(prox_box_constr_base, v_lo, v_hi, *args, **kwargs)(v, t)
def prox_nonneg_constr(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f` is the set indicator that
:math:`x \\geq 0`. The scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and
d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
return prox_scale(prox_nonneg_constr_base, *args, **kwargs)(v, t)
def prox_nonpos_constr(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f` is the set indicator that
:math:`x \\leq 0`. The scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and
d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
return prox_scale(prox_nonpos_constr_base, *args, **kwargs)(v, t)
def prox_psd_cone(B, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(aB-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f` is the set indicator that
:math:`B \\succeq 0` for :math:`B` a symmetric matrix. The scalar t > 0, and the optional arguments are
a = scale, b = offset, c = lin_term, and d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default,
t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
if np.isscalar(B):
B = np.array([[B]])
if B.shape[0] != B.shape[1]:
raise ValueError("B must be a square matrix.")
B_symm = (B + B.T) / 2.0
if not np.allclose(B, B_symm):
raise ValueError("B must be a symmetric matrix.")
return prox_scale(prox_psd_cone_base, *args, **kwargs)(B_symm, t)
def prox_soc(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f` is the set indicator that
:math:`\\|x_{1:n}\\|_2 \\leq x_{n+1}`. The scalar t > 0, and the optional arguments are a = scale, b = offset,
c = lin_term, and d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0,
c = 0, and d = 0.
"""
if np.isscalar(v) or not (len(v.shape) == 1 or (len(v.shape) == 2 and v.shape[1] == 1)):
raise ValueError("v must be a vector")
if v.shape[0] < 2:
raise ValueError("v must have at least 2 elements.")
return prox_scale(prox_soc_base, *args, **kwargs)(v, t)
def prox_box_constr_base(v, t, v_lo, v_hi):
"""Proximal operator of the set indicator that :math:`\\underline x \\leq x \\leq \\overline x`.
"""
FUNS = SPARSE_FUNS if sparse.issparse(v) else NUMPY_FUNS
max_elemwise, min_elemwise = FUNS["max_elemwise"], FUNS["min_elemwise"]
return min_elemwise(max_elemwise(v, v_lo), v_hi)
def prox_nonneg_constr_base(v, t):
"""Proximal operator of the set indicator that :math:`x \\geq 0`.
"""
# return prox_box_constr_base(v, t, 0, np.inf)
# return v.maximum(0) if sparse.issparse(v) else np.maximum(v,0)
FUNS = SPARSE_FUNS if sparse.issparse(v) else NUMPY_FUNS
max_elemwise = FUNS["max_elemwise"]
return max_elemwise(v, 0)
def prox_nonpos_constr_base(v, t):
"""Proximal operator of the set indicator that :math:`x \\leq 0`.
"""
# return prox_box_constr_base(v, t, -np.inf, 0)
# return v.minimum(0) if sparse.issparse(v) else np.minimum(v,0)
FUNS = SPARSE_FUNS if sparse.issparse(v) else NUMPY_FUNS
min_elemwise = FUNS["min_elemwise"]
return min_elemwise(v, 0)
def prox_psd_cone_base(B, t):
"""Proximal operator of the set indicator that :math:`B \\succeq 0`, where :math:`B` is a symmetric matrix.
"""
# B_symm = (B + B.T)/2.0
# if not np.allclose(B, B_symm):
# raise ValueError("B must be a symmetric matrix.")
# s, u = np.linalg.eigh(B_symm)
s, u = np.linalg.eigh(B)
s_new = np.maximum(s, 0)
return u.dot(np.diag(s_new)).dot(u.T)
def prox_soc_base(v, t):
"""Proximal operator of the set indicator that :math:`\\|v_{1:n}\\_2 \\leq v_{n+1}`, where :math:`v` is a vector of
length n+1, `v_{1:n}` symbolizes its first n elements, and :math:`v_{n+1}` is its last element. This is equivalent
to the projection of :math:`v` onto the second-order cone :math:`C = {(u,s):\\|u\\|_2 \\leq s}`.
Parikh and Boyd (2013). "Proximal Algorithms." Foundations and Trends in Optimization. vol. 1, no. 3, Sect. 6.3.2.
"""
if sparse.issparse(v):
v = v.tocsr()
u = v[:-1] # u = (v_1,...,v_n)
s = v[-1] # s = v_{n+1}
        s = s.todense().item()  # .item() extracts the scalar (np.asscalar was removed from NumPy)
u_norm = sparse.linalg.norm(u,'fro')
if u_norm <= -s:
return np.zeros(v.shape)
elif u_norm <= s:
return v
else:
scale = (1 + s / u_norm) / 2
return scale * sparse.vstack((u, u_norm))
else:
u = v[:-1] # u = (v_1,...,v_n)
s = v[-1] # s = v_{n+1}
        s = s.item()  # .item() extracts the scalar (np.asscalar was removed from NumPy)
u_norm = np.linalg.norm(u,2)
if u_norm <= -s:
return np.zeros(v.shape)
elif u_norm <= s:
return v
else:
scale = (1 + s / u_norm) / 2
u_all = np.zeros(v.shape)
u_all[:-1] = u
u_all[-1] = u_norm
return scale * u_all
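# Minimal usage sketch (illustrative, not part of the library; default scale/offset
# arguments assumed). v_demo is a made-up name for this example only.
if __name__ == "__main__":
    v_demo = np.array([-2.0, 0.5, 3.0])
    # Box projection onto [0, 1]: expected (0.0, 0.5, 1.0).
    print(prox_box_constr(v_demo, t=1, v_lo=0, v_hi=1))
    # Second-order cone projection of (3, 4, 2): ||(3, 4)||_2 = 5 > 2, so (3, 4, 5) is
    # scaled by (1 + 2/5)/2 = 0.7, giving roughly (2.1, 2.8, 3.5).
    print(prox_soc(np.array([3.0, 4.0, 2.0]), t=1))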
| 5,589 | 43.015748 | 116 | py |
a2dr | a2dr-master/a2dr/proximal/misc.py | import numpy as np
import warnings
from scipy import sparse
from scipy.special import expit, lambertw
from scipy.optimize import minimize
from a2dr.proximal.projection import proj_simplex
from a2dr.proximal.composition import prox_scale
def prox_kl(v, t = 1, u = None, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where
:math:`f(x) = \\sum_i x_i*\\log(x_i/u_i)`
for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and d = quad_term.
We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
if np.isscalar(v):
v = np.array([v])
if u is None:
u = np.ones(v.shape)
return prox_scale(prox_kl_base, u, *args, **kwargs)(v, t)
def prox_logistic(v, t = 1, x0 = None, y = None, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where
:math:`f(x) = \\sum_i \\log(1 + \\exp(-y_i*x_i))`
for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and d = quad_term.
We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
if np.isscalar(v):
v = np.array([v])
if x0 is None:
# x0 = np.random.randn(*v.shape)
x0 = v
if y is None:
y = -np.ones(v.shape)
return prox_scale(prox_logistic_base, x0, y, *args, **kwargs)(v, t)
def prox_max(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = \\max_i x_i`
for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and d = quad_term.
We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
if np.isscalar(v):
v = np.array([v])
return prox_scale(prox_max_base, *args, **kwargs)(v, t)
def prox_kl_base(v, t, u):
"""Proximal operator of :math:`f(x) = \\sum_i x_i\\log(x_i/u_i)`, where u is a given vector quantity.
The function defaults to u_i = 1 for all i, so that :math:`f(x) = \\sum_i x_i\\log x_i`.
"""
return t*np.real(lambertw(u*np.exp(v/t-1)/t))
def prox_logistic_base(v, t, x0, y):
"""Proximal operator of :math:`f(x) = \\sum_i \\log(1 + \\exp(-y_i*x_i))`, where y is a given vector quantity,
solved using the Newton-CG method from scipy.optimize.minimize. The function defaults to y_i = -1 for all i,
so that :math:`f(x) = \\sum_i \\log(1 + \\exp(x_i))`.
"""
    # Only works on dense vectors, so densify any sparse inputs first
    # (sparse matrices do not implement flatten()).
    if sparse.issparse(v):
        v = v.toarray()
    if sparse.issparse(x0):
        x0 = x0.toarray()
    if sparse.issparse(y):
        y = y.toarray()
    # Treat matrices elementwise.
    v_shape = v.shape
    v = v.flatten(order='C')
    x0 = x0.flatten(order='C')
    y = y.flatten(order='C')
# g(x) = \sum_i log(1 + exp(-y_i*x_i)) + 1/(2*t)*||x - v||_2^2
def fun(x, y, v, t):
# expit(x) = 1/(1 + exp(-x))
return -np.sum(np.log(expit(np.multiply(y,x)))) + 1.0/(2*t)*np.sum((x - v)**2)
# dg(x)/dx_i = -y_i/(1 + exp(y_i*x_i)) + (1/t)*(x_i - v_i)
def jac(x, y, v, t):
return -np.multiply(y, expit(-np.multiply(y,x))) + (1.0/t)*(x - v)
# d^2g(x)/dx_i^2 = y_i^2*exp(y_i*x_i)/(1 + exp(y_i*x_i))^2 + 1/t
def hess(x, y, v, t):
return np.diag(np.multiply(np.multiply(y**2, np.exp(np.multiply(y,x))), expit(-np.multiply(y,x))**2) + 1.0/t)
res = minimize(fun, x0, args=(y, v, t), method='Newton-CG', jac=jac, hess=hess)
# res = minimize(fun, x0, args=(y, v, t), method='Newton-CG', jac=jac)
if not res.success:
warnings.warn(res.message)
return res.x[0] if res.x.size == 1 else res.x.reshape(v_shape, order='C')
def prox_max_base(v, t):
"""Proximal operator of :math:`f(x) = \\max_i x_i`.
"""
return v - t*proj_simplex(v/t)
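# Minimal usage sketch (illustrative, not part of the library; default keyword arguments
# assumed): the prox of the maximum entry shrinks the largest coordinates toward the rest.
# For v = (1, 2, 3) and t = 1 the simplex projection of v is (0, 0, 1), so the result is
# approximately (1, 2, 2).
if __name__ == "__main__":
    print(prox_max(np.array([1.0, 2.0, 3.0]), t=1))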
| 3,886 | 40.351064 | 117 | py |
a2dr | a2dr-master/a2dr/proximal/projection.py | import numpy as np
from scipy.optimize import bisect
TOLERANCE = 1e-6
def proj_l1(x, r = 1, method = "bisection"):
if np.isscalar(x):
x = np.array([x])
if not np.isscalar(r) or r < 0:
raise ValueError("r must be a non-negative scalar.")
if np.linalg.norm(x,1) <= r:
return x
else:
beta = proj_simplex(np.abs(x), r, method)
return np.sign(x) * beta
def proj_simplex(x, r = 1, method = "bisection"):
"""Project x onto a simplex with upper bound r.
Bisection: Liu and Ye (2009). "Efficient Euclidean Projections in Linear Time." Sect. 2.1.
https://icml.cc/Conferences/2009/papers/123.pdf
Efficient: Duchi et al (2008). "Efficient Projections onto the l1-Ball for Learning in High Dimensions."
Fig. 1 and Sect. 4.
https://stanford.edu/~jduchi/projects/DuchiShSiCh08.pdf
"""
if np.isscalar(x):
x = np.array([x])
if not np.isscalar(r) or r < 0:
raise ValueError("r must be a non-negative scalar.")
elif r == 0:
return np.zeros(x.shape)
if method == "bisection":
c_min = np.min(x) - (r + TOLERANCE)/x.size
c_max = np.max(x) + (r + TOLERANCE)/x.size
c_star = bisect(lambda c: np.sum(np.maximum(x - c, 0)) - r, c_min, c_max)
elif method == "sorted":
x_decr = np.sort(x, axis = None)[::-1]
x_cumsum = np.cumsum(x_decr)
denom = np.arange(1, x_decr.size + 1)
theta = (x_cumsum - r)/denom
x_diff = x_decr - theta
idx = np.max(np.argwhere(x_diff > 0).ravel())
c_star = theta[idx]
else:
raise ValueError("method must be either 'bisection' or 'sorted'.")
return np.maximum(x - c_star, 0)
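# Minimal consistency sketch (illustrative, not part of the library): both simplex
# projection methods should agree up to the bisection tolerance. For x = (0.2, 0.8, 1.5)
# and r = 1 the projection is approximately (0, 0.15, 0.85), whose entries sum to one.
if __name__ == "__main__":
    x_demo = np.array([0.2, 0.8, 1.5])
    p_bisect = proj_simplex(x_demo, r=1, method="bisection")
    p_sorted = proj_simplex(x_demo, r=1, method="sorted")
    print(p_bisect, p_sorted, np.allclose(p_bisect, p_sorted, atol=1e-4))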
| 1,730 | 35.829787 | 111 | py |
a2dr | a2dr-master/a2dr/proximal/__init__.py | """
Copyright 2019 Anqi Fu, Junzi Zhang
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
from a2dr.proximal.constraint import prox_box_constr, prox_nonneg_constr, prox_nonpos_constr, prox_psd_cone, prox_soc
from a2dr.proximal.elementwise import prox_abs, prox_constant, prox_exp, prox_huber, prox_identity, prox_neg, \
prox_neg_entr, prox_neg_log, prox_pos
from a2dr.proximal.matrix import prox_neg_log_det, prox_sigma_max, prox_trace
from a2dr.proximal.misc import prox_kl, prox_logistic, prox_max
from a2dr.proximal.norm import prox_norm1, prox_norm2, prox_norm_inf, prox_norm_fro, prox_norm_nuc, prox_group_lasso
from a2dr.proximal.quadratic import prox_qp, prox_quad_form, prox_sum_squares, prox_sum_squares_affine
| 1,329 | 48.259259 | 117 | py |
a2dr | a2dr-master/a2dr/proximal/interface.py | import numpy as np
import scipy as sp
from scipy import sparse
def shape_to_2d(shape):
if np.isscalar(shape) or len(shape) == 0:
return 1,1
elif len(shape) == 1:
return shape[0],1
else:
return shape
def apply_to_nonzeros(fun, v):
if sparse.issparse(v):
v_new = v.copy()
v_new.data = fun(v.data)
return v_new
else:
return fun(v)
NUMPY_FUNS = {"hstack": np.hstack,
"max_elemwise": np.maximum,
"min_elemwise": np.minimum,
"mul_elemwise": np.multiply,
"norm": np.linalg.norm,
"vstack": np.vstack,
"zeros": lambda shape, dtype=float: np.zeros(shape, dtype=dtype)
}
SPARSE_FUNS = {"hstack": sparse.hstack,
"max_elemwise": lambda x, y: x.maximum(y),
"min_elemwise": lambda x, y: x.minimum(y),
"mul_elemwise": lambda x, y: x.multiply(y),
"norm": sp.sparse.linalg.norm,
"vstack": sparse.vstack,
"zeros": lambda shape, dtype=None: sparse.csr_matrix(shape_to_2d(shape), dtype=dtype)
}
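# Minimal usage sketch (illustrative, not part of the library): apply_to_nonzeros applies
# a function to the stored entries of a sparse matrix only, keeping the sparsity pattern,
# while a dense input is passed straight to the function.
if __name__ == "__main__":
    dense_demo = np.array([[0.0, -2.0], [3.0, 0.0]])
    sparse_demo = sparse.csr_matrix(dense_demo)
    print(apply_to_nonzeros(np.abs, dense_demo))
    print(apply_to_nonzeros(np.abs, sparse_demo).toarray())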
| 1,157 | 29.473684 | 100 | py |
a2dr | a2dr-master/a2dr/proximal/elementwise.py | import numpy as np
from scipy import sparse
from scipy.special import lambertw
from a2dr.proximal.interface import NUMPY_FUNS, SPARSE_FUNS, apply_to_nonzeros
from a2dr.proximal.composition import prox_scale
def prox_abs(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = |x|` applied elementwise
for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and d = quad_term.
We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
return prox_scale(prox_abs_base, *args, **kwargs)(v, t)
def prox_constant(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = c` for constant c,
scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and d = quad_term.
We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
return prox_scale(prox_constant_base, *args, **kwargs)(v, t)
def prox_exp(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = \\exp(x)` applied
elementwise for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and
d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and
d = 0.
"""
return prox_scale(prox_exp_base, *args, **kwargs)(v, t)
def prox_huber(v, t = 1, M = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where
.. math::
f(x) =
\\begin{cases}
2M|x|-M^2 & \\text{for } |x| \\geq |M| \\\\
|x|^2 & \\text{for } |x| \\leq |M|
\\end{cases}
applied elementwise for scalar M > 0, t > 0, and the optional arguments are a = scale, b = offset, c = lin_term,
and d = quad_term. We must have M > 0, t > 0, a = non-zero, and d >= 0. By default, M = 1, t = 1, a = 1, b = 0,
c = 0, and d = 0.
"""
return prox_scale(prox_huber_base, M, *args, **kwargs)(v, t)
def prox_identity(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = x` applied elementwise
for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and d = quad_term.
We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
return prox_scale(prox_identity_base, *args, **kwargs)(v, t)
def prox_neg(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = -\\min(x,0)` applied
elementwise for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and
d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
return prox_scale(prox_neg_base, *args, **kwargs)(v, t)
def prox_neg_entr(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = x\\log(x)` applied
elementwise for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and
d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
return prox_scale(prox_neg_entr_base, *args, **kwargs)(v, t)
def prox_neg_log(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = -\\log(x)` applied
elementwise for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and
d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
return prox_scale(prox_neg_log_base, *args, **kwargs)(v, t)
def prox_pos(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = \\max(x,0)` applied
elementwise for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and
d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
return prox_scale(prox_pos_base, *args, **kwargs)(v, t)
def prox_abs_base(v, t):
"""Proximal operator of :math:`f(x) = |x|`.
"""
return apply_to_nonzeros(lambda y: np.maximum(y - t, 0) + np.minimum(y + t, 0), v)
def prox_constant_base(v, t):
"""Proximal operator of :math:`f(x) = c` for any constant :math:`c`.
"""
return v
def prox_exp_base(v, t):
"""Proximal operator of :math:`f(x) = \\exp(x)`.
"""
if sparse.issparse(v):
v = v.todense()
    return v - np.real(lambertw(np.exp(v + np.log(t))))  # lambertw returns a complex array; the prox is real-valued
def prox_huber_base(v, t, M):
"""Proximal operator of
.. math::
f(x) =
\\begin{cases}
2M|x|-M^2 & \\text{for } |x| \\geq |M| \\\\
|x|^2 & \\text{for } |x| \\leq |M|
\\end{cases}
applied elementwise, where :math:`M` is a positive scalar.
"""
return apply_to_nonzeros(lambda y: np.where(np.abs(y) <= (M + 2*M*t), y / (1 + 2*t), y - 2*M*t*np.sign(y)), v)
def prox_identity_base(v, t):
"""Proximal operator of :math:`f(x) = x`.
"""
if sparse.issparse(v):
v = v.todense()
return v - t
def prox_neg_base(v, t):
"""Proximal operator of :math:`f(x) = -\\min(x,0)`, where the minimum is taken elementwise.
"""
return apply_to_nonzeros(lambda y: np.where(y + t <= 0, y + t, np.where(y >= 0, y, 0)), v)
def prox_neg_entr_base(v, t):
"""Proximal operator of :math:`f(x) = x\\log(x)`.
"""
if sparse.issparse(v):
v = v.todense()
return t * np.real(lambertw(np.exp((v/t - 1) - np.log(t))))
def prox_neg_log_base(v, t):
"""Proximal operator of :math:`f(x) = -\\log(x)`.
"""
if sparse.issparse(v):
v = v.todense()
return (v + np.sqrt(v**2 + 4*t)) / 2
def prox_pos_base(v, t):
"""Proximal operator of :math:`f(x) = \\max(x,0)`, where the maximum is taken elementwise.
"""
return apply_to_nonzeros(lambda y: np.where(y - t >= 0, y - t, np.where(y <= 0, y, 0)), v)
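# Minimal usage sketch (illustrative, not part of the library; default scale/offset
# arguments assumed): with M = 1 and t = 0.5 the Huber prox divides entries in the
# quadratic region by (1 + 2t) and shifts entries in the linear region by 2Mt, so
# (0.5, 3.0) maps to (0.25, 2.0).
if __name__ == "__main__":
    print(prox_huber(np.array([0.5, 3.0]), t=0.5, M=1))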
| 6,182 | 44.131387 | 116 | py |
a2dr | a2dr-master/a2dr/proximal/quadratic.py | import numpy as np
from cvxpy import *
from scipy import sparse
from a2dr.proximal.composition import prox_scale
def prox_quad_form(v, t = 1, Q = None, method = "lsqr", *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = x^TQx` for symmetric
:math:`Q \\succeq 0`, scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term,
and d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0,
and d = 0.
"""
if np.isscalar(v):
v = np.array([v])
if Q is None:
raise ValueError("Q must be a matrix.")
elif np.isscalar(Q):
Q = np.array([[Q]])
if Q.shape[0] != Q.shape[1]:
raise ValueError("Q must be a square matrix.")
if Q.shape[0] != v.shape[0]:
raise ValueError("Dimension mismatch: nrow(Q) != nrow(v).")
if sparse.issparse(Q):
Q_min_eigval = sparse.linalg.eigsh(Q, k=1, which="SA", return_eigenvectors=False)[0]
if np.iscomplex(Q_min_eigval) or Q_min_eigval < 0:
raise ValueError("Q must be a symmetric positive semidefinite matrix.")
else:
if not np.all(np.linalg.eigvalsh(Q) >= 0):
raise ValueError("Q must be a symmetric positive semidefinite matrix.")
if method not in ["lsqr", "lstsq"]:
raise ValueError("method must be either 'lsqr' or 'lstsq'")
return prox_scale(prox_quad_form_base, Q=Q, method=method, *args, **kwargs)(v, t)
def prox_sum_squares(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = \\sum_i x_i^2`
for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and d = quad_term.
We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
return prox_scale(prox_sum_squares_base, *args, **kwargs)(v, t)
def prox_sum_squares_affine(v, t = 1, F = None, g = None, method = "lsqr", *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = \\|Fx - g\\|_2^2`
for matrix F, vector g, scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term,
and d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0,
and d = 0.
"""
if np.isscalar(v):
v = np.array([v])
if F is None:
raise ValueError("F must be a matrix.")
elif np.isscalar(F):
F = np.array([[F]])
if g is None:
raise ValueError("g must be a vector.")
elif np.isscalar(g):
g = np.array([g])
if F.shape[0] != g.shape[0]:
raise ValueError("Dimension mismatch: nrow(F) != nrow(g).")
if F.shape[1] != v.shape[0]:
raise ValueError("Dimension mismatch: ncol(F) != nrow(v).")
if method not in ["lsqr", "lstsq"]:
raise ValueError("method must be either 'lsqr' or 'lstsq'.")
return prox_scale(prox_sum_squares_affine_base, F=F, g=g, method=method, *args, **kwargs)(v, t)
def prox_qp(v, t = 1, Q = None, q = None, F = None, g = None, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = x^TQx+q^Tx+I{Fx<=g}`
for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and d = quad_term.
We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
if Q is None:
raise ValueError("Q must be a matrix")
if q is None:
raise ValueError("q must be a vector")
if F is None:
raise ValueError("F must be a matrix")
if g is None:
raise ValueError("g must be a vector")
if Q.shape[0] != Q.shape[1]:
raise ValueError("Q must be square")
if Q.shape[0] != q.shape[0]:
raise ValueError("Dimension mismatch: nrow(Q) != nrow(q)")
if Q.shape[1] != v.shape[0]:
raise ValueError("Dimension mismatch: ncol(Q) != nrow(v)")
if F.shape[0] != g.shape[0]:
raise ValueError("Dimension mismatch: nrow(F) != nrow(g)")
if F.shape[1] != v.shape[0]:
raise ValueError("Dimension mismatch: ncol(F) != nrow(v)")
return prox_scale(prox_qp_base(Q, q, F, g), *args, **kwargs)(v, t)
def prox_quad_form_base(v, t, Q, method = "lsqr"):
"""Proximal operator of :math:`f(x) = x^TQx`, where :math:`Q \\succeq 0` is a symmetric positive semidefinite matrix.
"""
if method == "lsqr":
# Q_min_eigval = spLA.eigsh(Q, k=1, which="SA", return_eigenvectors=False)[0]
# if np.iscomplex(Q_min_eigval) or Q_min_eigval < 0:
# raise Exception("Q must be a symmetric positive semidefinite matrix.")
Q = sparse.csr_matrix(Q)
return sparse.linalg.lsqr(2*t*Q + sparse.eye(v.shape[0]), v, atol=1e-16, btol=1e-16)[0]
elif method == "lstsq":
# if not np.all(LA.eigvalsh(Q) >= 0):
# raise Exception("Q must be a symmetric positive semidefinite matrix.")
return np.linalg.lstsq(2*t*Q + np.eye(v.shape[0]), v, rcond=None)[0]
else:
raise ValueError("method must be 'lsqr' or 'lstsq'")
def prox_sum_squares_base(v, t):
"""Proximal operator of :math:`f(x) = \\sum_i x_i^2`.
"""
return v / (1.0 + 2*t)
def prox_sum_squares_affine_base(v, t, F, g, method = "lsqr"):
"""Proximal operator of :math:`f(x) = \\|Fx - g\\|_2^2`, where F is a matrix and g is a vector.
"""
# if F.shape[0] != g.shape[0]:
# raise ValueError("Dimension mismatch: nrow(F) != nrow(g)")
# if F.shape[1] != v.shape[0]:
# raise ValueError("Dimension mismatch: ncol(F) != nrow(v)")
# Only works on dense vectors.
if sparse.issparse(g):
g = g.toarray()[:, 0]
if sparse.issparse(v):
v = v.toarray()[:, 0]
n = v.shape[0]
if method == "lsqr":
F = sparse.csr_matrix(F)
F_stack = sparse.vstack([F, 1/np.sqrt(2*t)*sparse.eye(n)])
g_stack = np.concatenate([g, 1/np.sqrt(2*t)*v])
return sparse.linalg.lsqr(F_stack, g_stack, atol=1e-16, btol=1e-16)[0]
elif method == "lstsq":
if sparse.issparse(F):
F = F.todense()
F_stack = np.vstack([F, 1/np.sqrt(2*t)*np.eye(n)])
g_stack = np.concatenate([g, 1/np.sqrt(2*t)*v])
return np.linalg.lstsq(F_stack, g_stack, rcond=None)[0]
else:
raise ValueError("method must be 'lsqr' or 'lstsq'")
def prox_qp_base(Q, q, F, g):
# check warmstart/parameter mode -- make sure the problem reduction is only done once
n = Q.shape[0]
I = np.eye(n)
v_par = Parameter(n)
t_par = Parameter(nonneg=True)
x = Variable(n)
obj = quad_form(x, Q) + sum_squares(x)/2/t_par + (q-v_par/t_par)*x
constr = [F * x <= g]
prob = Problem(Minimize(obj), constr)
def prox_qp1(v, t):
v_par.value, t_par.value = v, t
prob.solve()
return x.value
return prox_qp1
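# Minimal usage sketch (illustrative, not part of the library): with F = I, g = (1, 1),
# v = 0 and t = 0.5 the objective ||F x - g||_2^2 + 1/(2t)||x - v||_2^2 reduces to
# ||x - g||_2^2 + ||x||_2^2, which is minimized at x = g/2, so both solver backends
# should return approximately (0.5, 0.5).
if __name__ == "__main__":
    F_demo, g_demo, v_demo = np.eye(2), np.array([1.0, 1.0]), np.zeros(2)
    print(prox_sum_squares_affine(v_demo, t=0.5, F=F_demo, g=g_demo, method="lsqr"))
    print(prox_sum_squares_affine(v_demo, t=0.5, F=F_demo, g=g_demo, method="lstsq"))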
| 6,958 | 43.608974 | 121 | py |
a2dr | a2dr-master/a2dr/proximal/matrix.py | import numpy as np
from scipy import sparse
from a2dr.proximal.composition import prox_scale
from a2dr.proximal.norm import prox_norm_inf_base
def prox_neg_log_det(B, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(aB-b) + cB + d\\|B\\|_F^2`, where :math:`f(B) = -\\log\\det(B)`, where `B` is a
symmetric matrix. The scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and
d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
if B.shape[0] != B.shape[1]:
raise ValueError("B must be a square matrix.")
B_symm = (B + B.T) / 2.0
if not (np.allclose(B, B_symm)):
raise ValueError("B must be a symmetric matrix.")
return prox_scale(prox_neg_log_det_base, *args, **kwargs)(B_symm, t)
def prox_sigma_max(B, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(aB-b) + cB + d\\|B\\|_F^2`, where :math:`f(B) = \\sigma_{\\max}(B)`
is the maximum singular value of :math:`B`, for scalar t > 0, and the optional arguments are a = scale,
b = offset, c = lin_term, and d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default,
t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
return prox_scale(prox_sigma_max_base, *args, **kwargs)(B, t)
def prox_trace(B, t = 1, C = None, *args, **kwargs):
"""Proximal operator of :math:`tf(aB-b) + cB + d\\|B\\|_F^2`, where :math:`f(B) = tr(C^TB)` is the trace of
:math:`C^TB`, where C is a given matrix quantity. By default, C is the identity matrix, so :math:`f(B) = tr(B)`.
The scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and d = quad_term.
We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
if np.isscalar(B):
B = np.array([[B]])
if C is None:
C = sparse.eye(B.shape[0])
if B.shape[0] != C.shape[0]:
raise ValueError("Dimension mismatch: nrow(B) != nrow(C)")
if B.shape[1] != C.shape[1]:
raise ValueError("Dimension mismatch: ncol(B) != ncol(C)")
return prox_scale(prox_trace_base, C=C, *args, **kwargs)(B, t)
def prox_neg_log_det_base(B, t):
"""Proximal operator of :math:`f(B) = -\\log\\det(B)`.
"""
s, u = np.linalg.eigh(B)
# s_new = (s + np.sqrt(s**2 + 4*t))/2
id_pos = (s >= 0)
id_neg = (s < 0)
s_new = np.zeros(len(s))
s_new[id_pos] = (s[id_pos] + np.sqrt(s[id_pos] ** 2 + 4.0 * t)) / 2
s_new[id_neg] = 2.0 * t / (np.sqrt(s[id_neg] ** 2 + 4.0 * t) - s[id_neg])
return u.dot(np.diag(s_new)).dot(u.T)
def prox_sigma_max_base(B, t):
"""Proximal operator of :math:`f(B) = \\sigma_{\\max}(B)`, the maximum singular value of :math:`B`, otherwise
known as the spectral norm.
"""
U, s, Vt = np.linalg.svd(B, full_matrices=False)
s_new = prox_norm_inf_base(s, t)
return U.dot(np.diag(s_new)).dot(Vt)
def prox_trace_base(B, t, C):
"""Proximal operator of :math:`f(B) = tr(C^TB)`, the trace of :math:`C^TB`, where C is a given matrix quantity
such that :math:`C^TB` is square.
"""
return B - t*C
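# Minimal usage sketch (illustrative, not part of the library): the prox of -log det maps
# each eigenvalue s of B to (s + sqrt(s^2 + 4t))/2, so for B = diag(2, -1) and t = 1 the
# result is roughly diag(2.414, 0.618); in particular the output is always positive definite.
if __name__ == "__main__":
    print(prox_neg_log_det(np.diag([2.0, -1.0]), t=1))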
| 3,154 | 46.089552 | 117 | py |
a2dr | a2dr-master/a2dr/proximal/composition.py | import numpy as np
from scipy import sparse
def prox_scale(prox, *args, **kwargs):
"""Given the proximal operator of a function :math:`f`, returns the proximal operator of :math:`g` defined as
.. math::
        g(x) = tf(ax-b) + \\langle c,x \\rangle + d\\|x\\|_F^2,
where :math:`t > 0`, :math:`a \\neq 0` is a scaling term, :math:`b` is an offset, :math:`c` is a linear multiplier,
and :math:`d \\geq 0` is a quadratic multiplier.
:param prox: Function handle of a proximal operator that takes as input a vector/matrix :math:`v` and a scalar
:math:`t > 0`, and outputs
.. math::
        prox_{tf}(v) = \\arg\\min_x f(x) + \\frac{1}{2t}\\|x - v\\|_F^2,
the proximal operator of :math:`tf` evaluated at :math:`v`, where :math:`f` is an arbitrary function.
:param scale: Scaling term :math:`a \\neq 0`. Defaults to 1.
:param offset: Offset term :math:`b`. Defaults to 0.
:param lin_term: Linear term :math:`c`. If a scalar is given, then :math:`c` is its vectorization. Defaults to 0.
:param quad_term: Quadratic term :math:`d \\geq 0`. Defaults to 0.
:return: Function handle for the proximal operator of :math:`g`.
"""
scale = kwargs.pop("scale", 1.0)
offset = kwargs.pop("offset", 0)
lin_term = kwargs.pop("lin_term", 0)
quad_term = kwargs.pop("quad_term", 0)
if not np.isscalar(scale) or scale == 0:
raise ValueError("scale must be a non-zero scalar.")
if not np.isscalar(quad_term) or quad_term < 0:
raise ValueError("quad_term must be a non-negative scalar.")
def prox_new(v, t):
# if sparse.issparse(v):
# if not ((lin_term == 0 or sparse.issparse(lin_term)) and \
# (quad_term == 0 or sparse.issparse(quad_term))):
# v = v.todense()
v_new = scale*(v - lin_term)/(2*quad_term + 1) - offset
t_new = t*scale**2/(2*quad_term + 1)
return (prox(v_new, t_new, *args, **kwargs) + offset)/scale
return prox_new
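# Minimal sanity sketch (illustrative, not part of the library): wrapping a base prox with
# an offset b should reproduce the shift identity prox_{t f(. - b)}(v) = b + prox_{t f}(v - b).
# The soft-thresholding operator below (the prox of t*|x|) is written inline so that this
# example does not depend on the other modules of the package.
if __name__ == "__main__":
    soft_threshold = lambda v, t: np.maximum(v - t, 0) + np.minimum(v + t, 0)
    v_demo, t_demo, b_demo = np.array([-1.0, 0.2, 2.0]), 0.5, 1.0
    shifted_prox = prox_scale(soft_threshold, offset=b_demo)
    print(shifted_prox(v_demo, t_demo))
    print(b_demo + soft_threshold(v_demo - b_demo, t_demo))  # should match the line above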
| 1,980 | 48.525 | 119 | py |
longitudinalCOVID | longitudinalCOVID-master/main.py | import argparse
import os
import random
from collections import defaultdict
from copy import copy
import numpy as np
import torch
import data_loader as module_data_loader
import dataset as module_dataset
import model as module_arch
import model.utils.loss as module_loss
import model.utils.metric as module_metric
import trainer as trainer_module
from dataset.DatasetStatic import Phase
from dataset.dataset_utils import Views
from parse_config import ConfigParser, parse_cmd_args
def main(config, resume=None):
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(0)
num_patients = config['dataset']['num_patients']
crossVal_or_test = False
if config['test']:
folds = 1
len_fold = num_patients
crossVal_or_test = True
elif config['dataset']['cross_val']:
folds = config['dataset']['val_fold_num']
len_fold = config['dataset']['val_fold_len']
crossVal_or_test = True
else:
folds, len_fold = 1, 0
if config['dataset']['args']['val_patients']:
raise Exception(
"Please specify validation patients set in config while not using cross-validation or test phase.")
all_patients = [i for i in range(num_patients)]
np.random.shuffle(all_patients)
if resume:
config.resume = resume
logger = config.get_logger('train')
# get function handles of loss and metrics
loss = getattr(module_loss, config['loss'])
metrics = [getattr(module_metric, met) for met in config['metrics']]
# setup data_loader instances
if config['single_view']:
results = defaultdict(list)
for view in list(Views):
_cfg = copy(config)
for fold in range(folds):
logger.info('Fold Number: {}'.format(fold + 1))
logs = train(logger, _cfg, loss, metrics, fold, len_fold, all_patients, crossVal_or_test, view=view)
for k, v in list(logs.items()):
results[k].append(v)
else:
for fold in range(folds):
logger.info('Fold Number: {}'.format(fold + 1))
train(logger, config, loss, metrics, fold, len_fold, all_patients, crossVal_or_test)
def train(logger, config, loss, metrics, fold, len_fold, all_patients, crossVal_or_test, view: Views = None):
    logger.info('start training: {}'.format(config['dataset']['args']))
print("Cross of test", crossVal_or_test, all_patients, fold, len_fold, flush=True)
if crossVal_or_test:
config['dataset']['args']['val_patients'] = all_patients[fold * len_fold: (fold + 1) * len_fold]
data_loader = None
if len(all_patients) != len(config['dataset']['args']['val_patients']): # if we had any patients left in the train set
dataset = config.retrieve_class('dataset', module_dataset)(**config['dataset']['args'], phase=Phase.TRAIN,
view=view)
data_loader = config.retrieve_class('data_loader', module_data_loader)(**config['data_loader']['args'],
dataset=dataset)
val_dataset = config.retrieve_class('dataset', module_dataset)(**config['dataset']['args'], phase=Phase.VAL,
view=view)
valid_data_loader = config.retrieve_class('data_loader', module_data_loader)(**config['data_loader']['args'],
dataset=val_dataset)
# build model architecture, then print to console
model = config.initialize_class('arch', module_arch)
logger.info(model)
if config['only_validation'] or config['test']:
logger.info('Loading checkpoint: {} ...'.format(config['path']))
path = config["path"]
checkpoint = torch.load(path, map_location=lambda storage, loc: storage)
if 'state_dict' in checkpoint.keys():
model.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint)
# build optimizer, learning rate scheduler. delete every lines containing lr_scheduler for disabling scheduler
trainable_params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = config.initialize('optimizer', torch.optim, trainable_params)
lr_scheduler = config.initialize('lr_scheduler', torch.optim.lr_scheduler, optimizer)
if view:
config._save_dir = os.path.join(config._save_dir, str(view.name))
config._log_dir = os.path.join(config._log_dir, str(view.name))
os.mkdir(config._save_dir)
os.mkdir(config._log_dir)
trainer = config.retrieve_class('trainer', trainer_module)(model, loss, metrics, optimizer, config, data_loader,
fold, valid_data_loader, lr_scheduler)
return trainer.train()
if __name__ == '__main__':
args = argparse.ArgumentParser(description='PyTorch Template')
args.add_argument('-c', '--config', default=None, type=str, help='config file path (default: None)')
args.add_argument('-r', '--resume', default=None, type=str, help='path to latest checkpoint (default: None)')
args.add_argument('-d', '--device', default=None, type=str, help='indices of GPUs to enable (default: all)')
args.add_argument('-s', '--single_view', default=False, type=bool,
                      help='Defines whether a separate single-view model is trained per plane orientation')
args.add_argument('-v', '--only_validation', default=False, type=bool,
                      help='only run validation on a checkpoint model without training (requires the -p argument)')
args.add_argument('-p', '--path', default=None, type=str, help='path to latest checkpoint (default: None)')
args.add_argument('-t', '--test', default=False, type=bool,
                      help='run the test phase on the full patient list')
config = ConfigParser(*parse_cmd_args(args))
main(config)
| 6,108 | 41.72028 | 123 | py |
longitudinalCOVID | longitudinalCOVID-master/majority_voting.py | import argparse
import os
import nibabel
import numpy as np
import torch
from scipy.ndimage import rotate
from tqdm import tqdm
import data_loader as module_data_loader
import dataset as module_dataset
import model as module_arch
import model.utils.metric as module_metric
from dataset.DatasetStatic import Phase
from dataset.dataset_utils import Evaluate, Dataset
from parse_config import ConfigParser, parse_cmd_args
'''Majority voting: sum the predictions over all three plane orientations (axial, coronal, sagittal) before taking the argmax.'''
def main(config, resume=None):
if config["path"]:
resume = config["path"]
logger = config.get_logger('test')
# setup data_loader instances
dataset = config.retrieve_class('dataset', module_dataset)(
**config['dataset']['args'], phase=Phase.TEST, evaluate=config['evaluate']
)
assert config['data_loader']['args'][
               'batch_size'] == 1, "batch_size > 1! Set batch_size to one in the model config."
data_loader = config.retrieve_class('data_loader', module_data_loader)(
dataset=dataset,
batch_size=config['data_loader']['args']['batch_size'],
num_workers=config['data_loader']['args']['num_workers'],
shuffle=False
)
# build model architecture
model = config.initialize_class('arch', module_arch)
logger.info(model)
# get function handles of loss and metrics
metric_fns = [getattr(module_metric, met) for met in config['metrics']]
logger.info('Loading checkpoint: {} ...'.format(resume))
checkpoint = torch.load(resume, map_location=lambda storage, loc: storage)
if config['n_gpu'] > 1:
model = torch.nn.DataParallel(model)
if 'state_dict' in checkpoint.keys():
model.load_state_dict(checkpoint['state_dict'])
else:
model.load_state_dict(checkpoint)
# prepare model for testing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
model.eval()
res = config['dataset']['args']['size']
total_metrics = torch.zeros(len(metric_fns), config['dataset']['args']['n_classes'])
volume_metrics = torch.zeros(len(metric_fns), config['dataset']['args']['n_classes'])
with torch.no_grad():
# setup
volume = 0
axis = 0 # max 2
c = 0
alignment = [(0, 1, 2), (1, 0, 2), (2, 1, 0)]
data_shape = [res, res, res]
output_agg = torch.zeros([config['dataset']['args']['n_classes'], *data_shape]).to(device)
target_agg = torch.zeros([config['dataset']['args']['n_classes'], *data_shape]).to(device)
n_samples = 0
for idx, loaded_data in enumerate(tqdm(data_loader)):
if len(loaded_data) == 6:
# static case
data, target = loaded_data[0], loaded_data[1]
data, target = data.to(device), target.to(device)
output = model(data)
else:
# longitudinal case
x_ref, x, _, target = loaded_data[0], loaded_data[1], loaded_data[2], loaded_data[3]
x_ref, x, target = x_ref.to(device), x.to(device), target.to(device)
output, _ = model(x_ref, x)
for cl in range(output_agg.size()[0]):
x = output_agg[cl].to('cpu').numpy()
y = output[0][cl].to('cpu').numpy()
z = np.transpose(x, alignment[axis])
z[c] += y
output_agg[cl] = torch.tensor(np.transpose(z, alignment[axis])).to(device)
for cl in range(output_agg.size()[0]):
x = target_agg[cl].to('cpu').numpy()
y = target[0][cl].to('cpu').numpy()
z = np.transpose(x, alignment[axis])
z[c] += y
target_agg[cl] = torch.tensor(np.transpose(z, alignment[axis])).to(device)
c += 1
print("C is: ", c, "res is: ", res, flush=True)
if c == res:
axis += 1
c = 0
print("Axis Changed ", axis)
if axis == 3:
print("Volume finished")
path = os.path.join(config.config['trainer']['save_dir'], 'output',
*str(config._save_dir).split(os.sep)[-2:],
str(resume).split(os.sep)[-1][:-4])
os.makedirs(path, exist_ok=True)
axis = 0
label_out = output_agg.argmax(0)
label_target = target_agg.argmax(0)
evaluate_timestep(output_agg.unsqueeze(0), target_agg.unsqueeze(0), label_out, label_target,
metric_fns, config, path, volume,
volume_metrics, total_metrics,
logger)
# inferred whole volume
logger.info('---------------------------------')
logger.info(f'Volume number {int(volume) + 1}:')
for i, met in enumerate(metric_fns):
logger.info(f' {met.__name__}: {volume_metrics[i]}')
                    volume_metrics = torch.zeros(len(metric_fns), config['dataset']['args']['n_classes'])  # keep the per-class shape used at initialization
                    volume += 1
                    n_samples += 1  # count completed volumes for the final per-patient average
output_agg = torch.zeros([config['dataset']['args']['n_classes'], *data_shape]).to(device)
target_agg = torch.zeros([config['dataset']['args']['n_classes'], *data_shape]).to(device)
logger.info('================================')
logger.info(f'Averaged over all patients:')
for i, met in enumerate(metric_fns):
            logger.info(f'      {met.__name__}: {total_metrics[i] / max(n_samples, 1)}')  # per-class totals averaged over volumes
def evaluate_timestep(avg_seg_volume, target_agg, label_out, label_target, metric_fns, config, path, patient,
volume_metrics, total_metrics,
logger):
prefix = f'{config["evaluate"].value}{(int(patient) + 1):02}'
seg_volume = label_out.int().cpu().detach().numpy()
rotated_seg_volume = rotate(rotate(seg_volume, -90, axes=(0, 1)), 90, axes=(1, 2))
nibabel.save(nibabel.Nifti1Image(rotated_seg_volume, np.eye(4)), os.path.join(path, f'{prefix}_seg.nii'))
target_volume = label_target.int().cpu().detach().numpy()
rotated_target_volume = rotate(rotate(target_volume, -90, axes=(0, 1)), 90, axes=(1, 2))
nibabel.save(nibabel.Nifti1Image(rotated_target_volume, np.eye(4)), os.path.join(path, f'{prefix}_target.nii'))
# computing loss, metrics on test set
logger.info(f'Patient {int(patient) + 1}: ')
for i, metric in enumerate(metric_fns):
if metric.__name__.__contains__("loss"):
continue
current_metric = metric(avg_seg_volume, target_agg)
logger.info(f' {metric.__name__}: {current_metric}')
try:
for j in range(current_metric.shape[0]):
volume_metrics[i][j] += current_metric[j]
total_metrics[i][j] += current_metric[j]
except Exception:
print("Invalid metric shape.")
continue
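def _plane_accumulation_sketch(res=4):
    '''Illustrative sketch (not called anywhere in the pipeline): each permutation in
    `alignment` is its own inverse, so transposing, writing one slice, and transposing
    back accumulates a prediction along the chosen plane orientation. Writing a full set
    of ones along all three orientations touches every voxel once per axis, so the
    returned volume equals 3 everywhere -- mirroring how axial, coronal and sagittal
    outputs are summed above before the argmax. `res` is a made-up toy size.'''
    alignment = [(0, 1, 2), (1, 0, 2), (2, 1, 0)]
    volume = np.zeros((res, res, res))
    for axis in range(3):
        for c in range(res):
            aligned = np.transpose(volume, alignment[axis])
            aligned[c] += np.ones((res, res))
            volume = np.transpose(aligned, alignment[axis])
    return volume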
if __name__ == '__main__':
args = argparse.ArgumentParser(description='PyTorch Template')
args.add_argument('-c', '--config', default=None, type=str, help='config file path (default: None)')
args.add_argument('-d', '--device', default=None, type=str, help='indices of GPUs to enable (default: all)')
args.add_argument('-e', '--evaluate', default=Evaluate.TEST, type=Evaluate,
help='Either "training" or "test"; Determines the prefix of the folders to use')
args.add_argument('-m', '--dataset_type', default=Dataset.ISBI, type=Dataset, help='Dataset to use')
args.add_argument('-r', '--resume', default=None, type=str, help='path to latest checkpoint (default: None)')
args.add_argument('-p', '--path', default=None, type=str, help='path to latest checkpoint (default: None)')
config = ConfigParser(*parse_cmd_args(args))
main(config)
| 7,995 | 41.084211 | 115 | py |
longitudinalCOVID | longitudinalCOVID-master/parse_config.py | import logging
import os
from datetime import datetime
from functools import reduce
from importlib.machinery import SourceFileLoader
from operator import getitem
from pathlib import Path
from logger import setup_logging
from utils.util import write_config
def parse_cmd_args(args):
# parse default cli options
args = args.parse_args()
if args.device:
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
if args.resume:
resume = Path(args.resume)
cfg_fname = resume.parent.parent / 'config.py'
else:
msg_no_cfg = "Configuration file need to be specified. Add '-c config.py', for example."
assert args.config is not None, msg_no_cfg
resume = None
cfg_fname = Path("configs/" + args.config)
print("config path:", cfg_fname)
from polyaxon_client.tracking import Experiment, get_data_paths, get_outputs_path
data_paths = get_data_paths()
outputs_path = get_outputs_path()
print("PAAATHHH", outputs_path)
# load config file and apply custom cli options
config = SourceFileLoader("CONFIG", str(cfg_fname)).load_module().CONFIG
for key, value in args.__dict__.items():
config[key] = value
return config, resume
class ConfigParser:
def __init__(self, config, resume=None, modification=None, run_id=None):
"""
class to parse configuration json file. Handles hyperparameters for training, initializations of modules, checkpoint saving
and logging module.
:param config: Dict containing configurations, hyperparameters for training. contents of `config.json` file for example.
:param resume: String, path to the checkpoint being loaded.
:param modification: Dict keychain:value, specifying position values to be replaced from config dict.
:param run_id: Unique Identifier for training processes. Used to save checkpoints and training log. Timestamp is being used as default
"""
# load config file and apply modification
self._config = config
self.resume = resume
# set save_dir where trained model and log will be saved.
save_dir = Path(self.config['trainer']['save_dir'])
exper_name = self.config['name']
if run_id is None: # use timestamp as default run-id
run_id = datetime.now().strftime(r'%m%d_%H%M%S')
self._save_dir = save_dir / 'models' / exper_name / run_id
self._log_dir = save_dir / 'log' / exper_name / run_id
# make directory for saving checkpoints and log.
exist_ok = run_id == ''
self.save_dir.mkdir(parents=True, exist_ok=exist_ok)
self.log_dir.mkdir(parents=True, exist_ok=exist_ok)
# save updated config file to the checkpoint dir
write_config(self.config, self.save_dir / 'config.py')
# configure logging module
setup_logging(self.log_dir)
self.log_levels = {
0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG
}
def initialize(self, name, module, *args):
"""
finds a function handle with the name given as 'type' in config, and returns the
instance initialized with corresponding keyword args given as 'args'.
"""
module_cfg = self[name]
return getattr(module, module_cfg['type'])(*args, **module_cfg['args'])
def initialize_class(self, name, module, *args):
"""
finds a function handle with the name given as 'type' in config, and returns the
instance initialized with corresponding keyword args given as 'args'.
"""
class_instance = self.retrieve_class(name, module)
return class_instance(*args, **self[name]['args'])
def retrieve_class(self, name, module):
module_cfg = self[name]
class_name = module_cfg["type"]
base_path = os.path.join(Path(os.path.dirname(os.path.abspath(__file__))), module.__name__, f'{class_name}.py')
class_instance = getattr(SourceFileLoader(class_name, base_path).load_module(), class_name)
return class_instance
def __getitem__(self, name):
"""Access items like ordinary dict."""
return self.config[name]
def get_logger(self, name, verbosity=2):
msg_verbosity = 'verbosity option {} is invalid. Valid options are {}.'.format(verbosity, self.log_levels.keys())
assert verbosity in self.log_levels, msg_verbosity
logger = logging.getLogger(name)
logger.setLevel(self.log_levels[verbosity])
return logger
@property
def config(self):
return self._config
@property
def save_dir(self):
return self._save_dir
@property
def log_dir(self):
return self._log_dir
def _get_opt_name(flags):
for flg in flags:
if flg.startswith('--'):
return flg.replace('--', '')
return flags[0].replace('--', '')
def _set_by_path(tree, keys, value):
"""Set a value in a nested object in tree by sequence of keys."""
keys = keys.split(';')
_get_by_path(tree, keys[:-1])[keys[-1]] = value
def _get_by_path(tree, keys):
"""Access a nested object in tree by sequence of keys."""
return reduce(getitem, keys, tree)
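def _initialize_pattern_sketch():
    """Illustrative sketch (not used by the parser): `initialize` above boils down to a
    getattr lookup of the class named in the config followed by a call with the stored
    keyword arguments. The torch.optim entry below is a hypothetical config snippet that
    only demonstrates the pattern."""
    import torch
    module_cfg = {'type': 'Adam', 'args': {'lr': 1e-3}}  # hypothetical config entry
    params = [torch.nn.Parameter(torch.zeros(1))]
    return getattr(torch.optim, module_cfg['type'])(params, **module_cfg['args'])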
| 5,244 | 35.423611 | 142 | py |
longitudinalCOVID | longitudinalCOVID-master/trainer/LongitudinalWithProgressionTrainer.py | import numpy
from logger import Mode
from trainer.Trainer import Trainer
from utils.illustration_util import log_visualizations
import torch.nn.functional as F
import torch
class LongitudinalWithProgressionTrainer(Trainer):
"""
    Trainer class for training with the original segmentation loss, a difference-map loss, and a loss on the
    reference scan segmented with the input pair in reverse order.
"""
def __init__(self, model, loss, metric_ftns, optimizer, config, data_loader, fold=None,
valid_data_loader=None, lr_scheduler=None, len_epoch=None):
super().__init__(model, loss, metric_ftns, optimizer, config, data_loader, fold, valid_data_loader, lr_scheduler,
len_epoch)
def remap_labels_for_difference(self, output):
covid_noncovid_output_ref = output.argmax(1)
covid_noncovid_output_ref2 = covid_noncovid_output_ref.clone()
covid_noncovid_output_ref2[covid_noncovid_output_ref != 3] = 0
covid_noncovid_output_ref2[covid_noncovid_output_ref == 3] = 1
covid_noncovid_output_ref[covid_noncovid_output_ref >= 2] = 3
covid_noncovid_output_ref[covid_noncovid_output_ref <= 1] = 0
covid_noncovid_output_ref[covid_noncovid_output_ref == 3] = 1
        # first mask is for the COVID/non-COVID difference and the second mask is for consolidation/non-consolidation
return covid_noncovid_output_ref, covid_noncovid_output_ref2
def _process(self, epoch, data_loader, metrics, mode: Mode = Mode.TRAIN):
_len_epoch = self.len_epoch if mode == Mode.TRAIN else self.len_epoch_val
for batch_idx, (x_ref, x, target_ref, target, mismatch, is_mismatch, is_last) in enumerate(data_loader):
x_ref, x, target, target_ref = x_ref.to(self.device), x.to(self.device), target.to(
self.device), target_ref.to(self.device)
if mode == Mode.TRAIN:
self.optimizer.zero_grad()
output, encoded = self.model(x_ref, x)
output_ref = None
if mode == Mode.VAL:
mismatch = mismatch.to(self.device)
output_ref, _ = self.model(mismatch, x_ref)
if mode == Mode.TRAIN:
output_ref, _ = self.model(x, x_ref)
covid_noncovid_output_ref, covid_noncovid_output_ref2 = self.remap_labels_for_difference(output_ref)
covid_noncovid_output, covid_noncovid_output2 = self.remap_labels_for_difference(output)
covid_noncovid_target, covid_noncovid_target2 = self.remap_labels_for_difference(target)
covid_noncovid_target_ref, covid_noncovid_target_ref2 = self.remap_labels_for_difference(target_ref)
difference_output = covid_noncovid_output - covid_noncovid_output_ref
difference_output += 1 # 0,1,2 for difference map labels
difference_output_reverse = covid_noncovid_output2 - covid_noncovid_output_ref2
difference_output_reverse += 1
difference_target = covid_noncovid_target - covid_noncovid_target_ref
difference_target += 1
difference_target_reverse = covid_noncovid_target2 - covid_noncovid_target_ref2
difference_target_reverse += 1
d_output = F.one_hot(difference_output, num_classes=3).permute(0, 3, 1, 2)
d_target = F.one_hot(difference_target, num_classes=3).permute(0, 3, 1, 2)
d_target_reverse = F.one_hot(difference_target_reverse, num_classes=3).permute(0, 3, 1, 2)
d_output_reverse = F.one_hot(difference_output_reverse, num_classes=3).permute(0, 3, 1, 2)
if mode == Mode.VAL:
try:
output_refs = torch.tensor([]).to(self.device)
target_refs = torch.tensor([]).to(self.device)
empty = True
for i in range(x.size(0)):
if not is_mismatch[i]:
empty = False
output_refs = torch.cat((output_refs, output_ref[i].unsqueeze(0)))
target_refs = torch.cat((target_refs, target_ref[i].unsqueeze(0)))
if not empty:
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), output_refs, target_refs,
None, mode, False, is_last=is_last)
except Exception as e:
print("Exception in mismatch:", is_mismatch, e)
elif mode == Mode.TRAIN:
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), output_ref, target_ref,
None, mode, False, is_last=is_last)
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), d_output, d_target, None,
mode, False, True, is_last=is_last)
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), d_output_reverse, d_target_reverse,
None,
mode, True, True, is_last=is_last)
loss = self.loss(output, target, output_ref, target_ref, d_output_reverse.float(), d_target_reverse.float())
if mode == Mode.TRAIN:
loss.backward()
self.optimizer.step()
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), output, target, loss, mode, False,
is_last=is_last)
if not (batch_idx % self.log_step):
self.logger.info(f'{mode.value} Epoch: {epoch} {self._progress(data_loader, batch_idx,_len_epoch)} Loss: {loss.item():.6f}')
if not (batch_idx % (_len_epoch // 10)):
log_visualizations(self.writer, x_ref, x, output, target, output_ref, target_ref,
difference_output, difference_target, difference_output_reverse,
difference_target_reverse, encoded)
del x_ref, x, target
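# Illustrative sketch (standalone, not used by the trainer): how the progression-map labels
# are formed from two binary lesion masks. Subtracting the reference mask from the follow-up
# mask and shifting by one gives 0 (present only in the reference), 1 (unchanged) and
# 2 (present only in the follow-up), which is then one-hot encoded like d_output/d_target
# above. The toy masks below are made up for this example.
if __name__ == '__main__':
    reference_mask = torch.tensor([[0, 1, 1], [0, 0, 1]])
    followup_mask = torch.tensor([[1, 1, 0], [0, 1, 1]])
    difference_map = followup_mask - reference_mask + 1   # values in {0, 1, 2}
    one_hot_map = F.one_hot(difference_map, num_classes=3).permute(2, 0, 1)
    print(difference_map)
    print(one_hot_map.shape)   # torch.Size([3, 2, 3])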
| 5,986 | 51.982301 | 140 | py |
longitudinalCOVID | longitudinalCOVID-master/trainer/StaticTrainer.py | from logger import Mode
from trainer.Trainer import Trainer
from utils.illustration_util import log_visualizations
import torch.nn.functional as F
class StaticTrainer(Trainer):
"""
Trainer class for base training
"""
def __init__(self, model, loss, metric_ftns, optimizer, config, data_loader, fold=None,
valid_data_loader=None, lr_scheduler=None, len_epoch=None):
super().__init__(model, loss, metric_ftns, optimizer, config, data_loader, fold, valid_data_loader, lr_scheduler,
len_epoch)
def remap_labels_for_difference(self, output):
covid_noncovid_output_ref = output.argmax(1)
covid_noncovid_output_ref2 = covid_noncovid_output_ref.clone()
covid_noncovid_output_ref2[covid_noncovid_output_ref != 3] = 0
covid_noncovid_output_ref2[covid_noncovid_output_ref == 3] = 1
covid_noncovid_output_ref[covid_noncovid_output_ref >= 2] = 3
covid_noncovid_output_ref[covid_noncovid_output_ref <= 1] = 0
covid_noncovid_output_ref[covid_noncovid_output_ref == 3] = 1
        # first mask is for the COVID/non-COVID difference and the second mask is for consolidation/non-consolidation
return covid_noncovid_output_ref, covid_noncovid_output_ref2
def _process(self, epoch, data_loader, metrics, mode: Mode = Mode.TRAIN):
_len_epoch = self.len_epoch if mode == Mode.TRAIN else self.len_epoch_val
for batch_idx, (data, target, data_ref, target_ref, is_one, is_last) in enumerate(data_loader):
data, target, data_ref, target_ref = data.to(self.device), target.to(self.device), data_ref.to(
self.device), target_ref.to(self.device)
if mode == Mode.TRAIN:
self.optimizer.zero_grad()
output = self.model(data)
loss = self.loss(output, target)
if mode == Mode.TRAIN:
loss.backward()
self.optimizer.step()
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), output, target, loss, mode, False,
is_last=is_last)
covid_noncovid_output, covid_noncovid_output2 = self.remap_labels_for_difference(output)
covid_noncovid_target, covid_noncovid_target2 = self.remap_labels_for_difference(target)
covid_noncovid_target_ref, covid_noncovid_target_ref2 = self.remap_labels_for_difference(target_ref)
difference_output = None
difference_output_reverse = None
difference_target = None
difference_target_reverse = None
output_ref = None
if mode == Mode.VAL:
output_ref = self.model(data_ref)
covid_noncovid_output_ref, covid_noncovid_output_ref2 = self.remap_labels_for_difference(output_ref)
try:
for i in range(data.size(0)):
                        if is_one[i]:
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch),
output_ref[i].unsqueeze(0), target_ref[i].unsqueeze(0), None, mode,
False, is_last=is_last)
except Exception as e:
print("Exception in is_one: ", e)
difference_output = covid_noncovid_output - covid_noncovid_output_ref
difference_output += 1
difference_output_reverse = covid_noncovid_output2 - covid_noncovid_output_ref2
difference_output_reverse += 1
difference_target = covid_noncovid_target - covid_noncovid_target_ref
difference_target += 1
difference_target_reverse = covid_noncovid_target2 - covid_noncovid_target_ref2
difference_target_reverse += 1
d_output = F.one_hot(difference_output, num_classes=3).permute(0, 3, 1, 2)
d_target = F.one_hot(difference_target, num_classes=3).permute(0, 3, 1, 2)
d_target_reverse = F.one_hot(difference_target_reverse, num_classes=3).permute(0, 3, 1, 2)
d_output_reverse = F.one_hot(difference_output_reverse, num_classes=3).permute(0, 3, 1, 2)
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), d_output, d_target, None,
mode, False, True, is_last=is_last)
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), d_output_reverse,
d_target_reverse, None,
mode, True, True, is_last=is_last)
if not (batch_idx % self.log_step):
self.logger.info(f'{mode.value} Epoch: {epoch} {self._progress(data_loader, batch_idx, _len_epoch)} Loss: {loss.item():.6f}')
if not (batch_idx % (_len_epoch // 10)):
if mode == Mode.VAL:
log_visualizations(self.writer, data_ref, data, output, target, output_ref, target_ref,
difference_output, difference_target, difference_output_reverse,
difference_target_reverse, None)
del data, target
| 5,253 | 51.54 | 141 | py |
longitudinalCOVID | longitudinalCOVID-master/trainer/LongitudinalTrainer.py |
import numpy
from logger import Mode
from trainer.Trainer import Trainer
from utils.illustration_util import log_visualizations
import torch.nn.functional as F
import torch
class LongitudinalTrainer(Trainer):
"""
Trainer class
"""
def __init__(self, model, loss, metric_ftns, optimizer, config, data_loader, fold=None,
valid_data_loader=None, lr_scheduler=None, len_epoch=None):
super().__init__(model, loss, metric_ftns, optimizer, config, data_loader, fold, valid_data_loader, lr_scheduler,
len_epoch)
def remap_labels_for_difference(self, output):
covid_noncovid_output_ref = output.argmax(1)
covid_noncovid_output_ref2 = covid_noncovid_output_ref.clone()
covid_noncovid_output_ref2[covid_noncovid_output_ref != 3] = 0
covid_noncovid_output_ref2[covid_noncovid_output_ref == 3] = 1
covid_noncovid_output_ref[covid_noncovid_output_ref >= 2] = 3
covid_noncovid_output_ref[covid_noncovid_output_ref <= 1] = 0
covid_noncovid_output_ref[covid_noncovid_output_ref == 3] = 1
        # first mask is for the COVID/non-COVID difference and the second mask is for consolidation/non-consolidation
return covid_noncovid_output_ref, covid_noncovid_output_ref2
def _process(self, epoch, data_loader, metrics, mode: Mode = Mode.TRAIN):
_len_epoch = self.len_epoch if mode == Mode.TRAIN else self.len_epoch_val
TOY = False # set to True to perform a toy experiment where target and reference CTs are the same
for batch_idx, (x_ref, x, target_ref, target, mismatch, is_mismatch, is_last) in enumerate(data_loader):
x_ref, x, target, target_ref = x_ref.to(self.device), x.to(self.device), target.to(
self.device), target_ref.to(self.device)
if mode == Mode.TRAIN:
self.optimizer.zero_grad()
if not TOY:
output, encoded = self.model(x_ref, x)
else:
output, encoded = self.model(x,x)
loss = self.loss(output, target)
if mode == Mode.TRAIN:
loss.backward()
self.optimizer.step()
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), output, target, loss, mode,
is_last=is_last, toy=TOY)
covid_noncovid_output, covid_noncovid_output2 = self.remap_labels_for_difference(output)
covid_noncovid_target, covid_noncovid_target2 = self.remap_labels_for_difference(target)
covid_noncovid_target_ref, covid_noncovid_target_ref2 = self.remap_labels_for_difference(target_ref)
difference_output = None
difference_output_reverse = None
difference_target = None
difference_target_reverse = None
output_ref = None
if mode == Mode.VAL:
mismatch = mismatch.to(self.device)
if not TOY:
output_ref, encoded_ref = self.model(mismatch, x_ref)
else:
output_ref, encoded_ref = self.model(x_ref, x_ref)
covid_noncovid_output_ref, covid_noncovid_output_ref2 = self.remap_labels_for_difference(output_ref)
difference_output = covid_noncovid_output - covid_noncovid_output_ref
difference_output += 1
difference_output_reverse = covid_noncovid_output2 - covid_noncovid_output_ref2
difference_output_reverse += 1
difference_target = covid_noncovid_target - covid_noncovid_target_ref
difference_target += 1
difference_target_reverse = covid_noncovid_target2 - covid_noncovid_target_ref2
difference_target_reverse += 1
d_output = F.one_hot(difference_output, num_classes=3).permute(0, 3, 1, 2)
d_target = F.one_hot(difference_target, num_classes=3).permute(0, 3, 1, 2)
d_target_reverse = F.one_hot(difference_target_reverse, num_classes=3).permute(0, 3, 1, 2)
d_output_reverse = F.one_hot(difference_output_reverse, num_classes=3).permute(0, 3, 1, 2)
try:
output_refs = torch.tensor([]).to(self.device)
target_refs = torch.tensor([]).to(self.device)
empty = True
for i in range(x.size(0)):
if not is_mismatch[i]:
empty = False
output_refs = torch.cat((output_refs, output_ref[i].unsqueeze(0)))
target_refs = torch.cat((target_refs, target_ref[i].unsqueeze(0)))
if not empty:
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), output_refs, target_refs,
None, mode, False, is_last=is_last, toy=TOY)
except Exception as e:
print("Exception in mismatch:", is_mismatch, e)
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), d_output, d_target, None,
mode, False, True, is_last=is_last, toy=TOY)
self.log_scalars(metrics, self.get_step(batch_idx, epoch, _len_epoch), d_output_reverse,
d_target_reverse, None,
mode, True, True, is_last=is_last, toy=TOY)
if not (batch_idx % self.log_step):
self.logger.info(f'{mode.value} Epoch: {epoch} {self._progress(data_loader, batch_idx,_len_epoch)} Loss: {loss.item():.6f}')
if not (batch_idx % (_len_epoch // 10)):
log_visualizations(self.writer, x_ref, x, output, target, output_ref, target_ref,
difference_output, difference_target, difference_output_reverse,
difference_target_reverse, encoded, toy=TOY)
del x_ref, x, target, target_ref, mismatch
| 6,063 | 47.512 | 140 | py |
longitudinalCOVID | longitudinalCOVID-master/trainer/Trainer.py | from abc import abstractmethod
import numpy as np
import torch
from base import BaseTrainer
from logger import Mode
from utils import MetricTracker
class Trainer(BaseTrainer):
"""
Trainer class
"""
def __init__(self, model, loss, metric_ftns, optimizer, config, data_loader, fold=None,
valid_data_loader=None, lr_scheduler=None, len_epoch=None):
super().__init__(model, loss, metric_ftns, optimizer, config, fold)
self.config = config
self.data_loader = data_loader
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.do_training = self.data_loader is not None
        if len_epoch is None:
            # epoch-based training
            self.len_epoch = len(self.data_loader) if self.do_training else 0
        else:
            # iteration-based training: use the given number of batches per epoch
            self.len_epoch = len_epoch
        self.len_epoch_val = len(self.valid_data_loader) if self.do_validation else 0
self.lr_scheduler = lr_scheduler
self.log_step = int(np.sqrt(data_loader.batch_size)) if self.do_training else int(np.sqrt(valid_data_loader.batch_size))
self.train_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer, logger= self.logger)
self.valid_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer, logger=self.logger)
@abstractmethod
def _process(self, epoch, data_loader, metrics, mode: Mode = Mode.TRAIN):
raise NotImplementedError('Method _process() from Trainer class has to be implemented!')
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains average loss and metric in this epoch.
"""
if self.do_training:
self.model.train()
self.train_metrics.reset()
self._process(epoch, self.data_loader, self.train_metrics, Mode.TRAIN)
log = self.train_metrics.result()
if self.do_validation:
val_log = self._valid_epoch(epoch)
log.update(**{'val_' + k: v for k, v in val_log.items()})
if self.do_training and self.lr_scheduler is not None:
self.lr_scheduler.step()
return log
def _valid_epoch(self, epoch):
"""
Validate after training an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains information about validation
"""
self.model.eval()
self.valid_metrics.reset()
with torch.no_grad():
self._process(epoch, self.valid_data_loader, self.valid_metrics, Mode.VAL)
# add histogram of model parameters to the tensorboard
for name, p in self.model.named_parameters():
self.writer.add_histogram(name, p, bins='auto')
return self.valid_metrics.result()
def log_scalars(self, metrics, step, output, target, loss, mode=Mode.TRAIN, reverse=False, difference= False, toy=False,
is_last=None):
if is_last is None:
is_last = [False] * target.size(0)
if not difference:
self.writer.set_step(step, mode)
if loss is not None:
metrics.update('loss', loss.item())
for met in self.metric_ftns:
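                # vd, LTPR and LFPR are volume-level metrics: slices are accumulated inside the metric
                # until the last slice of a volume (is_last) is reached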
if met.__name__ in ["vd, LTPR, LFPR"]:
for i in range(target.size(0)):
if not is_last[i]:
met(output[i].unsqueeze(0), target[i].unsqueeze(0), is_last[i])
continue
metrics.update(met.__name__, met(output[i].unsqueeze(0), target[i].unsqueeze(0)))
metrics.update(met.__name__, met(output, target))
elif not reverse:
self.writer.set_step(step, mode)
for met in self.metric_ftns:
if met.__name__ in ["LFPR", "LTPR"]:
continue
if met.__name__ in ["vd"]:
for i in range(target.size(0)):
if not is_last[i]:
                            met(output[i].unsqueeze(0), target[i].unsqueeze(0), is_last[i])
continue
metrics.update(met.__name__, met(output[i].unsqueeze(0), target[i].unsqueeze(0)))
metrics.update(met.__name__ + "_difference", met(output, target))
else:
self.writer.set_step(step, mode)
last_metric = self.metric_ftns[-1].__name__
for met in self.metric_ftns:
if met.__name__ in ["LFPR","LTPR"]:
continue
if met.__name__ in ["vd"]:
for i in range(target.size(0)):
if not is_last[i]:
                            met(output[i].unsqueeze(0), target[i].unsqueeze(0), is_last[i])
continue
metrics.update(met.__name__, met(output[i].unsqueeze(0), target[i].unsqueeze(0)))
if met.__name__ in [last_metric]:
metrics.update(met.__name__ + "_difference_reverse", met(output, target), is_last=is_last)
metrics.update(met.__name__ + "_difference_reverse", met(output, target))
@staticmethod
def _progress(data_loader, batch_idx, batches):
base = '[{}/{} ({:.0f}%)]'
if hasattr(data_loader, 'n_samples'):
current = batch_idx * data_loader.batch_size
total = data_loader.n_samples
else:
current = batch_idx
total = batches
return base.format(current, total, 100.0 * current / total)
@staticmethod
def get_step(batch_idx, epoch, len_epoch):
return (epoch - 1) * len_epoch + batch_idx
| 5,947 | 40.594406 | 132 | py |
longitudinalCOVID | longitudinalCOVID-master/data_loader/Dataloader.py | from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, WeightedRandomSampler
from torch.utils.data.dataloader import default_collate
import numpy as np
class Dataloader(DataLoader):
"""
data loading -- uncomment the commented lines for reverse weight sampling the classes
"""
def __init__(self, dataset, batch_size, shuffle=True, num_workers=1):
self.dataset = dataset
# self.weights = np.array(self.dataset.number_of_classes)
# self.weights = 1 / self.weights
# self.weights = self.weights / sum(self.weights)
# self.balance = self.dataset.weights
self.shuffle = shuffle
self.batch_idx = 0
        if self.shuffle:
            self.sampler = RandomSampler(self.dataset)  # Replace with: WeightedRandomSampler(self.balance, len(self.dataset))
        else:
            self.sampler = SequentialSampler(self.dataset)
        # torch's DataLoader does not allow shuffle=True together with a custom sampler,
        # so shuffling is handled entirely by the sampler from here on
        self.shuffle = False
self.init_kwargs = {
'dataset': self.dataset,
'batch_size': batch_size,
'shuffle': self.shuffle,
'collate_fn': default_collate,
'num_workers': num_workers
}
super().__init__(sampler=self.sampler, **self.init_kwargs)
| 1,242 | 34.514286 | 125 | py |
longitudinalCOVID | longitudinalCOVID-master/dataset/dataset_utils.py | import csv
import os
import sys
from collections import defaultdict, OrderedDict
from enum import Enum
from glob import glob
import gc
import h5py
import numpy as np
import pickle
from skimage.transform import resize
from dataset.dynamic.preprocessing import DatasetPreprocessor
class Modalities(Enum):
SIMPLE = 'simple'
class Phase(Enum):
TRAIN = 'train'
VAL = 'val'
TEST = 'test'
class Views(Enum):
SAGITTAL = 0
CORONAL = 1
AXIAL = 2
class Mode(Enum):
STATIC = 'static'
LONGITUDINAL = 'longitudinal'
class Dataset(Enum):
ISBI = 'isbi'
INHOUSE = 'inhouse'
class Evaluate(Enum):
TRAINING = 'training'
TEST = 'test'
def retrieve_data_dir_paths(data_dir, evaluate: Evaluate, phase, preprocess, val_patients, mode, size, view=None,
sensitivity=False):
empty_slices, non_positive_slices = None, None
    if preprocess:  # Done only once
        preprocessor = DatasetPreprocessor(
            data_dir + "/config_lung.yml", data_dir, size)  # Place dataset/dynamic/config_lung.yml in a directory next to data
        empty_slices, non_positive_slices = preprocess_files(data_dir)
if mode == Mode.LONGITUDINAL:
if sensitivity:
data_dir_paths = retrieve_paths_longitudinal_three(get_patient_paths(data_dir, evaluate, phase),
phase).items()
else:
data_dir_paths = retrieve_paths_longitudinal(get_patient_paths(data_dir, evaluate, phase),
phase).items()
else:
pps = get_patient_paths(data_dir, evaluate, phase)
data_dir_paths = retrieve_paths_static(pps, phase).items()
data_dir_paths = OrderedDict(sorted(data_dir_paths))
_data_dir_paths = []
patient_keys = [key for key in data_dir_paths.keys()]
print(patient_keys, flush=True)
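    # val_patients holds the indices of the held-out patients: they are removed from the training
    # split and used exclusively for validation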
if phase == Phase.TRAIN:
print(patient_keys, val_patients)
pk = patient_keys.copy()
for val_patient in val_patients[::-1]:
patient_keys.remove(pk[val_patient])
for patient in patient_keys:
_data_dir_paths += data_dir_paths[patient]
elif phase == Phase.VAL:
for val_patient in val_patients:
_data_dir_paths += data_dir_paths[patient_keys[val_patient]]
else:
for patient in patient_keys:
_data_dir_paths += data_dir_paths[patient]
if view:
_data_dir_paths = list(filter(lambda path: int(path[0].split(os.sep)[-2]) == view.value, _data_dir_paths))
if phase == Phase.TRAIN or phase == Phase.VAL:
_data_dir_paths = retrieve_filtered_data_dir_paths(data_dir, phase, _data_dir_paths, empty_slices,
non_positive_slices,
mode, val_patients, view)
return _data_dir_paths
def preprocess_files(root_dir, base_path='data28'):
patients = list(filter(lambda name: (Evaluate.TRAINING.value) in name,
os.listdir(root_dir)))
empty_slices = []
non_positive_slices = []
i_patients = len(patients) + 1
log_file = [['pat_id_time_step/registered_upon', 0, 1, 2, 3, 4]]
for patient in patients:
gc.collect()
patient_path = os.path.join(root_dir, patient)
print('Processing patient', patient, flush=True)
patient_data_path = os.path.join(patient_path, 'preprocessed', patient)
patient_label_path = os.path.join(patient_path, 'pathMasks', patient)
patient_lung_path = os.path.join(patient_path, 'masks', patient)
for modality in list(Modalities):
mod, value = modality.name, modality.value
t = 0
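            # count the number of time points (sessions) for this patient from its NIfTI volumes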
for file in os.listdir(root_dir+f"/{patient}/preprocessed/"): # change directory
if file.endswith(".nii"):
t += 1
for i in range(t): # i registered with j
for j in range(t):
label_path = f'{patient_label_path}_{i + 1}-{j + 1}_pathMask.npy'
lung_path = f'{patient_lung_path}_{i + 1}-{j + 1}_mask.npy'
data_path = f'{patient_data_path}_{i + 1}-{j + 1}_{value}_pp.npy'
normalized_data = np.load(data_path)
lung_mask = np.load(lung_path)
rotated_labels = np.load(label_path)
# create slices through all views
path_reg = str(i + 1) + '_' + str(j + 1)
log_file += [[patient + path_reg] + (
np.histogram(rotated_labels, [0, 1, 2, 3, 4, 5])[0] / (300 ** 3)).tolist()]
temp_empty_slices, temp_non_positive_slices = create_slices(normalized_data, rotated_labels,
os.path.join(patient_path, base_path,
str(i + 1), path_reg),
value)
empty_slices += temp_empty_slices
non_positive_slices += temp_non_positive_slices
i_patients += 1
with open(root_dir + '/hist.csv', 'w') as f:
csv.writer(f, delimiter=',').writerows(log_file)
return empty_slices, non_positive_slices
def transform_data(data_path, label, pathology, mins, maxs):  # preprocessing method that doesn't use registration
data = np.load(data_path)
if label:
print("label max:", np.max(data))
print("label:", np.histogram(data, [0, 1, 2, 3, 4, 5]))
if not pathology:
base_mask_path, patient_number = data_path.split('/')[:-2], data_path.split('/')[-1][:-12] + 'mask.npy'
p = ''
for jj in base_mask_path:
p += jj + '/'
lung_mask = np.load(p + 'masks/' + patient_number)
data = lung_mask
if pathology and np.histogram(data, [0, 1, 2, 3, 4, 5])[0][1] == 0:
base_mask_path, patient_number = data_path.split('/')[:-2], data_path.split('/')[-1][:-12] + 'mask.npy'
p = ''
for jj in base_mask_path:
p += jj + '/'
lung_mask = np.load(p + 'masks/' + patient_number)
try:
data[data + lung_mask == 1] = 1
except Exception:
print("EXCEPTION!!!!!! lung mask is None", data.shape, lung_mask.shape)
out_dims = np.array([300, 300, 300])
if len(data.shape) == 4:
data = data[0]
if label:
coords = np.argwhere(data)
# bounding box
x_min, y_min, z_min = coords.min(axis=0)
x_max, y_max, z_max = coords.max(axis=0) + 1 # include top slice
data = data[x_min:x_max, y_min:y_max, z_min:z_max]
maxs = [x_max, y_max, z_max]
mins = [x_min, y_min, z_min]
else:
data = data[mins[0]:maxs[0], mins[1]:maxs[1], mins[2]:maxs[2]]
x_dim, y_dim, z_dim = data.shape
x_pad = get_padding(out_dims[0], x_dim)
y_pad = get_padding(out_dims[1], y_dim)
z_pad = get_padding(out_dims[2], z_dim)
smaller = (
(x_pad[0] < 0) * (out_dims[0] - x_dim), (y_pad[0] < 0) * (out_dims[1] - y_dim),
(z_pad[0] < 0) * (out_dims[2] - z_dim))
if smaller != (0, 0, 0):
new_x, new_y, new_z = data.shape[0] + smaller[0], data.shape[1] + smaller[1], data.shape[2] + smaller[2]
if label:
data = resize(data, (new_x, new_y, new_z), order=0)
else:
data = resize(data, (new_x, new_y, new_z))
x_dim, y_dim, z_dim = data.shape
x_pad = get_padding(out_dims[0], x_dim)
y_pad = get_padding(out_dims[1], y_dim)
z_pad = get_padding(out_dims[2], z_dim)
bigger = (
(x_pad[0] > 0) * (out_dims[0] - x_dim), (y_pad[0] > 0) * (out_dims[1] - y_dim),
(z_pad[0] > 0) * (out_dims[2] - z_dim))
if bigger != (0, 0, 0):
new_x, new_y, new_z = data.shape[0] + bigger[0], data.shape[1] + bigger[1], data.shape[2] + bigger[2]
if label:
data = resize(data, (new_x, new_y, new_z), order=0)
else:
data = resize(data, (new_x, new_y, new_z))
x_dim, y_dim, z_dim = data.shape
x_pad = get_padding(out_dims[0], x_dim)
y_pad = get_padding(out_dims[1], y_dim)
z_pad = get_padding(out_dims[2], z_dim)
data = np.pad(data, (x_pad, y_pad, z_pad), 'constant')
return data, mins, maxs
def get_padding(max_dim, current_dim):
diff = max_dim - current_dim
pad = diff // 2
if diff % 2 == 0:
return pad, pad
else:
return pad, pad + 1
def create_slices(data, label, timestep_path, modality):
empty_slices = []
non_positive_slices = []
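    # slices are extracted along all three anatomical views; slices with (nearly) constant intensity
    # or without any positive label are recorded so they can be filtered out later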
for view in list(Views):
name, axis = view.name, view.value
temp_data = np.moveaxis(data, axis, 0)
temp_labels = np.moveaxis(label, axis, 0)
for i, (data_slice, label_slice) in enumerate(zip(temp_data, temp_labels)):
path = os.path.join(timestep_path, str(axis), f'{i:03}')
full_path = os.path.join(path, f'{modality}.h5')
if np.max(data_slice) - np.min(data_slice) <= 1e-5:
empty_slices.append(path)
if np.max(label_slice) - np.min(label_slice) <= 1e-5:
non_positive_slices.append(path)
while not os.path.exists(full_path): # sometimes file is not created correctly => Just redo until it exists
if not os.path.exists(path):
os.makedirs(path)
with h5py.File(full_path, 'w') as data_file:
data_file.create_dataset('data', data=data_slice, dtype='f')
data_file.create_dataset('label', data=label_slice, dtype='i')
data_file.create_dataset('hist', data=label_slice, dtype='i')
return empty_slices, non_positive_slices
def retrieve_paths_static(patient_paths, phase):
data_dir_paths = defaultdict(list)
for patient_path in patient_paths:
if not os.path.isdir(patient_path):
continue
print("patient path", patient_path, flush=True)
sys.stdout.flush()
patient = patient_path.split(os.sep)[-2]
for timestep in filter(lambda x: os.path.isdir(os.path.join(patient_path, x)), os.listdir(patient_path)):
timestep_int = int(timestep)
timestep_path = os.path.join(patient_path, timestep)
timestep_path = os.path.join(timestep_path, str(timestep_int) + '_' + str(
                timestep_int))  # in the static case we use the non-registered data because there is no reference CT
if phase != Phase.TRAIN and timestep_int == 1:
continue
for axis in filter(lambda x: os.path.isdir(os.path.join(timestep_path, x)), os.listdir(timestep_path)):
axis_path = os.path.join(timestep_path, axis)
slice_paths = filter(lambda x: os.path.isdir(x),
map(lambda x: os.path.join(axis_path, x), os.listdir(axis_path)))
data_dir_paths[patient] += slice_paths
return data_dir_paths
def retrieve_paths_longitudinal(patient_paths, phase):
data_dir_paths = defaultdict(list)
for patient_path in patient_paths:
if not os.path.isdir(patient_path):
continue
patient = patient_path.split(os.sep)[-2]
for timestep_x in sorted(
filter(lambda x: os.path.isdir(os.path.join(patient_path, x)), os.listdir(patient_path))):
x_timestep = defaultdict(list)
timestep_x_int = int(timestep_x)
timestep_x_path = os.path.join(patient_path, timestep_x)
timestep_x_path = os.path.join(timestep_x_path, str(timestep_x_int) + '_' + str(timestep_x_int))
for axis in sorted(
filter(lambda x: os.path.isdir(os.path.join(timestep_x_path, x)), os.listdir(timestep_x_path))):
axis_path = os.path.join(timestep_x_path, axis)
slice_paths = sorted(filter(lambda x: os.path.isdir(x),
map(lambda x: os.path.join(axis_path, x), os.listdir(axis_path))))
x_timestep[axis] = slice_paths
for timestep_x_ref in sorted(
filter(lambda x: os.path.isdir(os.path.join(patient_path, x)), os.listdir(patient_path))):
x_ref_timestep = defaultdict(list)
timestep_x_ref_int = int(timestep_x_ref)
timestep_x_ref_path = os.path.join(patient_path, timestep_x_ref)
timestep_x_ref_path = os.path.join(timestep_x_ref_path,
str(timestep_x_ref_int) + '_' + str(
timestep_x_int)) # here we use reference CT that is registered to target CT
for axis in sorted(filter(lambda x: os.path.isdir(os.path.join(timestep_x_ref_path, x)),
os.listdir(timestep_x_ref_path))):
axis_path = os.path.join(timestep_x_ref_path, axis)
slice_paths = sorted(filter(lambda x: os.path.isdir(x),
map(lambda x: os.path.join(axis_path, x), os.listdir(axis_path))))
x_ref_timestep[axis] = slice_paths
if phase == Phase.TRAIN:
if timestep_x_int != timestep_x_ref_int: # all_combinations
data_dir_paths[patient] += zip(x_ref_timestep[axis], x_timestep[axis])
else:
if timestep_x_int == timestep_x_ref_int + 1: # just (ref, target) = (1,2) or (2,3) is sent to trainer and the reverse order (2,1) is used in trainer too
data_dir_paths[patient] += zip(x_ref_timestep[axis], x_timestep[axis])
sys.stdout.flush()
return data_dir_paths
def retrieve_paths_longitudinal_three(patient_paths,
phase=Phase.VAL): # to retrieve triples of CTs for patients with three sessions (for sensitivity analysis)
data_dir_paths = defaultdict(list)
for patient_path in patient_paths:
if not os.path.isdir(patient_path):
continue
patient = patient_path.split(os.sep)[-2]
for timestep_x in sorted(
filter(lambda x: os.path.isdir(os.path.join(patient_path, x)), os.listdir(patient_path))):
x_timestep = defaultdict(list)
timestep_x_int = int(timestep_x)
timestep_x_path = os.path.join(patient_path, timestep_x)
timestep_x_path = os.path.join(timestep_x_path, str(timestep_x_int) + '_' + str(timestep_x_int))
for axis in sorted(
filter(lambda x: os.path.isdir(os.path.join(timestep_x_path, x)), os.listdir(timestep_x_path))):
axis_path = os.path.join(timestep_x_path, axis)
slice_paths = sorted(filter(lambda x: os.path.isdir(x),
map(lambda x: os.path.join(axis_path, x), os.listdir(axis_path))))
x_timestep[axis] = slice_paths
x_ref_timestep = [defaultdict(list), defaultdict(list)]
i = -1
for timestep_x_ref in sorted(
filter(lambda x: os.path.isdir(os.path.join(patient_path, x)), os.listdir(patient_path))):
timestep_x_ref_int = int(timestep_x_ref)
timestep_x_ref_path = os.path.join(patient_path, timestep_x_ref)
if timestep_x_int != timestep_x_ref_int:
i += 1
else:
continue
timestep_x_ref_path = os.path.join(timestep_x_ref_path,
str(timestep_x_ref_int) + '_' + str(timestep_x_int))
for axis in sorted(filter(lambda x: os.path.isdir(os.path.join(timestep_x_ref_path, x)),
os.listdir(timestep_x_ref_path))):
axis_path = os.path.join(timestep_x_ref_path, axis)
slice_paths = sorted(filter(lambda x: os.path.isdir(x),
map(lambda x: os.path.join(axis_path, x), os.listdir(axis_path))))
x_ref_timestep[i][axis] = slice_paths
if i < 1:
continue
for axis in sorted(filter(lambda x: os.path.isdir(os.path.join(timestep_x_ref_path, x)),
os.listdir(timestep_x_ref_path))):
data_dir_paths[patient] += zip(x_ref_timestep[0][axis], x_ref_timestep[1][axis], x_timestep[axis])
sys.stdout.flush()
return data_dir_paths
def get_patient_paths(data_dir, evaluate, phase):
patient_paths = map(lambda name: os.path.join(name, 'data28'), # change directory
(filter(
lambda name: (evaluate.value if phase == Phase.VAL else Evaluate.TRAINING.value) in name,
glob(os.path.join(data_dir, '*')))))
return patient_paths
def retrieve_filtered_data_dir_paths(root_dir, phase, data_dir_paths, empty_slices, non_positive_slices, mode,
val_patients, view: Views = None):
empty_file_path = os.path.join(root_dir, 'empty_slices28.pckl')
non_positive_slices_path = os.path.join(root_dir, 'non_positive_slices28.pckl')
if empty_slices:
pickle.dump(empty_slices, open(empty_file_path, 'wb'))
if non_positive_slices:
pickle.dump(non_positive_slices, open(non_positive_slices_path, 'wb'))
data_dir_path = os.path.join(root_dir,
f'data_dir_{mode.value}_{phase.value}_{val_patients}{f"_{view.name}" if view else ""}28.pckl')
if os.path.exists(data_dir_path):
# means it has been preprocessed before -> directly load data_dir_paths
data_dir_paths = pickle.load(open(data_dir_path, 'rb'))
print(f'Elements in data_dir_paths: {len(data_dir_paths)}')
else:
if not empty_slices:
empty_slices = pickle.load(open(empty_file_path, 'rb'))
if not non_positive_slices:
non_positive_slices = pickle.load(open(non_positive_slices_path, 'rb'))
print(f'Elements in data_dir_paths before filtering empty slices: {len(data_dir_paths)}')
if mode == Mode.STATIC:
data_dir_paths = [x for x in data_dir_paths if x not in set(empty_slices + non_positive_slices)]
else:
data_dir_pathss = []
for x_ref_1, x in data_dir_paths:
if x not in set(empty_slices + non_positive_slices) and phase != Phase.TEST:
data_dir_pathss += [(x_ref_1, x)] # ,(x_ref_1, x) add for augmentation (doubling data)
else:
data_dir_pathss += [(x_ref_1, x)]
data_dir_paths = data_dir_pathss
print(f'Elements in data_dir_paths after filtering empty slices: {len(data_dir_paths)}')
pickle.dump(data_dir_paths, open(data_dir_path, 'wb'))
return data_dir_paths
| 19,416 | 41.863135 | 177 | py |
longitudinalCOVID | longitudinalCOVID-master/dataset/rigid_and_deformable_registration.py | from pathlib import Path
import SimpleITK as sitk
import numpy as np
import sys
import torch
import nibabel as nib
from skimage.transform import resize
def iteration_callback(filter):
global itr
print("deformable iter:", itr, "loss:", filter.GetMetricValue(), flush=True)
itr += 1
def save(filter, fixed, moving, fct, mct, fpathology, mpathology):
m = sitk.GetArrayFromImage(sitk.Resample(moving, fixed, filter,
sitk.sitkLinear, 0.0,
moving.GetPixelIDValue()))
mct = resize(mct, fct.shape)
mct = sitk.GetImageFromArray(mct, False)
mct = sitk.GetArrayFromImage(sitk.Resample(mct, fixed, filter,
sitk.sitkLinear, 0.0,
mct.GetPixelIDValue()))
if mpathology is not None:
mpathology = resize(mpathology, fpathology.shape, order=0)
mpathology = sitk.GetImageFromArray(mpathology, False)
mpathology = sitk.GetArrayFromImage(sitk.Resample(mpathology, fixed, filter,
sitk.sitkLinear, 0.0,
mpathology.GetPixelIDValue()))
return m, mct, mpathology
def log_rigid():
global iteration
print("rigid iter:", iteration, flush=True)
iteration += 1
def rigid_registration(f, m):
transform = sitk.CenteredTransformInitializer(f,
m,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
# multi-resolution rigid registration using Mutual Information
registration_m = sitk.ImageRegistrationMethod()
registration_m.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_m.SetMetricSamplingStrategy(registration_m.RANDOM)
registration_m.SetMetricSamplingPercentage(0.01)
registration_m.SetInterpolator(sitk.sitkLinear)
registration_m.SetOptimizerAsGradientDescent(learningRate=1.0,
numberOfIterations=100,
convergenceMinimumValue=1e-6,
convergenceWindowSize=10)
registration_m.SetOptimizerScalesFromPhysicalShift()
registration_m.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1])
registration_m.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0])
registration_m.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
registration_m.SetInitialTransform(transform)
# add iteration callback, save central slice in xy, xz, yz planes
    global iteration
    iteration = 1
registration_m.AddCommand(sitk.sitkIterationEvent,
lambda: log_rigid())
rigid_transformation = registration_m.Execute(f, m)
m = sitk.Resample(m, f, rigid_transformation, sitk.sitkLinear, 0.0,
m.GetPixelIDValue())
print("rigid registration finished.", flush=True)
return f, m
itr = 0
iteration = 1
def deformable_registration(fixed_image, moving_image, fixed_ct, moving_ct, fixed_pathology, moving_pathology):
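    # the registration is driven by the lung masks (fixed_image / moving_image); the resulting
    # B-spline transform is also applied to the corresponding CT volumes and pathology masks in save()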
moving_image = resize(moving_image, fixed_image.shape, order=0)
fixed_image = sitk.GetImageFromArray(fixed_image, False)
moving_image = sitk.GetImageFromArray(moving_image, False)
    # uncomment to do rigid registration first
# fixed_image, moving_image = rigid_registration(fixed_image,moving_image)
registration_method = sitk.ImageRegistrationMethod()
# Determine the number of BSpline control points using the physical
# spacing we want for the finest resolution control grid.
grid_physical_spacing = [50.0, 50.0, 50.0] # A control point every 50mm
image_physical_size = [size * spacing for size, spacing in zip(fixed_image.GetSize(), fixed_image.GetSpacing())]
mesh_size = [int(image_size / grid_spacing + 0.5) \
for image_size, grid_spacing in zip(image_physical_size, grid_physical_spacing)]
# The starting mesh size will be 1/4 of the original, it will be refined by
# the multi-resolution framework.
mesh_size = [int(sz / 4 + 0.5) for sz in mesh_size]
initial_transform = sitk.BSplineTransformInitializer(image1=fixed_image,
transformDomainMeshSize=mesh_size, order=3)
# Instead of the standard SetInitialTransform we use the BSpline specific method which also
# accepts the scaleFactors parameter to refine the BSpline mesh. In this case we start with
# the given mesh_size at the highest pyramid level then we double it in the next lower level and
# in the full resolution image we use a mesh that is four times the original size.
registration_method.SetInitialTransformAsBSpline(initial_transform,
inPlace=False,
scaleFactors=[1, 2, 4])
registration_method.SetMetricAsMeanSquares()
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0,
numberOfIterations=50,
convergenceMinimumValue=1e-6,
convergenceWindowSize=10)
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.AddCommand(sitk.sitkIterationEvent, lambda: iteration_callback(registration_method))
global itr
itr = 0
final_transformation = registration_method.Execute(fixed_image, moving_image)
m, mct, mpathology = save(final_transformation, fixed_image, moving_image, fixed_ct, moving_ct, fixed_pathology,
moving_pathology)
print(final_transformation, flush=True)
print('\nOptimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
return m, mct, mpathology
| 6,550 | 45.792857 | 118 | py |
longitudinalCOVID | longitudinalCOVID-master/dataset/DatasetStatic.py | import os
import sys
import h5py
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from matplotlib import cm
from skimage.transform import resize
from torch.utils.data import Dataset
from pathlib import Path
from skimage import feature
from torchvision.transforms import transforms
from dataset.dataset_utils import Phase, Modalities, Views, Mode, retrieve_data_dir_paths, Evaluate
class DatasetStatic(Dataset):
"""DatasetStatic dataset"""
def __init__(self, data_dir, phase=Phase.TRAIN, modalities=(), val_patients=None, evaluate: Evaluate = Evaluate.TRAINING, preprocess=True, size=300, n_classes=5,
view: Views = None):
self.modalities = list(map(lambda x: Modalities(x), modalities))
self.size = size
self.n_classes = n_classes
self.data_dir_paths = retrieve_data_dir_paths(data_dir, evaluate, phase, preprocess, val_patients, Mode.STATIC, size, view)
def __len__(self):
return len(self.data_dir_paths)
def crop_center(self, img, cropx, cropy):
z, y, x = img.shape
startx = x // 2 - (cropx // 2)
starty = y // 2 - (cropy // 2)
return img[:, starty:starty + cropy, startx:startx + cropx]
def __getitem__(self, idx):
data, label = [], None
slice = int(self.data_dir_paths[idx].split("/")[-1])
view = int(self.data_dir_paths[idx].split("/")[-2])
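        # is_last marks the final slice of a volume: the next sample's slice index wraps around
        # while the current view is axial, which tells volume-level metrics to aggregate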
try:
if idx + 1 >= self.__len__():
is_last = True
else:
next_one = self.data_dir_paths[idx + 1]
next_slice = int(next_one.split("/")[-1])
is_last = next_slice <= slice and view == 2
except Exception as e:
print("IS_LAST Exception", e)
is_last = True
for i, modality in enumerate(self.modalities):
try:
with h5py.File(os.path.join(self.data_dir_paths[idx], f'{modality.value}.h5'), 'r') as f:
data.append(f['data'][()])
if label is None:
label = f['label'][()]
label[label > self.n_classes - 1] = self.n_classes - 1
label = F.one_hot(torch.as_tensor(label, dtype=torch.int64), num_classes=self.n_classes).permute(2, 0, 1)
except Exception as e:
print("EXCEPTION in loading data!: ", e)
return self.__getitem__(idx+1)
mismatch, mismatch_label = [], None
print(self.data_dir_paths[idx], flush=True)
is_one = False
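        # the 'mismatch' sample is the other session's scan registered onto the current time point
        # (e.g. folder 1/1_2 holds scan 1 registered to scan 2)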
if self.data_dir_paths[idx].__contains__("2_2"):
mismatch_path = self.data_dir_paths[idx].replace("2/2_2", "1/1_2")
is_one = True
elif self.data_dir_paths[idx].__contains__("1_1"):
mismatch_path = self.data_dir_paths[idx].replace("1/1_1", "2/2_1")
else:
mismatch_path = self.data_dir_paths[idx].replace("3/3_3", "2/2_3")
for i, modality in enumerate(self.modalities):
with h5py.File(os.path.join(mismatch_path, f'{modality.value}.h5'), 'r') as f:
mismatch.append(f['data'][()])
mismatch_label = torch.as_tensor(f['label'][()], dtype=torch.int64)
mismatch_label[mismatch_label > self.n_classes - 1] = self.n_classes - 1
mismatch_label = F.one_hot(mismatch_label, num_classes=self.n_classes).permute(2, 0, 1)
data = np.array(data)
if data.shape != (1,self.size, self.size):
print("INCORRECT SHAPE", self.data_dir_paths[idx], data.shape, label.shape, flush=True)
data = resize(data,(1,self.size, self.size))
label = resize(label, (self.n_classes, self.size, self.size), order=0)
mismatch = np.array(mismatch)
if mismatch.shape != (1,self.size, self.size):
print("INCORRECT SHAPE mismatch", mismatch_path, mismatch.shape, mismatch_label.shape , flush=True)
mismatch = resize(mismatch, (1,self.size, self.size))
mismatch_label = resize(mismatch_label, (self.n_classes, self.size, self.size), order=0)
mismatch = torch.as_tensor(mismatch)
data = torch.as_tensor(data).float()
return data.float(), torch.as_tensor(label).float(), mismatch.float(), torch.as_tensor(mismatch_label).float(), is_one, is_last
| 4,367 | 43.571429 | 165 | py |
longitudinalCOVID | longitudinalCOVID-master/dataset/DatasetLongitudinal.py | import os
import h5py
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from skimage import feature
from skimage.transform import resize
from torch.utils.data import Dataset
from torchvision import transforms
from dataset.dataset_utils import Phase, Modalities, Mode, retrieve_data_dir_paths, Evaluate
class DatasetLongitudinal(Dataset):
"""DatasetLongitudinal dataset"""
def __init__(self, data_dir, phase=Phase.TRAIN, modalities=(), val_patients=None,
evaluate: Evaluate = Evaluate.TRAINING, size=300, n_classes=5, preprocess=True, view=None):
self.modalities = list(map(lambda x: Modalities(x), modalities))
self.phase = phase
self.size = size
self.n_classes = n_classes
self.data_dir_paths = retrieve_data_dir_paths(data_dir, evaluate, phase, preprocess, val_patients,
Mode.LONGITUDINAL, size, view)
self.transforms = transforms.Compose([transforms.RandomRotation(10),
transforms.RandomAffine((0, 0), translate=(0,0.25))]) # use for augmentation
def __len__(self):
return len(self.data_dir_paths)
def crop_center(self, img, cropx, cropy):
z, y, x = img.shape
        startx = x // 2 - (cropx // 2)
        starty = y // 2 - (cropy // 2)
        return img[:, starty:starty + cropy, startx:startx + cropx]
def __getitem__(self, idx):
x_ref, x, ref_label, label = [], [], None, None
x_ref_path, x_path = self.data_dir_paths[idx]
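        # x_ref_path points to the reference CT registered onto the target CT's space,
        # x_path points to the target CT itself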
slice = int(x_path.split("/")[-1])
view = int(x_path.split("/")[-2])
try:
            if idx + 1 >= self.__len__():  # is_last is used for LTPR, LFPR and VD metrics -- it can be omitted from the code if not using these metrics
is_last = True
else:
next_one = self.data_dir_paths[idx + 1][1]
next_slice = int(next_one.split("/")[-1])
is_last = next_slice <= slice and view == 2
except:
is_last = True
print("Exception in extracting next slice")
for i, modality in enumerate(self.modalities):
with h5py.File(os.path.join(x_ref_path, f'{modality.value}.h5'), 'r') as f:
x_ref.append(f['data'][()])
if ref_label is None:
ref_label = torch.as_tensor(f['label'][()], dtype=torch.int64)
ref_label[ref_label > self.n_classes - 1] = self.n_classes - 1
ref_label = F.one_hot(ref_label, num_classes=self.n_classes).permute(2, 0, 1)
with h5py.File(os.path.join(x_path, f'{modality.value}.h5'), 'r') as f:
x.append(f['data'][()])
if label is None:
try:
label = torch.as_tensor(f['label'][()], dtype=torch.int64)
label[label > self.n_classes - 1] = self.n_classes - 1
label = F.one_hot(label, num_classes=self.n_classes).permute(2, 0, 1) # volume
except Exception:
return self.__getitem__(idx + 1)
mismatch = []
is_mismatch = False # For patients with 3 scans, scan 2 is always referenced by scan 1 (hence the mismatch), scan 3 by scan 2, and scan 1 by scan 2.
mismatch_path = None
if self.data_dir_paths[idx][0].__contains__("2_3"):
mismatch_path = self.data_dir_paths[idx][0].replace("2/2_3", "1/1_3")
for i, modality in enumerate(self.modalities):
with h5py.File(os.path.join(mismatch_path, f'{modality.value}.h5'), 'r') as f:
mismatch.append(f['data'][()])
is_mismatch = True
x = np.array(x)
x_ref = np.array(x_ref)
if x.shape != (1, self.size, self.size):
print("INCORRECT SHAPE", x_path, x.shape, label.shape, flush=True)
x = resize(x, (1, self.size, self.size))
label = resize(label, (self.n_classes, self.size, self.size), order=0)
if x_ref.shape != (1, self.size, self.size):
print("INCORRECT SHAPE", x_ref_path, x_ref.shape, ref_label.shape, flush=True)
x_ref = resize(x_ref, (1, self.size, self.size))
ref_label = resize(ref_label, (self.n_classes, self.size, self.size), order=0)
if not len(mismatch):
mismatch = x
else:
mismatch = np.array(mismatch)
if mismatch.shape != (1, self.size, self.size):
print("INCORRECT SHAPE mismatch", mismatch_path, mismatch.shape, flush=True)
mismatch = resize(mismatch, (1, self.size, self.size))
mismatch = torch.as_tensor(mismatch)
x = torch.as_tensor(x)
x_ref = torch.as_tensor(x_ref)
return x_ref.float(), x.float(), torch.as_tensor(ref_label).float(), torch.as_tensor(
label).float(), mismatch.float(), is_mismatch, is_last
| 4,962 | 46.721154 | 157 | py |
longitudinalCOVID | longitudinalCOVID-master/dataset/dynamic/preprocessing.py | import os
import yaml
from pathlib import Path
import numpy as np
import pandas as pd
from skimage.transform import resize
from tqdm import tqdm
from dataset.rigid_and_deformable_registration import deformable_registration
from .util import (
load_config_yaml,
split_idxs,
rm_tree,
verify_config_hash,
save_config_hash,
crop_to_mask
)
import atexit
from sklearn.model_selection import StratifiedKFold, KFold
import nibabel as nib
import time
import sys
class DatasetPreprocessor:
def __init__(
self,
config_yml_path, data_dir, size,
force_preprocessing=True,
cleanup=False,
verbose=True,
seed=42,
):
# load dataset config
config_yml_path = Path(config_yml_path)
self.data_dir = data_dir
self.size = size
assert config_yml_path.is_file(), f"config yaml could not be found at '{config_yml_path}', {os.curdir}"
self.cfg = load_config_yaml(config_yml_path)
self.verbose = verbose
if verbose:
print(f"Loaded {self.cfg['name']} setup from {config_yml_path}", flush=True)
# set data root dir
# self.data_root = Path(data_root)
self.pp_path = config_yml_path.parent
assert (
self.pp_path.is_dir()
), f"preprocessed directory could not be found at '{self.pp_path}'"
# load setup config
setup_yml_path = self.pp_path / "setup.yml"
assert Path(
setup_yml_path
).is_file(), f"setup yaml could not be found at '{config_yml_path}'"
self.setup = load_config_yaml(setup_yml_path)
if verbose:
print(f"Loaded {self.setup['name']} setup from {setup_yml_path}", flush=True)
# set temporary dir for npy files and csv
self.npy_path = self.pp_path / ("npy_" + self.cfg["name"])
self.tmp_df_path = self.npy_path / "tmp_df.csv"
# setup cleanup
if cleanup:
atexit.register(self.cleanup)
# load base patient dataframe
self.df_path = self.pp_path / "base_df.csv"
self.df = pd.read_csv(self.df_path)
if verbose:
print(f"Dataframe loaded from {self.df_path}")
if "drop_na_col" in self.cfg.keys():
if self.cfg["drop_na_col"] is not None:
df = self.df.dropna(subset=[self.cfg["drop_na_col"]])
self.df = df.reset_index(drop=True)
# check if patients are selected manually
if "manual_split" in self.cfg.keys():
print("Manual splits detected, stored in 'manual_split'")
self.df["manual_split"] = None
for split, pat_ids in self.cfg["manual_split"].items():
for pat_id in pat_ids:
self.df.loc[
self.df[self.setup["id_col"]] == pat_id, "manual_split"
] = split
# select only volumes that have a split assigned
self.df.dropna(subset=["manual_split"], inplace=True)
# temporary file in npy folder to lock only execute pre-processing once
lock_file = (self.npy_path / 'lock')
if lock_file.is_file():
print('found lock file - waiting until pre-processing is finished', end='')
while lock_file.is_file():
time.sleep(5)
print("sleeping")
print('.', end='')
print(' continuing')
# exit()
print("done making lock file", flush=True)
# check if temporary dir exists already
if (
self.npy_path.is_dir()
and not force_preprocessing
and self.tmp_df_path.is_file()
and verify_config_hash(config_yml_path, self.npy_path)
):
if verbose:
print(
f"npy folder found at {self.npy_path}! (delete folder for new preprocessing or set force_preprocessing)"
)
print(f"{self.setup['name']} '{self.cfg['name']}' preprocessed data found")
self.df = pd.read_csv(self.tmp_df_path)
else:
try:
self.npy_path.mkdir(exist_ok=force_preprocessing)
except FileExistsError:
print(
f"npy folder found at {self.npy_path}! (delete folder for new preprocessing or set force_preprocessing)"
)
# create lockfile
lock_file.touch()
# preprocess all data with npz files and safe npy
print(f"Preprocessing {self.setup['name']} '{self.cfg['name']}'..")
self._preprocess_all()
# select only volumes that have been preprocessed
df = self.df.dropna(subset=["dim0"])
num_vol = len(df)
num_pat = len(self.df)
if num_vol < num_pat:
print(
f"WARNING: only {num_vol} out of {num_pat} have been preprocessed. Dropping rest of entries.."
)
self.df = df.reset_index(drop=True)
if 'manual_split' in self.cfg.keys():
print('manual split found in config - skipping automatic splitting')
else:
if num_pat < 10:
print('less than 10 patients. 50-50 split in train and val')
test_size = 0
val_size = 0.5
train_size = 0.5
else:
# SIMPLE TRAIN, VAL, TEST SPLITTING
test_size = self.cfg["test_size"]
val_size = self.cfg["val_size"]
train_size = 1 - val_size - test_size
print(
f"Creating split 'train_val_test_split': {test_size:.0%} test, {val_size:.0%} val and {train_size:.0%} train"
)
splits = ["train", "val", "test"]
idxs = np.arange(len(self.df))
idxs_split = split_idxs(
idxs, test_size=test_size, val_size=val_size, seed=seed, shuffle=True
)
self.df["train_val_test_split"] = None
for split in splits:
self.df.loc[idxs_split[split], "train_val_test_split"] = split
# 5-FOLD-SPLIt
if len(self.df) > 5:
stratify = self.cfg['stratify']
idxs = np.arange(len(self.df))
n_splits = 5
if stratify:
strat_label = np.zeros_like(idxs)
for i, label in enumerate(self.cfg['labels']):
strat_label += 2 ** i * (self.df[f"num_{label}"] > 0).to_numpy(dtype=int)
skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
for k, (train_idx, val_idx) in enumerate(skf.split(idxs, strat_label)):
split_col = f"split_{n_splits}fold_{k}"
self.df[split_col] = None
self.df.loc[train_idx, split_col] = "train"
self.df.loc[val_idx, split_col] = "val"
strat_print = ", stratified"
else:
kf = KFold(n_splits=n_splits, shuffle=True, random_state=42)
for k, (train_idx, val_idx) in enumerate(kf.split(idxs)):
split_col = f"split_{n_splits}fold_{k}"
self.df[split_col] = None
self.df.loc[train_idx, split_col] = "train"
self.df.loc[val_idx, split_col] = "val"
strat_print = ''
print(
f"Created k-fold cross validation split: 'split_{n_splits}fold_k' - {n_splits}-fold, shuffle, seed 42{strat_print} - splits: 'train', 'val'"
)
else:
print('Omitting 5-fold-split due to limited number of volumes')
# copy config and create hash
new_cfg_path = self.npy_path / Path(config_yml_path).name
new_cfg_path.write_text(Path(config_yml_path).read_text())
save_config_hash(new_cfg_path, self.npy_path)
# save temporary dataframe
self.df.to_csv(self.tmp_df_path)
# remove lockfile
lock_file.unlink()
if verbose:
print(f"Temporary data has been extracted to {self.npy_path}")
print("Successfully preprocessed data")
def print_cfg(self):
# print config
print("\nConfiguration:")
print(yaml.dump(self.cfg))
def export_config(self, path):
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
self.df.to_csv(path / 'patients.csv')
(path / f'config_{self.cfg["name"]}.yml').write_text(yaml.dump(self.cfg))
def cleanup(self):
# cleaning up tmp dir
rm_tree(self.npy_path)
def _preprocess_all(self):
"""
loop through all patients in df with npz files
map channels and labels
preprocess and save npy files
"""
print("pre preprocessing started")
cfg = self.cfg["preprocessing"]
df = self.df
for i in range(3):
df[f"dim{i}"] = None
for label in self.cfg["labels"]:
df[f"num_{label}"] = 0
# drop rows where selected labelmap is not present
df.drop(
df[
df[f'nii_{self.cfg["labelmap"]}'] == False
].index, inplace=True)
for idx, patient in tqdm(df.iterrows(), total=df.shape[0]):
pat_id = patient[self.setup["id_col"]]
tt = []
for i in os.listdir(self.data_dir + f"/{pat_id}/preprocessed/"):
if i.endswith(".nii"):
tt += [i]
sys.stdout.flush()
times = range(len(tt))
datas, ps, p2s = [], [], []
for time in times:
# load data
print("i", time, times, flush=True)
p = self.data_dir+f"/{pat_id}/preprocessed/{pat_id}_0{time + 1}_simple_pp.nii" # Change directory if needed
data = nib.load(p).get_fdata()
# load seg
labelmap = self.cfg["labelmap"] if "labelmap" in self.cfg.keys() else "default"
p = self.data_dir+f"/{pat_id}/pathMasks/{pat_id}_0{time + 1}_pathMask.nii" # Change directory if needed
p2 = self.data_dir+f"/{pat_id}/masks/{pat_id}_0{time + 1}_mask.nii" # Change directory if needed
try:
seg = nib.load(p).get_fdata()
lung = nib.load(p2).get_fdata()
except:
continue
label_counts = self._get_label_counts(seg)
for k, v in label_counts.items():
self.df.loc[idx, f"num_{k}"] = np.array(v, dtype=np.uint64)
datas += [data]
ps += [seg]
p2s += [lung]
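            # every ordered pair (time_1, time_2) is processed per patient: time_1 is cropped, clipped,
            # normalized and resized, and, when the time points differ, deformably registered onto time_2;
            # results are saved as {time_1 + 1}-{time_2 + 1}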
            for time_1 in times:
                for time_2 in times:
                    # perform preprocessing (only done once)
                    cropped_data, cropped_seg, cropped_lung, mask = crop_to_mask(datas[time_1], ps[time_1], p2s[time_1])
                    cropped_data_ref, cropped_seg_ref, cropped_lung_ref, mask_ref = crop_to_mask(datas[time_2],
                                                                                                 ps[time_2],
                                                                                                 p2s[time_2])
                    data = cropped_data
                    seg = cropped_seg
                    lung = cropped_lung
                    data_ref = cropped_data_ref
                    seg_ref = cropped_seg_ref
                    lung_ref = cropped_lung_ref
                    data = self._clip(data, low_thr=cfg["clip_low_thr"], up_thr=cfg["clip_up_thr"])
                    data_ref = self._clip(data_ref, low_thr=cfg["clip_low_thr"], up_thr=cfg["clip_up_thr"])
                    # normalize
                    data = self._normalize(data, np.ones_like(data, dtype=bool))
                    data_ref = self._normalize(data_ref, np.ones_like(data_ref, dtype=bool))
                    data = resize(data, (self.size, self.size, self.size))
                    seg = resize(seg, (self.size, self.size, self.size), order=0)
                    lung = resize(lung, (self.size, self.size, self.size), order=0)
                    data_ref = resize(data_ref, (self.size, self.size, self.size))
                    seg_ref = resize(seg_ref, (self.size, self.size, self.size), order=0)
                    lung_ref = resize(lung_ref, (self.size, self.size, self.size), order=0)
                    # if np.histogram(seg, [0, 1, 2, 3, 4, 5])[0][1] == 0: # but some data have non-int values so we fix that first
                    com_seg = seg.astype(np.uint32)
                    com_lung = np.clip(np.round(lung), 0, 1)
                    seg[com_seg + com_lung == 1] = 1
                    if time_1 != time_2:  # register time_1 data to time_2 data
                        lung, data, seg = deformable_registration(lung_ref,
                                                                  lung,
                                                                  data_ref,
                                                                  data,
                                                                  seg_ref,
                                                                  seg)
                    # save number of layers to df
                    for i in range(3):
                        self.df.loc[idx, f"dim{i}"] = np.array(
                            data.shape[i], dtype=np.uint64
                        )
                    # save to disk as npy
                    parent_dir = {"simple_pp": "preprocessed", "pathMask": "pathMasks", "mask": "masks"}
                    save_dict = {}
                    save_dict["simple_pp"] = data
                    save_dict["pathMask"] = seg
                    save_dict["mask"] = lung
                    for key in save_dict.keys():
                        path = Path(self.data_dir + f"/{pat_id}/{parent_dir[key]}/{pat_id}_{time_1 + 1}-{time_2 + 1}_{key}")  # change directory
                        np.save(path.with_suffix(".npy"), save_dict[key])
                    print("saved", pat_id, time_1, time_2, flush=True)
def _get_label_counts(self, seg):
counts = {}
for c, label in enumerate(self.cfg["labels"]):
counts[label] = (seg == c).sum()
return counts
def _remap_channels(self, data):
"""map selected modalities to input channels"""
channels = self.cfg["channels"]
new_data = []
for c, modality in enumerate(channels):
new_data.append(np.expand_dims(data[modality], axis=0))
new_data = np.hstack(new_data)
return new_data
def _remap_labels(self, seg, labelmap):
""""map selected labels to segmentation map values"""
new_seg = np.zeros(seg.shape, dtype=seg.dtype)
for new_label_value, label_name in enumerate(self.cfg["labels"]):
label_value = self.setup["labels"][labelmap][label_name]
new_seg[seg == label_value] = new_label_value
if self.cfg["labelmap"] == "quicknat" and label_name == "Cortical Grey Matter Right":
new_seg[
(seg > 100) & (seg % 2 == 0)
] = new_label_value
if self.cfg["labelmap"] and label_name == "Cortical Grey Matter Left":
new_seg[
(seg > 100) & (seg % 2 == 1)
] = new_label_value
return new_seg
def _normalize(self, data, mask):
"""normalize grey values optionally taking into account non-zero maks"""
data = data.astype(np.float32)
if not self.cfg["preprocessing"]["norm_mask"]:
mask = np.ones_like(mask)
if self.cfg["preprocessing"]["norm_method"] == "minmax":
# taken from quicknat
data[mask] = (data[mask] - np.min(data[mask])) / (
np.max(data[mask]) - np.min(data[mask])
)
elif self.cfg["preprocessing"]["norm_method"] == "std":
# taken from nnunet
data[mask] = (data[mask] - data[mask].mean()) / (
data[mask].std() + 1e-8
)
data[mask == 0] = 0
return data
def _clip(self, data, low_thr=-1024, up_thr=600):
data[data < low_thr] = low_thr
data[data > up_thr] = up_thr
return data
| 16,929 | 40.192214 | 164 | py |
longitudinalCOVID | longitudinalCOVID-master/dataset/dynamic/util.py | from pathlib import Path
import yaml
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
import hashlib
import torch
def load_config_yaml(path):
"""loads a yaml config from file and returns a dict"""
path = Path(path)
with open(path) as file:
cfg = yaml.full_load(file)
return cfg
def save_config_yaml(path, config):
path = Path(path)
with open(path, "w") as file:
yaml.dump(config, file)
def split_idxs(idxs_in, test_size=0.1, val_size=0.1, seed=42, shuffle=True):
"""split indices into test, val and train
"""
idxs_out = {}
if test_size > 0:
idxs_out["train"], idxs_out["test"] = train_test_split(
idxs_in, test_size=test_size, shuffle=shuffle, stratify=None, random_state=seed
)
else:
idxs_out["test"] = []
idxs_out["train"] = idxs_in
if val_size > 0:
idxs_out["train"], idxs_out["val"] = train_test_split(
idxs_out["train"],
test_size=val_size / (1 - test_size),
shuffle=True,
stratify=None,
random_state=seed,
)
else:
idxs_out["val"] = []
return idxs_out
def rm_tree(pth: Path):
"""WARNING: deletes path recursively like rm -rf"""
print(f"Recursively deleting '{pth}'")
for child in pth.iterdir():
if child.is_file():
child.unlink()
else:
rm_tree(child)
pth.rmdir()
def get_sha256_hash(path):
"""returns sha256 hash from file found at path"""
return hashlib.sha256(Path(path).read_bytes()).hexdigest()
def save_hash(hash, path):
"""save hash to given path"""
with open(path, "w") as hash_file:
print(hash, file=hash_file, end="")
def load_hash(path):
"""load hash from path"""
with open(path, "r") as hash_file:
return hash_file.read()
def verify_config_hash(config_path, npy_path: Path):
"""checks if config is the same as hashed and return bool"""
hash_path = npy_path / "config_hash.sha256"
if hash_path.is_file():
new_hash = get_sha256_hash(config_path)
old_hash = load_hash(hash_path)
if new_hash == old_hash:
return True
return False
def save_config_hash(config_path, npy_path: Path):
"""saves hash of given config"""
cfg_hash = get_sha256_hash(config_path)
hash_path = npy_path / "config_hash.sha256"
save_hash(cfg_hash, hash_path)
def make_config(cfg, dyndata_path):
"""write a config yaml file based on the cfg dictionary provided"""
pp_path = dyndata_path
setup_yml_path = pp_path / "setup.yml"
assert Path(
setup_yml_path
).is_file(), f"setup yaml could not be found at '{setup_yml_path}'"
setup = load_config_yaml(setup_yml_path)
cfg["setup_hash"] = get_sha256_hash(setup_yml_path)
if "labels" not in cfg.keys():
assert (
"labelmap" in cfg.keys()
), "labelmap needs to be specified check setup script"
labels_dict = setup["labels"][cfg["labelmap"]]
cfg["labels"] = sorted(labels_dict, key=labels_dict.get)
cfg_path = (pp_path / f"config_{cfg['name']}.yml").absolute()
save_config_yaml(cfg_path, cfg)
print(
f"'{cfg['name']}' config for '{setup['name']}' dataset \nwas successfully saved to '{cfg_path}'"
)
def to_crop_padded_tensor_3d(data, out_dims=[64, 64, 64], padding_value=0):
""" pads a list of numpy arrays to given output dimension and
returns one big tensor """
num_chan = data.shape[0]
data = torch.from_numpy(data)
out_shape = [num_chan, *out_dims]
out_dims = torch.tensor(out_dims)
out_tensor = torch.full(size=out_shape, fill_value=padding_value, dtype=data.dtype)
for i in range(num_chan):
in_dims = torch.tensor(data[i].shape)
        padding = (out_dims - in_dims) // 2  # integer division so the results can be used as slice indices
start = padding.clone()
start_data = torch.zeros_like(padding)
end_data = in_dims.clone()
end = padding + in_dims
# check if tensor needs to be cropped
for d in range(3):
if in_dims[d] > out_dims[d]:
start[d] = 0
start_data[d] = -padding[d]
end[d] = out_dims[d]
end_data[d] = start_data[d] + out_dims[d]
out_tensor[
i, start[0]:end[0], start[1]:end[1], start[2]:end[2]
] = data[i, start_data[0]:end_data[0], start_data[1]:end_data[1], start_data[2]:end_data[2]]
return out_tensor
def random_narrow_tensor(tensors, narrow_size, dim=0, include="center", ignore_bg=True):
non_zero = (
tensors[1][ignore_bg:] != 0
).nonzero() # Contains non-zero indices for all 4 dims
h_min = non_zero[:, dim].min()
h_max = non_zero[:, dim].max()
if include == "target":
start_slice = int(
np.clip(
(h_min + (((h_max - h_min) - narrow_size)) * np.random.random()),
0,
tensors[0].size(dim) - narrow_size,
)
)
elif include == "center":
start_slice = int(
np.clip(
((h_min + (h_max - h_min) / 2) - narrow_size / 2),
0,
tensors[0].size(dim) - narrow_size,
)
)
elif include == "random":
start_slice = np.random.randint(tensors[0].size(dim) - narrow_size)
else:
return tensors
for i in range(len(tensors)):
tensors[i] = torch.narrow(tensors[i], dim, start_slice, narrow_size)
return tensors
def crop_to_mask(data, seg,
lung): # crops segmentation mask and data to where the lung mask (and segmentation) mask are non-zero
"""
crop data and return non-zero mask
inspired by nnunet and stackoverflow
# """
crop_threshold = -1000000000
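    # with this very low threshold the first bounding box keeps essentially the whole volume;
    # the effective crop comes from the pathology and lung mask bounding boxes computed below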
mask = np.zeros(data.shape, dtype=bool)
# non zero mask over all channels
cmask = data > crop_threshold
mask = cmask | mask
# non black coordinates
coords = np.argwhere(mask)
# bounding box
x_min, y_min, z_min = coords.min(axis=0)
x_max, y_max, z_max = coords.max(axis=0) + 1 # include top slice
cropped_data = data[x_min:x_max, y_min:y_max, z_min:z_max]
cropped_seg = seg[x_min:x_max, y_min:y_max, z_min:z_max]
cropped_lung = lung[x_min:x_max, y_min:y_max, z_min:z_max]
mask = mask[x_min:x_max, y_min:y_max, z_min:z_max]
coords = np.argwhere(cropped_seg)
coords2 = np.argwhere(cropped_lung)
# bounding box
x_min, y_min, z_min = np.concatenate((np.array([coords2.min(axis=0)]),np.array([coords.min(axis=0)])), axis=0).min(axis=0) # change to : 'coords2.min(axis=0)' for only considering lung mask
x_max, y_max, z_max = np.concatenate((np.array([coords2.max(axis=0)]),np.array([coords.max(axis=0)])), axis=0).max(axis=0) + 1 # include top slice # change to: 'coords2.max(axis=0)' for only considering lung mask
cropped_lung = cropped_lung[x_min:x_max, y_min:y_max, z_min:z_max]
cropped_seg = cropped_seg[x_min:x_max, y_min:y_max, z_min:z_max]
cropped_data = cropped_data[x_min:x_max, y_min:y_max, z_min:z_max]
return np.array(cropped_data), np.array(cropped_seg), np.array(cropped_lung), mask
| 7,244 | 32.082192 | 217 | py |
longitudinalCOVID | longitudinalCOVID-master/logger/visualization.py | import importlib
from datetime import datetime
from enum import Enum
class Mode(Enum):
TRAIN = 'Train'
VAL = 'Val'
class TensorboardWriter():
def __init__(self, log_dir, logger, enabled):
self.writer = None
self.selected_module = ""
if enabled:
log_dir = str(log_dir)
# Retrieve visualization writer.
succeeded = False
for module in ["torch.utils.tensorboard", "tensorboardX"]:
try:
self.writer = importlib.import_module(module).SummaryWriter(log_dir)
succeeded = True
break
except ImportError:
succeeded = False
self.selected_module = module
if not succeeded:
message = "Warning: visualization (Tensorboard) is configured to use, but currently not installed on " \
"this machine. Please install TensorboardX with 'pip install tensorboardx', upgrade PyTorch to " \
"version >= 1.1 to use 'torch.utils.tensorboard' or turn off the option in the 'config.json' file."
logger.warning(message)
self.step = 0
self.mode = None
self.tb_writer_ftns = {
'add_scalar', 'add_scalars', 'add_image', 'add_images', 'add_audio', 'add_graph',
'add_text', 'add_histogram', 'add_pr_curve', 'add_embedding'
}
self.tag_mode_exceptions = {'add_graph', 'add_histogram', 'add_embedding'}
self.timer = datetime.now()
def set_step(self, step, mode=Mode.TRAIN):
self.mode = mode
self.step = step
if step == 0:
self.timer = datetime.now()
else:
duration = datetime.now() - self.timer
self.add_scalar('steps_per_sec', 1 / duration.total_seconds())
self.timer = datetime.now()
def __getattr__(self, name):
"""
If visualization is configured to use:
return add_data() methods of tensorboard with additional information (step, tag) added.
Otherwise:
return a blank function handle that does nothing
"""
if name in self.tb_writer_ftns:
add_data = getattr(self.writer, name, None)
def wrapper(tag, data, *args, **kwargs):
if add_data is not None:
# add mode(train/valid) tag
if name not in self.tag_mode_exceptions:
tag = '{}/{}'.format(tag, self.mode.value)
add_data(tag, data, self.step, *args, **kwargs)
return wrapper
else:
# default action for returning methods defined in this class, set_step() for instance.
try:
                attr = object.__getattribute__(self, name)
except AttributeError:
raise AttributeError("type object '{}' has no attribute '{}'".format(self.selected_module, name))
return attr
| 3,020 | 36.296296 | 125 | py |
longitudinalCOVID | longitudinalCOVID-master/logger/logger.py | import logging
import logging.config
from pathlib import Path
from utils import read_json
def setup_logging(save_dir, log_config='logger/logger_config.json', default_level=logging.INFO):
"""
Setup logging configuration
"""
log_config = Path(log_config)
if log_config.is_file():
config = read_json(log_config)
# modify logging paths based on run config
for _, handler in config['handlers'].items():
if 'filename' in handler:
handler['filename'] = str(save_dir / handler['filename'])
logging.config.dictConfig(config)
else:
print("Warning: logging configuration file is not found in {}.".format(log_config))
logging.basicConfig(level=default_level)
| 751 | 30.333333 | 96 | py |
longitudinalCOVID | longitudinalCOVID-master/logger/__init__.py | from .logger import *
from .visualization import *
| 51 | 16.333333 | 28 | py |
longitudinalCOVID | longitudinalCOVID-master/configs/Static_Network.py | import os
from polyaxon_client.tracking import get_outputs_path
CONFIG = {
"name": f"{os.path.basename(__file__).split('.')[0]}",
"n_gpu": 1,
"arch": {
"type": "FCDenseNet",
"args": {
"in_channels": 1,
"n_classes": 5
}
},
"dataset": {
"type": "DatasetStatic",
"num_patients": 22,
"cross_val": True,
"val_fold_num": 4,
"val_fold_len": 4,
"args": {
"data_dir": "/data/COVID_longitudinal/test",
"preprocess": False,
"size": 300,
"n_classes": 5,
"modalities": ['simple'],
"val_patients": None # if not using cross validation: [2,6,10,14] or an arbitrary array of patient numbers
}
},
"data_loader": {
"type": "Dataloader",
"args": {
"batch_size": 1,
"shuffle": False,
"num_workers": 4,
}
},
"optimizer": {
"type": "Adam",
"args": {
"lr": 0.0001,
"weight_decay": 0,
"amsgrad": True
}
},
"loss": "mse",
"metrics": [
"precision", "recall", "dice_loss", "dice_score", "asymmetric_loss",
],
"lr_scheduler": {
"type": "StepLR",
"args": {
"step_size": 50,
"gamma": 0.1
}
},
"trainer": {
"type": "StaticTrainer",
"epochs": 1,
"save_dir": get_outputs_path(),
"save_period": 1,
"verbosity": 2,
"monitor": "min val_loss",
"early_stop": 10,
"tensorboard": True
}
}
| 1,637 | 22.73913 | 119 | py |
longitudinalCOVID | longitudinalCOVID-master/configs/Longitudinal_Late_Fusion.py | import os
from polyaxon_client.tracking import get_outputs_path
CONFIG = {
"name": f"{os.path.basename(__file__).split('.')[0]}",
"n_gpu": 1,
"arch": {
"type": "LateLongitudinalFCDenseNet",
"args": {
"in_channels": 1,
"n_classes": 5
}
},
"dataset": {
"type": "DatasetLongitudinal",
"num_patients": 22,
"cross_val": True,
"val_fold_num": 4,
"val_fold_len": 4,
"args": {
"data_dir": "/data/COVID_longitudinal/test",
"preprocess": False,
"size": 300,
"n_classes": 5,
"modalities": ['simple'],
"val_patients": None # if not using cross validation: [2,6,10,14] or an arbitrary array of patient numbers
}
},
"data_loader": {
"type": "Dataloader",
"args": {
"batch_size": 2,
"shuffle": False,
"num_workers": 4,
}
},
"optimizer": {
"type": "Adam",
"args": {
"lr": 0.0001,
"weight_decay": 0,
"amsgrad": True
}
},
"loss": "mse",
"metrics": [
"precision", "recall", "dice_loss", "dice_score", "asymmetric_loss"
],
"lr_scheduler": {
"type": "StepLR",
"args": {
"step_size": 50,
"gamma": 0.1
}
},
"trainer": {
"type": "LongitudinalTrainer",
"epochs": 1, #change to 100 in Train Phase
"save_dir": get_outputs_path(),
"save_period": 1,
"verbosity": 2,
"monitor": "min val_loss",
"early_stop": 10,
"tensorboard": True
}
}
| 1,693 | 23.911765 | 119 | py |
longitudinalCOVID | longitudinalCOVID-master/configs/Longitudinal_Network.py | import os
from polyaxon_client.tracking import get_outputs_path
CONFIG = {
"name": f"{os.path.basename(__file__).split('.')[0]}",
"n_gpu": 1,
"arch": {
"type": "LongitudinalFCDenseNet",
"args": {
"in_channels": 1,
"siamese": False,
"n_classes": 5
}
},
"dataset": {
"type": "DatasetLongitudinal",
"num_patients": 22,
"cross_val": True,
"val_fold_num": 4,
"val_fold_len": 4,
"args": {
"data_dir": "/data/COVID_longitudinal/test",
"preprocess": False,
"size": 300,
"n_classes": 5,
"modalities": ['simple'],
"val_patients": None # if not using cross validation: [2,6,10,14] or an arbitrary array of patient numbers
}
},
"data_loader": {
"type": "Dataloader",
"args": {
"batch_size": 2,
"shuffle": True, # for test to use LTPR and VD metrics use False
"num_workers": 4,
}
},
"optimizer": {
"type": "Adam",
"args": {
"lr": 0.0001,
"weight_decay": 0,
"amsgrad": True
}
},
"loss": "mse",
"metrics": [
"precision", "recall", "dice_loss", "dice_score", "asymmetric_loss"
],
"lr_scheduler": {
"type": "StepLR",
"args": {
"step_size": 50,
"gamma": 0.1
}
},
"trainer": {
"type": "LongitudinalTrainer",
"epochs": 1, #change to 100 in Train Phase
"save_dir": get_outputs_path(),
"save_period": 1,
"verbosity": 2,
"monitor": "min val_loss",
"early_stop": 10,
"tensorboard": True
}
}
| 1,767 | 24.257143 | 119 | py |
longitudinalCOVID | longitudinalCOVID-master/configs/Longitudinal_Network_with_Progression_Learning.py | import os
from polyaxon_client.tracking import get_outputs_path
CONFIG = {
"name": f"{os.path.basename(__file__).split('.')[0]}",
"n_gpu": 1,
"arch": {
"type": "LongitudinalFCDenseNet",
"args": {
"in_channels": 1,
"siamese": False,
"n_classes": 5
}
},
"dataset": {
"type": "DatasetLongitudinal",
"num_patients": 22,
"cross_val": True,
"val_fold_num": 4,
"val_fold_len": 4,
"args": {
"data_dir": "/data/COVID_longitudinal/test",
"preprocess": False,
"size": 300,
"n_classes": 5,
"modalities": ['simple'],
"val_patients": None # if not using cross validation: [2,6,10,14] or an arbitrary array of patient numbers
}
},
"data_loader": {
"type": "Dataloader",
"args": {
"batch_size": 2,
"shuffle": True, # for test to use LTPR and VD metrics use False
"num_workers": 4,
}
},
"optimizer": {
"type": "Adam",
"args": {
"lr": 0.0001,
"weight_decay": 0,
"amsgrad": True
}
},
"loss": "mse",
"metrics": [
"precision", "recall", "dice_loss", "dice_score", "asymmetric_loss"
],
"lr_scheduler": {
"type": "StepLR",
"args": {
"step_size": 50,
"gamma": 0.1
}
},
"trainer": {
"type": "LongitudinalMaskPropagationTrainer",
"epochs": 1, #change to 100 in Train Phase
"save_dir": get_outputs_path(),
"save_period": 1,
"verbosity": 2,
"monitor": "min val_loss",
"early_stop": 10,
"tensorboard": True
}
}
| 1,782 | 24.471429 | 119 | py |
longitudinalCOVID | longitudinalCOVID-master/base/base_model.py | from abc import abstractmethod
import numpy as np
import torch.nn as nn
class BaseModel(nn.Module):
"""
Base class for all models
"""
@abstractmethod
def forward(self, *inputs):
"""
Forward pass logic
:return: Model output
"""
raise NotImplementedError
def __str__(self):
"""
Model prints with number of trainable parameters
"""
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return super().__str__() + '\nTrainable parameters: {}'.format(params)
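if __name__ == "__main__":
    # Usage sketch (illustrative): a concrete model only has to implement forward();
    # printing an instance then reports its trainable parameter count via __str__.
    class TinyModel(BaseModel):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(4, 2)
        def forward(self, x):
            return self.fc(x)
    print(TinyModel())  # ends with "Trainable parameters: 10"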
| 650 | 22.25 | 79 | py |
longitudinalCOVID | longitudinalCOVID-master/base/base_trainer.py | from abc import abstractmethod
import torch
from numpy import inf
from logger import TensorboardWriter
class BaseTrainer:
"""
Base class for all trainers
"""
def __init__(self, model, loss, metric_ftns, optimizer, config, fold=None):
self.config = config
self.logger = config.get_logger('trainer', config['trainer']['verbosity'])
# setup GPU device if available, move model into configured device
self.device, device_ids = self._prepare_device(config['n_gpu'])
self.model = model.to(self.device)
if len(device_ids) > 1:
self.model = torch.nn.DataParallel(model, device_ids=device_ids)
self.loss = loss
self.metric_ftns = metric_ftns
self.optimizer = optimizer
if fold:
self.fold = str(fold)
else:
self.fold = ''
cfg_trainer = config['trainer']
self.epochs = cfg_trainer['epochs']
self.save_period = cfg_trainer['save_period']
self.monitor = cfg_trainer.get('monitor', 'off')
# configuration to monitor model performance and save best
if self.monitor == 'off':
self.mnt_mode = 'off'
self.mnt_best = 0
else:
self.mnt_mode, self.mnt_metric = self.monitor.split()
assert self.mnt_mode in ['min', 'max']
self.mnt_best = inf if self.mnt_mode == 'min' else -inf
self.early_stop = cfg_trainer.get('early_stop', inf)
self.start_epoch = 1
self.checkpoint_dir = config.save_dir
# setup visualization writer instance
self.writer = TensorboardWriter(config.log_dir, self.logger, cfg_trainer['tensorboard'])
if config.resume is not None:
self._resume_checkpoint(config.resume)
self.not_improved_count = 0
@abstractmethod
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Current epoch number
"""
raise NotImplementedError
def train(self):
"""
Full training logic
"""
best_log = None
for epoch in range(self.start_epoch, self.epochs + 1):
result = self._train_epoch(epoch)
            # save logged information into log dict
log = {'epoch': epoch}
log.update(result)
            # print logged information to the screen #IMPORTANT CHANGE
for key, value in log.items():
self.logger.info(' {:15s}: {}'.format(str(key), value))
# evaluate model performance according to configured metric, save best checkpoint as model_best
best = False
if self.mnt_mode != 'off':
try:
# check whether model performance improved or not, according to specified metric(mnt_metric)
improved = (self.mnt_mode == 'min' and log[self.mnt_metric] <= self.mnt_best) or \
(self.mnt_mode == 'max' and log[self.mnt_metric] >= self.mnt_best)
except KeyError:
self.logger.warning("Warning: Metric '{}' is not found. "
"Model performance monitoring is disabled.".format(self.mnt_metric))
self.mnt_mode = 'off'
improved = False
if improved:
self.mnt_best = log[self.mnt_metric]
best_log = log
self.not_improved_count = 0
best = True
else:
self.not_improved_count += 1
if self.not_improved_count > self.early_stop:
self.logger.info("Validation performance hasn\'t improve for {} epochs. Training stops.".format(self.early_stop))
break
if epoch % self.save_period == 0:
self._save_checkpoint(epoch, save_best=best)
return best_log
def _prepare_device(self, n_gpu_use):
"""
setup GPU device if available, move model into configured device
"""
n_gpu = torch.cuda.device_count()
if n_gpu_use > 0 and n_gpu == 0:
self.logger.warning("Warning: There\'s no GPU available on this machine, training will be performed on CPU.")
n_gpu_use = 0
if n_gpu_use > n_gpu:
self.logger.warning("Warning: The number of GPU\'s configured to use is {}, but only {} are available "
"on this machine.".format(n_gpu_use, n_gpu))
n_gpu_use = n_gpu
device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')
list_ids = list(range(n_gpu_use))
return device, list_ids
def _save_checkpoint(self, epoch, save_best=False):
"""
Saving checkpoints
:param epoch: current epoch number
:param save_best: if True, rename the saved checkpoint to 'model_best.pth'
"""
arch = type(self.model).__name__
state = {
'arch': arch,
'epoch': epoch,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'monitor_best': self.mnt_best,
'config': self.config
}
filename = f'{str(self.checkpoint_dir)}/checkpoint-epoch{epoch}.pth'
# filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))
torch.save(state, filename)
self.logger.info("Saving checkpoint: {} ...".format(filename))
if save_best:
best_path = f'{str(self.checkpoint_dir)}/model_best'+self.fold+'.pth'
torch.save(state, best_path)
self.logger.info("Saving current best: model_best.pth ...")
def _resume_checkpoint(self, resume_path):
"""
Resume from saved checkpoints
:param resume_path: Checkpoint path to be resumed
"""
resume_path = str(resume_path)
self.logger.info("Loading checkpoint: {} ...".format(resume_path))
checkpoint = torch.load(resume_path, map_location=lambda storage, loc: storage)
self.start_epoch = checkpoint['epoch'] + 1
self.mnt_best = checkpoint['monitor_best']
# load architecture params from checkpoint.
if checkpoint['config']['arch'] != self.config['arch']:
self.logger.warning("Warning: Architecture configuration given in config file is different from that of "
"checkpoint. This may yield an exception while state_dict is being loaded.")
status = self._load_dict(checkpoint)
self.logger.warning(f'Missing keys: {str(status[0])}') if status[0] else None
self.logger.warning(f'Unexpected keys: {str(status[1])}') if status[1] else None
# load optimizer state from checkpoint only when optimizer type is not changed.
if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:
self.logger.warning("Warning: Optimizer type given in config file is different from that of checkpoint. "
"Optimizer parameters not being resumed.")
else:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.logger.info("Checkpoint loaded. Resume training from epoch {}".format(self.start_epoch))
def _load_dict(self, checkpoint):
return list(self.model.load_state_dict(checkpoint['state_dict'], False))
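if __name__ == "__main__":
    # Usage sketch (illustrative): concrete trainers subclass BaseTrainer and implement
    # _train_epoch, returning a dict of logged metrics; checkpointing, monitoring and
    # early stopping are inherited. Instantiating requires a parsed config object
    # (get_logger, save_dir, log_dir, resume, ...), so only the subclass shape is shown.
    class MinimalTrainer(BaseTrainer):
        def _train_epoch(self, epoch):
            return {"loss": 0.0, "val_loss": 0.0}
    print(MinimalTrainer.__name__, "extends", BaseTrainer.__name__)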
| 7,505 | 39.354839 | 133 | py |
longitudinalCOVID | longitudinalCOVID-master/base/base_data_loader.py | import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.sampler import SubsetRandomSampler
class BaseDataLoader(DataLoader):
"""
Base class for all data loaders
"""
def __init__(self, dataset, batch_size, shuffle, validation_split, num_workers, collate_fn=default_collate):
self.validation_split = validation_split
self.shuffle = shuffle
self.batch_idx = 0
self.n_samples = len(dataset)
self.sampler, self.valid_sampler = self._split_sampler(self.validation_split)
self.init_kwargs = {
'dataset': dataset,
'batch_size': batch_size,
'shuffle': self.shuffle,
'collate_fn': collate_fn,
'num_workers': num_workers
}
super().__init__(sampler=self.sampler, **self.init_kwargs)
def _split_sampler(self, split):
if split == 0.0:
return None, None
idx_full = np.arange(self.n_samples)
np.random.seed(0)
np.random.shuffle(idx_full)
if isinstance(split, int):
assert split > 0
assert split < self.n_samples, "validation set size is configured to be larger than entire dataset."
len_valid = split
else:
len_valid = int(self.n_samples * split)
valid_idx = idx_full[0:len_valid]
train_idx = np.delete(idx_full, np.arange(0, len_valid))
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
# turn off shuffle option which is mutually exclusive with sampler
self.shuffle = False
self.n_samples = len(train_idx)
return train_sampler, valid_sampler
def split_validation(self):
if self.valid_sampler is None:
return None
else:
return DataLoader(sampler=self.valid_sampler, **self.init_kwargs)
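if __name__ == "__main__":
    # Usage sketch (illustrative): wrap any torch Dataset; a non-zero validation_split
    # carves out a second loader backed by a SubsetRandomSampler via split_validation().
    import torch
    from torch.utils.data import TensorDataset
    ds = TensorDataset(torch.arange(100).float().unsqueeze(1), torch.zeros(100))
    loader = BaseDataLoader(ds, batch_size=10, shuffle=True, validation_split=0.2, num_workers=0)
    val_loader = loader.split_validation()
    print(len(loader.sampler), len(val_loader.sampler))  # 80 20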
| 1,971 | 30.301587 | 112 | py |
longitudinalCOVID | longitudinalCOVID-master/base/__init__.py | from .base_data_loader import *
from .base_model import *
from .base_trainer import *
| 86 | 20.75 | 31 | py |
longitudinalCOVID | longitudinalCOVID-master/utils/util.py | import json
import pprint
from collections import OrderedDict
from itertools import repeat
from pathlib import Path
import pandas as pd
def write_config(content, fname):
with fname.open('wt') as handle:
handle.write("CONFIG = " + pprint.pformat(content))
handle.close()
def read_json(fname):
fname = Path(fname)
with fname.open('rt') as handle:
return json.load(handle, object_hook=OrderedDict)
def write_json(content, fname):
fname = Path(fname)
with fname.open('wt') as handle:
json.dump(content, handle, indent=4, sort_keys=False)
def inf_loop(data_loader):
''' wrapper function for endless data loader. '''
for loader in repeat(data_loader):
yield from loader
class MetricTracker:
def __init__(self, *keys, writer=None, logger=None):
self.writer = writer
self.logger = logger
keys = ["loss", "precision0", "precision1", "precision2", "precision3", "precision4",
"recall0", "recall1", "recall2", "recall3", "recall4",
"dice_loss", "dice_score0", "dice_score1", "dice_score2", "dice_score3", "dice_score4",
"asymmetric_loss"
]
keys += ["precision_difference0", "precision_difference1", "precision_difference2",
"recall_difference0", "recall_difference1", "recall_difference2",
"dice_loss_difference", "dice_score_difference0", "dice_score_difference1", "dice_score_difference2",
"asymmetric_loss_difference"
]
keys += ["precision_difference_reverse0", "precision_difference_reverse1", "precision_difference_reverse2",
"recall_difference_reverse0", "recall_difference_reverse1", "recall_difference_reverse2",
"dice_loss_difference_reverse", "dice_score_difference_reverse0", "dice_score_difference_reverse1",
"dice_score_difference_reverse2",
"asymmetric_loss_difference_reverse"
]
self.keys = keys
self._data = pd.DataFrame(index=keys, columns=['total', 'counts', 'average'])
self.reset()
def reset(self):
for col in self._data.columns:
self._data[col].values[:] = 0
def update(self, key, value, n=1, is_last=False):
if self.writer is not None:
try:
for i, v in enumerate(value):
if v is None:
continue
self.writer.add_scalar(key + str(i), v)
self._data.total[key + str(i)] += v * n
self._data.counts[key + str(i)] += n
self._data.average[key + str(i)] = self._data.total[key + str(i)] / self._data.counts[key + str(i)]
except Exception as e:
if value is None:
return
self.writer.add_scalar(key, value)
self._data.total[key] += value * n
self._data.counts[key] += n
self._data.average[key] = self._data.total[key] / self._data.counts[key]
# if is_last: #use for obtaining separate results for each patient
# self.logger.info("End of Volume!")
# for key in self.keys:
# self.logger.info(' {:15s}: {}'.format(str(key), self._data.average[key]))
# self.reset()
def avg(self, key):
return self._data.average[key]
def result(self):
return dict(self._data.average)
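if __name__ == "__main__":
    # Usage sketch (illustrative): the metric keys are fixed inside MetricTracker, and
    # update() only accumulates when a writer is attached, so a no-op writer stands in
    # for the TensorboardWriter here. Scalar and per-class (list) values are both accepted.
    class _NoOpWriter:
        def add_scalar(self, *args, **kwargs):
            pass
    tracker = MetricTracker(writer=_NoOpWriter())
    tracker.update("loss", 0.5)
    tracker.update("dice_score", [0.9, 0.8, 0.7, 0.6, 0.5])
    print(tracker.avg("loss"), tracker.avg("dice_score0"))  # 0.5 0.9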
| 3,525 | 34.979592 | 119 | py |
longitudinalCOVID | longitudinalCOVID-master/utils/__init__.py | from .util import *
| 20 | 9.5 | 19 | py |
longitudinalCOVID | longitudinalCOVID-master/utils/illustration_util.py | import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from torchvision.utils import make_grid
from PIL import Image, ImageDraw
def warp_flow(img, flow):
h, w = flow.shape[:2]
flow = -flow
flow[:, :, 0] += np.arange(w)
flow[:, :, 1] += np.arange(h)[:, np.newaxis]
res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
return res
def visualize_flow(flow):
"""Visualize optical flow
Args:
flow: optical flow map with shape of (H, W, 2), with (y, x) order
Returns:
RGB image of shape (H, W, 3)
"""
assert flow.ndim == 3
assert flow.shape[2] == 2
hsv = np.zeros([flow.shape[0], flow.shape[1], 3], dtype=np.uint8)
mag, ang = cv2.cartToPolar(flow[..., 1], flow[..., 0])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 1] = 255
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
return rgb
def visualize_difference(x):
rgbs = []
for i in x:
hsv = np.zeros([i.shape[0], i.shape[1], 3], dtype=np.uint8)
hsv[..., 1] = 255
hsv[..., 2] = 255
hsv[..., 0] = i * 255 // 2 # cv2.normalize(i, None, 0, 255, cv2.NORM_INF)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
rgbs += [rgb]
return np.array(rgbs)
def prepare_encoded(encoded):
encoded = encoded.detach().cpu().numpy().astype('float32')
heatmap = np.mean(encoded, axis=1).squeeze() # mean on the channels
# relu on top of the heatmap
heatmap = np.maximum(heatmap, 0)
# normalize the heatmap
heatmap /= np.max(heatmap)
return heatmap
def log_visualizations(writer, x_ref, x, output, target, output_ref, target_ref, outputDiff,
groundDiff, outputDiffReverse, groundDiffReverse, encoded, toy=False):
batch_size = x.size(0)
x_ref = cast(x_ref)
x = cast(x)
output = cast(output, True, True, True)
target = cast(target, True, True, True)
if output_ref is not None and groundDiff is not None:
outputDiff = visualize_difference(outputDiff.cpu().detach().numpy()).astype('float32')
groundDiff = visualize_difference(groundDiff.cpu().detach().numpy()).astype("float32")
outputDiffReverse = visualize_difference(outputDiffReverse.cpu().detach().numpy()).astype('float32')
groundDiffReverse = visualize_difference(groundDiffReverse.cpu().detach().numpy()).astype("float32")
output_ref = cast(output_ref, True, True, True)
target_ref = cast(target_ref, True, True, True)
if encoded is not None:
encoded = np.reshape(np.array([prepare_encoded(encoded)]), (batch_size, 9, 9, 1))
for i in range(batch_size):
if not toy:
a1, a2, b1, b, c, c1 = x_ref[i], x[i], output_ref[i], target_ref[i], output[i], target[i]
tensor1 = np.expand_dims(np.transpose(np.hstack([a1, a2, b1, b, c, c1]), (2, 0, 1)), axis=0)
writer.add_image('xRef_x_outputRef_targetRef_output_target',
make_grid(torch.as_tensor(tensor1), nrow=8, normalize=False))
else:
a, a1, b, b1, c, c1 = x[i], x_ref[i], output_ref[i], target_ref[i], output[i], target[i]
tensor2 = np.expand_dims(np.transpose(np.hstack([a, a1, b, b1, c, c1]), (2, 0, 1)), axis=0)
writer.add_image('TOY_x_xref_outputRef_targetRef_output_target',
make_grid(torch.as_tensor(tensor2), nrow=8, normalize=False))
if not toy:
d, e, f, g = outputDiff[i], groundDiff[i], outputDiffReverse[i], groundDiffReverse[i]
tensor3 = np.expand_dims(np.transpose(np.hstack([d, e, f, g]), (2, 0, 1)), axis=0)
writer.add_image('outDiff_groundDiff_outDiffReverse_groundDiffReverse',
make_grid(torch.as_tensor(tensor3), nrow=8, normalize=False))
else:
d, e, f, g = outputDiff[i], groundDiff[i], outputDiffReverse[i], groundDiffReverse[i]
tensor4 = np.expand_dims(np.transpose(np.hstack([d, e, f, g]), (2, 0, 1)), axis=0)
writer.add_image('TOY_outDiff_groundDiff_outDiffReverse_groundDiffReverse',
make_grid(torch.as_tensor(tensor4), nrow=100, normalize=False))
if encoded is not None:
if not toy:
encodedd = encoded[i]
tensor5 = np.expand_dims(np.transpose(encodedd, (2, 0, 1)), axis=0)
writer.add_image('encodedLongitudinal',
make_grid(torch.as_tensor(tensor5), nrow=8, normalize=False))
else:
x_toy = encoded[i]
tensor5 = np.expand_dims(np.transpose(x_toy, (2, 0, 1)), axis=0)
writer.add_image('encodedTOY',
make_grid(torch.as_tensor(tensor5), nrow=8, normalize=False))
elif groundDiff is None and output_ref is not None:
for i in range(batch_size):
a1, a2, b, b1, c, c1 = x_ref[i], x[i], output_ref[i], target_ref[i], output[i], target[i]
tensor = np.expand_dims(np.transpose(np.hstack([a1, a2, b, b1, c, c1]), (2, 0, 1)), axis=0)
writer.add_image('xRef_x_outputRef(2)_targetRef_output_target',
make_grid(torch.as_tensor(tensor), nrow=8, normalize=True))
else:
for i in range(batch_size):
a1, a2, b, c = x_ref[i], x[i], output[i], target[i]
tensor = np.expand_dims(np.transpose(np.hstack([a1, a2, b, c]), (2, 0, 1)), axis=0)
writer.add_image('xRef_x_output_target',
make_grid(torch.as_tensor(tensor), nrow=8, normalize=True))
def log_visualizations_deformations(writer, input_moving, input_fixed, flow, target_moving, target_fixed, output=None):
zipped_data = zip(
cast(input_moving),
cast(input_fixed),
cast(flow, normalize_data=False),
cast(target_moving, True),
cast(target_fixed, True),
cast(output, True) if type(None) != type(output) else [None for _ in input_moving]
)
for (_input_moving, _input_fixed, _flow, _target_moving, _target_fixed, _output) in zipped_data:
transposed_flow = np.transpose(_flow, (1, 2, 0))
illustration = [
_input_moving,
_input_fixed,
visualize_flow(transposed_flow) / 255.,
_target_moving,
_target_fixed
]
if type(None) != type(_output):
illustration.append(_output)
tensor = np.expand_dims(np.transpose(np.hstack(illustration), (2, 0, 1)), axis=0)
description = 'xRef_x_flowfield_targetRef_target_output'
writer.add_image(description, make_grid(torch.as_tensor(tensor), nrow=8, normalize=True))
def cast(data, argmax=False, normalize_data=True, mask=False):
data2 = data.cpu().detach().numpy()
if argmax:
data2 = np.argmax(data2, axis=1)
data2 = data2.astype('float32')
if normalize_data:
data2 = np.asarray([normalize(date, mask) for date in data2])
return data2
def normalize(x, mask):
if len(x.shape) > 2:
x = x[0]
if mask:
hsv = np.zeros([x.shape[0], x.shape[1], 3], dtype=np.uint8)
hsv[..., 1] = 255
hsv[..., 2] = 255
hsv[..., 0] = x * 255 // 4
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB) # cv2.cvtColor(x * 1/4, cv2.COLOR_GRAY2RGB) #cv2.normalize(x, None, 0, 255, cv2.NORM_MINMAX) for grayscale
return rgb
else:
return cv2.cvtColor(cv2.normalize(x, None, 0, 1, cv2.NORM_MINMAX), cv2.COLOR_GRAY2RGB)
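if __name__ == "__main__":
    # Usage sketch (illustrative): visualize_flow expects an (H, W, 2) displacement field
    # and returns an (H, W, 3) RGB rendering; warp_flow resamples an image with the same
    # field through cv2.remap.
    flow = np.random.randn(8, 8, 2).astype(np.float32)
    img = np.random.rand(8, 8).astype(np.float32)
    print(visualize_flow(flow).shape, warp_flow(img, flow).shape)  # (8, 8, 3) (8, 8)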
| 7,893 | 40.547368 | 158 | py |
longitudinalCOVID | longitudinalCOVID-master/model/FCDenseNet.py | from base import BaseModel
from model.utils.layers import *
class FCDenseNetEncoder(BaseModel):
def __init__(self, in_channels=1, down_blocks=(4, 4, 4, 4, 4), bottleneck_layers=4, growth_rate=12, out_chans_first_conv=48):
super().__init__()
self.down_blocks = down_blocks
self.skip_connection_channel_counts = []
self.has_bottle_neck = True if bottleneck_layers>0 else False
self.add_module('firstconv', nn.Conv2d(in_channels=in_channels, out_channels=out_chans_first_conv, kernel_size=3, stride=1, padding=1, bias=True))
self.cur_channels_count = out_chans_first_conv
self.denseBlocksDown = nn.ModuleList([])
self.transDownBlocks = nn.ModuleList([])
for i in range(len(down_blocks)):
self.denseBlocksDown.append(DenseBlock(self.cur_channels_count, growth_rate, down_blocks[i]))
self.cur_channels_count += (growth_rate * down_blocks[i])
self.skip_connection_channel_counts.insert(0, self.cur_channels_count)
self.transDownBlocks.append(TransitionDown(self.cur_channels_count))
if self.has_bottle_neck:
self.add_module('bottleneck', Bottleneck(self.cur_channels_count, growth_rate, bottleneck_layers))
self.prev_block_channels = growth_rate * bottleneck_layers
self.cur_channels_count += self.prev_block_channels
def forward(self, x):
out = self.firstconv(x)
skip_connections = []
for i in range(len(self.down_blocks)):
out = self.denseBlocksDown[i](out)
skip_connections.append(out)
out = self.transDownBlocks[i](out)
if self.has_bottle_neck:
out = self.bottleneck(out)
return out, skip_connections
class FCDenseNetDecoder(BaseModel):
def __init__(self, prev_block_channels, skip_connection_channel_counts, growth_rate, n_classes, up_blocks, apply_softmax=True):
super().__init__()
self.apply_softmax = apply_softmax
self.up_blocks = up_blocks
self.transUpBlocks = nn.ModuleList([])
self.denseBlocksUp = nn.ModuleList([])
for i in range(len(self.up_blocks) - 1):
self.transUpBlocks.append(TransitionUp(prev_block_channels, prev_block_channels))
cur_channels_count = prev_block_channels + skip_connection_channel_counts[i]
self.denseBlocksUp.append(DenseBlock(cur_channels_count, growth_rate, self.up_blocks[i], upsample=True))
prev_block_channels = growth_rate * self.up_blocks[i]
cur_channels_count += prev_block_channels
self.transUpBlocks.append(TransitionUp(prev_block_channels, prev_block_channels))
cur_channels_count = prev_block_channels + skip_connection_channel_counts[-1]
self.denseBlocksUp.append(DenseBlock(cur_channels_count, growth_rate, self.up_blocks[-1], upsample=False))
cur_channels_count += growth_rate * self.up_blocks[-1]
self.finalConv = nn.Conv2d(in_channels=cur_channels_count, out_channels=n_classes, kernel_size=1, stride=1, padding=0, bias=True)
self.softmax = nn.Softmax2d()
def forward(self, out, skip_connections):
for i in range(len(self.up_blocks)):
skip = skip_connections[-i - 1]
out = self.transUpBlocks[i](out, skip)
out = self.denseBlocksUp[i](out)
out = self.finalConv(out)
if self.apply_softmax:
out = self.softmax(out)
return out
class FCDenseNet(BaseModel):
def __init__(self,
in_channels=1, down_blocks=(4, 4, 4, 4, 4),
up_blocks=(4, 4, 4, 4, 4), bottleneck_layers=4,
growth_rate=12, out_chans_first_conv=48, n_classes=2, apply_softmax=True, encoder=None):
super().__init__()
self.up_blocks = up_blocks
self.encoder = encoder
if not encoder:
self.encoder = FCDenseNetEncoder(in_channels=in_channels, down_blocks=down_blocks, bottleneck_layers=bottleneck_layers, growth_rate=growth_rate,
out_chans_first_conv=out_chans_first_conv)
prev_block_channels = self.encoder.prev_block_channels
skip_connection_channel_counts = self.encoder.skip_connection_channel_counts
self.decoder = FCDenseNetDecoder(prev_block_channels, skip_connection_channel_counts, growth_rate, n_classes, up_blocks, apply_softmax)
def forward(self, x, is_encoder_output=False):
if is_encoder_output:
out, skip_connections = x
else:
out, skip_connections = self.encoder(x)
out = self.decoder(out, skip_connections)
return out
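if __name__ == "__main__":
    # Usage sketch (illustrative): the configuration used by the configs in this
    # repository (in_channels=1, n_classes=5) applied to one random slice; the output
    # keeps the spatial size and holds per-class softmax probabilities.
    model = FCDenseNet(in_channels=1, n_classes=5)
    with torch.no_grad():
        out = model(torch.randn(1, 1, 128, 128))
    print(out.shape)  # torch.Size([1, 5, 128, 128])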
| 4,679 | 45.8 | 156 | py |
longitudinalCOVID | longitudinalCOVID-master/model/LongitudinalFCDenseNet.py | from base import BaseModel
from model.FCDenseNet import FCDenseNetEncoder, FCDenseNetDecoder
from model.utils.layers import *
class LongitudinalFCDenseNet(BaseModel):
def __init__(self,
in_channels=1, down_blocks=(4, 4, 4, 4, 4),
up_blocks=(4, 4, 4, 4, 4), bottleneck_layers=4,
growth_rate=12, out_chans_first_conv=48, n_classes=2, encoder=None, siamese=True):
super().__init__()
self.up_blocks = up_blocks
self.densenet_encoder = encoder
self.siamese = siamese
if not encoder:
self.densenet_encoder = FCDenseNetEncoder(in_channels=in_channels * (1 if siamese else 2), down_blocks=down_blocks,
bottleneck_layers=bottleneck_layers,
growth_rate=growth_rate, out_chans_first_conv=out_chans_first_conv)
prev_block_channels = self.densenet_encoder.prev_block_channels
skip_connection_channel_counts = self.densenet_encoder.skip_connection_channel_counts
if self.siamese:
self.add_module('merge_conv', nn.Conv2d(prev_block_channels * 2, prev_block_channels, 1, 1))
self.decoder = FCDenseNetDecoder(prev_block_channels, skip_connection_channel_counts, growth_rate, n_classes, up_blocks)
def forward(self, x_ref, x):
if self.siamese:
out, skip_connections = self.densenet_encoder(x)
out_ref, _ = self.densenet_encoder(x_ref)
out = torch.cat((out, out_ref), dim=1)
out1 = self.merge_conv(out)
else:
out1, skip_connections = self.densenet_encoder(torch.cat((x_ref, x), dim=1))
out = self.decoder(out1, skip_connections)
return out, out1 #returning the encoded featuremap for visualization purposes
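if __name__ == "__main__":
    # Usage sketch (illustrative): the longitudinal model takes a reference slice and a
    # current slice; with siamese=False (as in the provided configs) the two time points
    # are stacked on the channel axis and passed through a single encoder.
    model = LongitudinalFCDenseNet(in_channels=1, n_classes=5, siamese=False)
    with torch.no_grad():
        out, encoded = model(torch.randn(1, 1, 128, 128), torch.randn(1, 1, 128, 128))
    print(out.shape, encoded.shape)  # segmentation map and bottleneck feature map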
| 1,858 | 45.475 | 128 | py |
longitudinalCOVID | longitudinalCOVID-master/model/LateLongitudinalFCDenseNet.py | from base import BaseModel
from model.FCDenseNet import FCDenseNetEncoder, FCDenseNetDecoder
from model.utils.layers import *
class LateLongitudinalFCDenseNet(BaseModel):
def __init__(self,
in_channels=1, down_blocks=(4, 4, 4, 4, 4),
up_blocks=(4, 4, 4, 4, 4), bottleneck_layers=4,
growth_rate=12, out_chans_first_conv=48, n_classes=2, encoder=None):
super().__init__()
self.up_blocks = up_blocks
self.densenet_encoder = encoder
if not encoder:
self.densenet_encoder = FCDenseNetEncoder(in_channels=in_channels , down_blocks=down_blocks,
bottleneck_layers=bottleneck_layers,
growth_rate=growth_rate, out_chans_first_conv=out_chans_first_conv)
prev_block_channels = 2* self.densenet_encoder.prev_block_channels
skip_connection_channel_counts = self.densenet_encoder.skip_connection_channel_counts
self.decoder = FCDenseNetDecoder(prev_block_channels, skip_connection_channel_counts, growth_rate, n_classes, up_blocks)
def forward(self, x_ref, x):
out1, skip_connections = self.densenet_encoder(x)
out_ref, _ = self.densenet_encoder(x_ref)
out = torch.cat((out1, out_ref), dim=1)
out = self.decoder(out, skip_connections)
return out, out1
| 1,423 | 38.555556 | 128 | py |
longitudinalCOVID | longitudinalCOVID-master/model/utils/metric_utils.py | import numpy as np
import torch
def asymmetric_loss(beta, output, target):
g = flatten(target)
p = flatten(output)
pg = (p * g).sum(-1)
beta_sq = beta ** 2
a = beta_sq / (1 + beta_sq)
b = 1 / (1 + beta_sq)
g_p = ((1 - p) * g).sum(-1)
p_g = (p * (1 - g)).sum(-1)
loss = (1. + pg) / (1. + pg + a * g_p + b * p_g)
total_loss = torch.mean(1. - loss)
return total_loss
def eps_tp_tn_fp_fn(output, target):
with torch.no_grad():
epsilon = 1e-7
target = flatten(target).cpu().detach().float()
output = flatten(output).cpu().detach().float()
if len(output.shape) == 2: # is one hot encoded vector
target = np.argmax(target, axis=0)
output = np.argmax(output, axis=0)
tp = torch.sum(target * output)
tn = torch.sum((1 - target) * (1 - output))
fp = torch.sum((1 - target) * output)
fn = torch.sum(target * (1 - output))
return epsilon, tp.float(), tn.float(), fp.float(), fn.float()
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
if type(tensor) == torch.Tensor:
C = tensor.size(1)
# new axis order
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
# Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
transposed = tensor.permute(axis_order)
# Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
return transposed.contiguous().view(C, -1).float()
else:
        return torch.as_tensor(tensor.flatten()).float()
| 1,646 | 32.612245 | 70 | py |
longitudinalCOVID | longitudinalCOVID-master/model/utils/loss.py | import torch
import torch.nn.functional as F
from model.utils import metric_utils
import numpy as np
def inf(*args):
return torch.as_tensor(float("Inf"))
def gradient_loss(s):
dy = torch.abs(s[:, :, 1:, :] - s[:, :, :-1, :]) ** 2
dx = torch.abs(s[:, :, :, 1:] - s[:, :, :, :-1]) ** 2
return (torch.mean(dx) + torch.mean(dy)) / 2.0
def multitask_loss(warp, flow, output, input_fixed, target_fixed):
lung_mask = torch.zeros_like(target_fixed)
lung_mask[target_fixed != 0] = 1
warp = warp * lung_mask
input_fixed = input_fixed * lung_mask
recon_loss = mse(warp, input_fixed)
grad_loss = gradient_loss(flow)
seg_loss = mse(output, target_fixed)
return recon_loss + 0.01 * grad_loss + seg_loss
def deformation_loss(warp, flow, input_fixed):
recon_loss = mse(warp, input_fixed)
grad_loss = gradient_loss(flow)
return recon_loss + 0.01 * grad_loss
def l1(output, target):
return F.l1_loss(output, target)
def mse(output, target):
return F.mse_loss(output, target)
def mse_difference(output, target, output_ref, target_ref, outDiff, groundDiff):
return F.mse_loss(output, target) + F.mse_loss(output_ref, target_ref) + F.mse_loss(outDiff, groundDiff)
def nll_loss(output, target):
return F.nll_loss(metric_utils.flatten(output), metric_utils.flatten(target))
def dice_loss(output, target, weights):
size = output.size()
outputs = torch.zeros_like(output)
targets = torch.zeros_like(target)
for i in range(size[0]):
for j in range(size[1]):
outputs[i][j] = output[i][j] * weights[j]
targets[i][j] = target[i][j] * weights[j]
    # use the class-weighted tensors computed above
    return metric_utils.asymmetric_loss(1, outputs, targets)
def asymmetric_loss(output, target):
return metric_utils.asymmetric_loss(2, output, target)
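if __name__ == "__main__":
    # Usage sketch (illustrative shapes): gradient_loss regularises a dense (N, 2, H, W)
    # flow field and deformation_loss combines it with the reconstruction error between
    # the warped moving image and the fixed image.
    flow = torch.randn(1, 2, 16, 16)
    warped = torch.rand(1, 1, 16, 16)
    fixed = torch.rand(1, 1, 16, 16)
    print(gradient_loss(flow).item(), deformation_loss(warped, flow, fixed).item())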
| 1,817 | 26.969231 | 108 | py |
longitudinalCOVID | longitudinalCOVID-master/model/utils/layers.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class SpatialTransformer(nn.Module):
def __init__(self, size, mode='bilinear'):
super(SpatialTransformer, self).__init__()
vectors = [torch.arange(0, s) for s in size]
grid = torch.unsqueeze(torch.stack(torch.meshgrid(vectors)), dim=0).float()
self.register_buffer('grid', grid)
self.mode = mode
def forward(self, src, flow):
new_locs = self.grid + flow
shape = flow.shape[2:]
for i in range(len(shape)):
new_locs[:, i, ...] = 2 * (new_locs[:, i, ...] / (shape[i] - 1) - 0.5)
new_locs = new_locs.permute(0, 2, 3, 1)
new_locs = new_locs[..., [1, 0]]
return F.grid_sample(src, new_locs, mode=self.mode, align_corners=True)
class DenseLayer(nn.Sequential):
def __init__(self, in_channels, growth_rate):
super().__init__()
self.add_module('norm', nn.BatchNorm2d(in_channels))
self.add_module('relu', nn.ReLU(True))
self.add_module('conv', nn.Conv2d(in_channels, growth_rate, kernel_size=3, stride=1, padding=1, bias=True))
self.add_module('drop', nn.Dropout2d(0.3))
def forward(self, x):
return super().forward(x)
class DenseBlock(nn.Module):
def __init__(self, in_channels, growth_rate, n_layers, upsample=False):
super().__init__()
self.upsample = upsample
self.layers = nn.ModuleList([DenseLayer(in_channels + i * growth_rate, growth_rate) for i in range(n_layers)])
def forward(self, x):
if self.upsample:
new_features = []
for layer in self.layers:
out = layer(x)
x = torch.cat([x, out], 1)
new_features.append(out)
return torch.cat(new_features, 1)
else:
for layer in self.layers:
out = layer(x)
x = torch.cat([x, out], 1)
return x
class TransitionDown(nn.Sequential):
def __init__(self, in_channels):
super().__init__()
self.add_module('norm', nn.BatchNorm2d(num_features=in_channels))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, bias=True))
self.add_module('drop', nn.Dropout2d(0.2))
self.add_module('maxpool', nn.MaxPool2d(2))
def forward(self, x):
return super().forward(x)
class TransitionUp(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.convTrans = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=0, bias=True)
def forward(self, x, skip_x):
out = self.convTrans(x)
out = center_crop(out, skip_x.size(2), skip_x.size(3))
out = torch.cat([out, skip_x], 1)
return out
class Bottleneck(nn.Sequential):
def __init__(self, in_channels, growth_rate, n_layers):
super().__init__()
self.add_module('bottleneck', DenseBlock(in_channels, growth_rate, n_layers, upsample=True))
def forward(self, x):
return super().forward(x)
def center_crop(layer, max_height, max_width):
_, _, h, w = layer.size()
xy1 = (w - max_width) // 2
xy2 = (h - max_height) // 2
return layer[:, :, xy2:(xy2 + max_height), xy1:(xy1 + max_width)]
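if __name__ == "__main__":
    # Usage sketch (illustrative): SpatialTransformer warps a source image with a dense
    # displacement field; a zero flow returns the input (up to interpolation error).
    stn = SpatialTransformer(size=(16, 16))
    src = torch.rand(1, 1, 16, 16)
    flow = torch.zeros(1, 2, 16, 16)
    print(torch.allclose(stn(src, flow), src, atol=1e-5))  # True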
| 3,420 | 32.871287 | 142 | py |
longitudinalCOVID | longitudinalCOVID-master/model/utils/metric.py | import numpy as np
import torch
from sklearn.metrics import f1_score, precision_score, recall_score, roc_curve
from medpy import metric
from model.utils import metric_utils
def precision(output, target):
with torch.no_grad():
target = metric_utils.flatten(target).cpu().detach().float()
output = metric_utils.flatten(output).cpu().detach().float()
if len(output.shape) == 2: # is one hot encoded vector
target = np.argmax(target, axis=0)
output = np.argmax(output, axis=0)
return precision_score(target, output, average=None) # average='macro' for macro averaging
def recall(output, target):
with torch.no_grad():
target = metric_utils.flatten(target).cpu().detach().float()
output = metric_utils.flatten(output).cpu().detach().float()
if len(output.shape) == 2: # is one hot encoded vector
target = np.argmax(target, axis=0)
output = np.argmax(output, axis=0)
return recall_score(target, output, average=None) # average='macro' for macro averaging
def dice_loss(output, target):
with torch.no_grad():
return metric_utils.asymmetric_loss(1, output, target)
def dice_score(output, target):
with torch.no_grad():
target = metric_utils.flatten(target).cpu().detach().float()
output = metric_utils.flatten(output).cpu().detach().float()
if len(output.shape) == 2: # is one hot encoded vector
target = np.argmax(target, axis=0)
output = np.argmax(output, axis=0)
f = f1_score(target, output, average=None) # average='macro' for macro averaging
return f
def asymmetric_loss(output, target):
with torch.no_grad():
return metric_utils.asymmetric_loss(2, output, target)
lt1, lt2 = [0] * 5, [0] * 5
def LTPR(output, target, is_last=True):
tprs = []
global lt1, lt2
with torch.no_grad():
target = metric_utils.flatten(target).cpu().detach().float()
output = metric_utils.flatten(output).cpu().detach().float()
if len(output.shape) == 2: # is one hot encoded vector
target = np.argmax(target, axis=0)
output = np.argmax(output, axis=0)
for i in range(5):
output1 = output.clone()
target1 = target.clone()
output1[output1 == i] = 10
output1[output1 != 10] = 0
output1[output1 == 10] = 1
target1[target1 == i] = 10
target1[target1 != 10] = 0
target1[target1 == 10] = 1
output1 = output1.detach().cpu().numpy()
target1 = target1.detach().cpu().numpy()
            result = np.atleast_1d(output1.astype(bool))
            reference = np.atleast_1d(target1.astype(bool))
lt1[i] += np.count_nonzero(result * reference)
lt2[i] += np.count_nonzero(reference)
if 0 == lt2[i]:
tpr = None
else:
tpr = lt1[i] / float(lt2[i])
tprs += [tpr]
if is_last:
lt1, lt2 = [0] * 5, [0] * 5
return tprs
else:
return None
lf1, lf2 = [0] * 5, [0] * 5
def LFPR(output, target, is_last=True):
fprs = []
global lf1, lf2
with torch.no_grad():
target = metric_utils.flatten(target).cpu().detach().float()
output = metric_utils.flatten(output).cpu().detach().float()
if len(output.shape) == 2: # is one hot encoded vector
target = np.argmax(target, axis=0)
output = np.argmax(output, axis=0)
for i in range(5):
output1 = output.clone()
target1 = target.clone()
output1[output1 == i] = 10
output1[output1 != 10] = 0
output1[output1 == 10] = 1
target1[target1 == i] = 10
target1[target1 != 10] = 0
target1[target1 == 10] = 1
output1 = output1.detach().cpu().numpy()
target1 = target1.detach().cpu().numpy()
            result = np.atleast_1d(output1.astype(bool))
            reference = np.atleast_1d(target1.astype(bool))
lf1[i] += np.count_nonzero(result * (1 - reference))
lf2[i] += np.count_nonzero(reference)
if 0 == lf2[i]:
fpr = None
else:
fpr = lf1[i] / float(lf2[i])
fprs += [fpr]
if is_last:
lf1, lf2 = [0] * 5, [0] * 5
return fprs
else:
return None
vol1 = [0] * 5
vol2 = [0] * 5
def vd(output, target, is_last=True):
vds = []
global vol1, vol2
with torch.no_grad():
target = metric_utils.flatten(target).cpu().detach().float()
output = metric_utils.flatten(output).cpu().detach().float()
if len(output.shape) == 2: # is one hot encoded vector
target = np.argmax(target, axis=0)
output = np.argmax(output, axis=0)
for i in range(5):
output1 = output.clone()
target1 = target.clone()
output1[output1 == i] = 10
output1[output1 != 10] = 0
output1[output1 == 10] = 1
target1[target1 == i] = 10
target1[target1 != 10] = 0
target1[target1 == 10] = 1
output1 = output1.detach().cpu().numpy()
target1 = target1.detach().cpu().numpy()
            result = np.atleast_1d(output1.astype(bool))
            reference = np.atleast_1d(target1.astype(bool))
vol1[i] += np.count_nonzero(result)
vol2[i] += np.count_nonzero(reference)
vd = abs(vol1[i] - vol2[i])
vds += [vd]
if is_last:
vol1, vol2 = [0] * 5, [0] * 5
return vds
else:
return None
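if __name__ == "__main__":
    # Usage sketch (illustrative): the metrics take one-hot (N, C, ...) outputs and
    # targets and report one value per class, matching the per-class keys tracked by
    # MetricTracker; a perfect prediction gives a dice score of 1 for every class present.
    target = torch.nn.functional.one_hot(torch.randint(0, 5, (2, 8, 8)), 5).permute(0, 3, 1, 2).float()
    print(dice_score(target.clone(), target))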
| 5,769 | 30.703297 | 99 | py |
CONTAIN | CONTAIN-main/contain.py | import networkx as nx
import networkx.algorithms.community as nx_comm
import matplotlib.pyplot as plt
from networkx.generators.random_graphs import gnm_random_graph
import random as rnd
import time
from SparseShield_NIvsHS.Scripts.SparseShieldSolver import SparseShieldSolver
from SparseShield_NIvsHS.Scripts.NetShieldSolver import NetShieldSolver
import os
def Contain(G, seeds, budget, r, dr):
    nbs = [set(G.neighbors(s)) | {s} for s in seeds]
    subs = [G.subgraph(n) for n in nbs]
    agg = nx.compose_all(subs)
    seed_group = next(nx.connected_components(agg))
while True:
comms = nx_comm.louvain_communities(G, resolution=r, seed=42)
intersects = [x for c in comms if (x := (seed_group & c))]
ratios = [(x, len(x)/len(seed_group)) for x in intersects]
if len(ratios) >= budget:
sorted_ratios = sorted(ratios, key=lambda item:item[1], reverse=True)[0:budget]
return sorted_ratios, r
        if len(seed_group) < budget:
            # the seed group can never split into `budget` intersecting communities
            return [], -1
        # not enough intersecting communities yet: raise the resolution and retry
        r += dr
# CONTAIN, NetShield, and SparseShield usage
if __name__ == "__main__":
    for f in ['tvshow_edges.csv', 'politician_edges.csv', 'government_edges.csv', 'public_figure_edges.csv', 'athletes_edges.csv', 'company_edges.csv', 'new_sites_edges.csv', 'artist_edges.csv']:
G = nx.read_edgelist(os.path.join("data", f), delimiter=',', create_using=nx.Graph(), nodetype=int)
deg_sort = sorted([(n, len(list(G.neighbors(n)))) for n in G.nodes()], key=lambda item:item[1], reverse=True)
seeds = deg_sort[0:int(len(deg_sort)*0.1)]
seeds = sorted([x[0] for x in seeds])
s=set(seeds)
k=len(seeds)
print(len(G.nodes), len(G.edges))
# How to use CONTAIN
start_time = time.time()
ranks, r = Contain(G, seeds=seeds, budget=10, r=1, dr=0.25)
immunised_nodes = sum([len(x[0]) for x in ranks])
end_time = time.time()
print("Contain: budget", k, "time", end_time-start_time, "No. immunised nodes", immunised_nodes)
print('====================================')
# Test SparseShield
start_time = time.time()
solver = SparseShieldSolver(G, seeds=seeds, k=k)
nodes = solver.sparse_shield()
end_time = time.time()
print("SparseShield: budget", k, "time", end_time-start_time, "No. immunised nodes", len(nodes))
print('====================================')
        # Test NetShield
start_time = time.time()
solver = NetShieldSolver(G, seeds=seeds, k=k)
nodes = solver.net_shield()
end_time = time.time()
print("NetShield: budget", k, "time", end_time-start_time, "No. immunised nodes", len(nodes))
print('====================================')
| 2,793 | 41.333333 | 217 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/Simulator.py | import time
import networkx as nx
import random
import numpy as np
import logging
from collections import defaultdict
import warnings
from joblib import Parallel, delayed
class Simulator():
def __init__(self, G, seeds):
self.G = G
self.seeds = seeds
self.blocked = {}
self.log = {}
def add_blocked(self, name, node_set):
self.blocked[name] = list(node_set)
def run(self, iterations, num_threads=22):
assert(sum([not self.G.has_node(n) for n in self.seeds]) == 0)
for key in self.blocked:
blocked_list = self.blocked[key]
# any blocked or seed node should exist in the graph
assert(sum([not self.G.has_node(n) for n in blocked_list]) == 0)
self.log['iterations'] = iterations
iteration_results = []
results = Parallel(n_jobs=num_threads)(
delayed(self.run_iteration)() for i in range(iterations))
for result in results:
iteration_results.append(result)
self.log.update(
self.merge_results_across_iterations(iteration_results))
return self.log
def run_iteration(self):
        return self.simulation_as_possible_world()
    def simulation_as_possible_world(self):
        '''
        Runs one possible-world simulation and calculates the number of saved nodes
        '''
t1 = time.time()
front_nodes = self.seeds
active_series = []
active_series.append(len(front_nodes))
active = set()
active.update(self.seeds)
iterations = 0
active_edges = set()
active_subgraph = nx.DiGraph()
active_subgraph.add_nodes_from([key for key in active])
while (len(front_nodes) > 0):
front_edges = self.get_front_edges(front_nodes)
active_edges.update(front_edges)
front_nodes = [e[1] for e in front_edges if e[1] not in active]
active.update(front_nodes)
active_series.append(active_series[-1]+len(front_nodes))
iterations += 1
active_subgraph.add_edges_from(active_edges)
results = {}
results['iterations until termination in unblocked graph'] = iterations
results['active nodes in unblocked graph'] = len(active_subgraph)
results['solvers'] = {}
for blocked_set_name in self.blocked:
blocked_list = self.blocked[blocked_set_name]
results['solvers'][blocked_set_name] = {}
active_subgraph_with_blocked = active_subgraph.subgraph(
[node for node in active_subgraph.nodes() if node not in blocked_list])
active_subgraph_with_blocked = self.get_reachable_subgraph_from_seeds(
active_subgraph_with_blocked)
activated_node_amount = len(active_subgraph_with_blocked)
saved_node_amount = results['active nodes in unblocked graph'] - \
activated_node_amount
results['solvers'][blocked_set_name]['activated nodes'] = activated_node_amount
results['solvers'][blocked_set_name]['saved nodes'] = saved_node_amount
results['solvers'][blocked_set_name]['fraction of saved nodes to active nodes'] = saved_node_amount / \
results['active nodes in unblocked graph']
t2 = time.time()
results['simulation time'] = t2 - t1
return results
def get_reachable_subgraph_from_seeds(self, G):
G = G.copy()
G.add_node("superseed")
G.add_edges_from([("superseed", n) for n in self.seeds])
node_subset = nx.descendants(G, "superseed")
return G.subgraph(node_subset - set(["superseed"]))
def get_front_edges(self, front_nodes):
new_front_edges = []
for v in front_nodes:
for u in self.G.successors(v):
if (np.random.rand() <= self.G[v][u]['weight']):
new_front_edges.append((v, u))
return new_front_edges
def merge_results_across_iterations(self, results):
assert(len(results) > 0)
r = results[0]
N = len(results)
merged = {}
for key in r:
if key == "solvers":
continue
merged[key] = self.get_list_stats(
[results[i][key] for i in range(N)])
merged['solvers'] = {}
for alg in r['solvers']:
merged['solvers'][alg] = {}
for key in r['solvers'][alg]:
l = [results[i]['solvers'][alg][key] for i in range(N)]
merged['solvers'][alg][key] = self.get_list_stats(
[results[i]['solvers'][alg][key] for i in range(N)])
return merged
def get_list_stats(self, l):
s = {}
warnings.simplefilter("ignore", category=RuntimeWarning)
s['mean'] = np.mean(l)
s['var'] = np.var(l, ddof=1)
return s
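if __name__ == "__main__":
    # Usage sketch (illustrative): the simulator expects a weighted DiGraph (edge 'weight'
    # is the activation probability), a seed set, and one or more named blocked sets.
    G = nx.gnm_random_graph(50, 200, seed=1, directed=True)
    nx.set_edge_attributes(G, 0.3, "weight")
    sim = Simulator(G, seeds=[0, 1])
    top = [n for n in sorted(G.nodes(), key=G.out_degree, reverse=True) if n not in (0, 1)][:5]
    sim.add_blocked("degree-top5", top)
    print(sim.run(iterations=4, num_threads=2)["solvers"]["degree-top5"]["saved nodes"])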
| 4,867 | 37.634921 | 115 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/main.py | from DomSolver import DomSolver
from networkx.generators.random_graphs import gnm_random_graph
import random as rnd
import time
if __name__ == "__main__":
en_pair = (978, 10217)
G = gnm_random_graph(en_pair[0], en_pair[1], seed=None, directed=True)
E = []
for e in G.edges:
E.append((e[0], e[1], rnd.randint(1,1)))
G.add_weighted_edges_from(E)
solver = DomSolver(G, [0], 10)
solver.run()
# print(solver.get_rank())
    print(solver.get_best_nodes(10))
| 496 | 28.235294 | 74 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/RandomSolver.py | import networkx as nx
import time
import numpy as np
from Solver import *
class RandomSolver(Solver):
def run(self):
t1 = time.time()
random_blocked_set = np.random.choice([node for node in self.G.nodes() if node not in self.seeds], self.k, replace=False)
t2 = time.time()
self.log['Total time'] = (t2-t1)
self.log['Blocked nodes'] = [int(node) for node in random_blocked_set]
| 424 | 27.333333 | 129 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/SparseShieldSeedlessSolver.py | from Solver import *
import networkx as nx
import numpy as np
import time
import sys
import itertools
from scipy.sparse.linalg import eigsh
import os
from heapq import *
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class PriorityQueue:
def __init__(self, initlist):
self.counter = itertools.count() # unique sequence count
self.entry_finder = {} # mapping of tasks to entries
self.pq = []
for el in initlist:
entry = [-el[0], next(self.counter), el[1]]
self.pq.append(entry)
self.entry_finder[el[1]] = entry
heapify(self.pq) # list of entries arranged in a heap
self.REMOVED = '<removed-task>' # placeholder for a removed task
def update_task_add(self, task, add_value):
priority = 0
if task in self.entry_finder:
entry = self.entry_finder.pop(task)
entry[-1] = self.REMOVED
priority = entry[0]
count = next(self.counter)
entry = [priority-add_value, count, task]
self.entry_finder[task] = entry
heappush(self.pq, entry)
def add_task(self, task, priority=0):
'Add a new task or update the priority of an existing task'
if task in self.entry_finder:
self.remove_task(task)
count = next(self.counter)
entry = [-priority, count, task]
self.entry_finder[task] = entry
heappush(self.pq, entry)
def remove_task(self, task):
'Mark an existing task as REMOVED. Raise KeyError if not found.'
entry = self.entry_finder.pop(task)
entry[-1] = self.REMOVED
def pop_task(self):
'Remove and return the lowest priority task. Raise KeyError if empty.'
while self.pq:
priority, count, task = heappop(self.pq)
if task is not self.REMOVED:
del self.entry_finder[task]
return task
raise KeyError('pop from an empty priority queue')
class SparseShieldSeedlessSolver(Solver):
def __init__(self, G, seeds, k, **params):
Solver.__init__(self, G, seeds, k, **params)
self.to_add = len(seeds)
def net_shield(self):
G = self.G.to_undirected()
nodelist = list(G.nodes())
M = len(G)
indexes = list(range(M))
inverse_index = {}
for i in indexes:
inverse_index[nodelist[i]] = i
t1 = time.time()
A = nx.to_scipy_sparse_matrix(
G, nodelist=nodelist, weight=None, dtype='f')
W, V = eigsh(A, k=1, which='LM')
max_eig = W[0]
max_eigvec = V[:, 0].reshape((V.shape[0],))
self.log["Eigenvalue"] = max_eig
scores = 2*max_eig*(max_eigvec**2)
pk = PriorityQueue(zip(scores.tolist(), indexes))
S = set()
for _ in range(self.k + self.to_add):
next_best = pk.pop_task()
S.add(next_best)
for n in G.neighbors(nodelist[next_best]):
j = inverse_index[n]
if j not in S:
pk.update_task_add(
j, -2 * max_eigvec[next_best] * max_eigvec[j])
t2 = time.time()
self.log['Total time'] = t2-t1
return list([nodelist[i] for i in S if (nodelist[i] not in self.seeds)])[:self.k]
def run(self):
blocked = self.net_shield()
self.log['Blocked nodes'] = [int(node) for node in blocked]
| 3,440 | 32.086538 | 89 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/run_solver.py | from Simulator import *
from SparseShieldSolver import *
from SparseShieldSeedlessSolver import *
from SparseShieldPlusSolver import *
from NetShieldSolver import *
from NetShapeSolver import *
from RandomSolver import *
from DomSolver import *
from DegreeSolver import *
import os
import sys
import time
import argparse
import numpy as np
import json
import pickle as pkl
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
if __name__ == "__main__":
t1 = time.time()
parser = argparse.ArgumentParser(
description="Run solver on a single graph with seeds")
parser.add_argument("graph", type=str)
parser.add_argument("seeds", type=str)
parser.add_argument("nodes_to_block", type=int)
parser.add_argument("algorithm", type=str)
parser.add_argument("-j", "--simulation_iterations",
type=int, default="100")
parser.add_argument("-o", "--outfile", type=str, default="a.out")
parser.add_argument("-p", "--other_params", type=str, nargs="*")
parser.add_argument("-s", "--just_solve",
type=int, default=1)
args = parser.parse_args()
G, seeds = pkl.load(open(args.graph, 'rb')), np.atleast_1d(
np.loadtxt(args.seeds))
k = args.nodes_to_block
problem_params = {}
if args.other_params:
for i in range(int(len(args.other_params)/2)):
            if args.other_params[2*i+1].isdigit():
problem_params[args.other_params[2*i]
] = int(args.other_params[2*i+1])
else:
problem_params[args.other_params[2*i]
] = args.other_params[2*i+1]
z = dict(problem_params)
Solver = eval(args.algorithm + "Solver")
solver = Solver(G, seeds, k, **z)
solver.run()
try:
# in case the are numpy values
solver.log["Eigenvalue"] = solver.log["Eigenvalue"].item()
except:
None
print("%s blocked %d nodes in a graph of size %d." %
(solver.get_name(), k, len(G)))
if args.just_solve == 0:
print("Running simulations...")
simulator = Simulator(G, seeds)
simulator.add_blocked(0, solver.log['Blocked nodes'])
results = simulator.run(args.simulation_iterations)
solver.log.update({"simulation": results['solvers'][0]})
json.dump(solver.log, open(args.outfile, "w"))
print("Solver Time: %1.5fs; Objective (saved): %1.1f; Total time: %1.5s" % (
solver.log["Total time"], results['solvers'][0]["saved nodes"]["mean"], (time.time() - t1)))
print("Logs saved to {}.".format(args.outfile))
| 2,623 | 34.945205 | 100 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/NetShieldSolver.py | import networkx as nx
import numpy as np
import time
import sys
import itertools
from scipy.linalg import eigh
import os
from heapq import *
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from Solver import *
class PriorityQueue:
def __init__(self, initlist):
self.counter = itertools.count() # unique sequence count
self.entry_finder = {} # mapping of tasks to entries
self.pq = []
for el in initlist:
entry = [-el[0], next(self.counter), el[1]]
self.pq.append(entry)
self.entry_finder[el[1]] = entry
heapify(self.pq) # list of entries arranged in a heap
self.REMOVED = '<removed-task>' # placeholder for a removed task
def update_task_add(self, task, add_value):
priority = 0
if task in self.entry_finder:
entry = self.entry_finder.pop(task)
entry[-1] = self.REMOVED
priority = entry[0]
count = next(self.counter)
entry = [priority-add_value, count, task]
self.entry_finder[task] = entry
heappush(self.pq, entry)
def add_task(self, task, priority=0):
'Add a new task or update the priority of an existing task'
if task in self.entry_finder:
self.remove_task(task)
count = next(self.counter)
entry = [-priority, count, task]
self.entry_finder[task] = entry
heappush(self.pq, entry)
def remove_task(self, task):
'Mark an existing task as REMOVED. Raise KeyError if not found.'
entry = self.entry_finder.pop(task)
entry[-1] = self.REMOVED
def pop_task(self):
'Remove and return the lowest priority task. Raise KeyError if empty.'
while self.pq:
priority, count, task = heappop(self.pq)
if task is not self.REMOVED:
del self.entry_finder[task]
return task
raise KeyError('pop from an empty priority queue')
class NetShieldSolver(Solver):
def net_shield(self):
G = self.G.to_undirected()
nodelist = [n for n in G.nodes()]
inverse_index = {}
for i in range(len(nodelist)):
inverse_index[nodelist[i]] = i
t1 = time.time()
A = nx.to_numpy_matrix(G, nodelist=nodelist, weight=None)
M = len(G)
W, V = eigh(A, eigvals=(M-1, M-1), type=1, overwrite_a=True)
max_eig = W[0]
max_eigvec = V[:,0].reshape((V.shape[0],))
self.log["Eigenvalue"] = max_eig
scores = 2*max_eig*(max_eigvec**2)
pk = PriorityQueue(zip(scores.tolist(), list(range(len(G)))))
S = set()
for it in range(self.k):
next_best = pk.pop_task()
S.add(next_best)
for n in G.neighbors(nodelist[next_best]):
j = inverse_index[n]
if j not in S:
pk.update_task_add(j, -2 * max_eigvec[next_best] * max_eigvec[j])
t2 = time.time()
self.log['Total time'] = t2-t1
return [nodelist[i] for i in S]
def run(self):
blocked = self.net_shield()
self.log['Blocked nodes'] = [int(node) for node in blocked]
| 3,193 | 32.270833 | 85 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/SparseShieldPlusSolver.py | from Solver import *
import networkx as nx
import numpy as np
import time
import sys
import itertools
from scipy.sparse.linalg import eigsh
import os
from heapq import *
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class PriorityQueue:
def __init__(self, initlist):
self.counter = itertools.count() # unique sequence count
self.entry_finder = {} # mapping of tasks to entries
self.pq = []
for el in initlist:
entry = [-el[0], next(self.counter), el[1]]
self.pq.append(entry)
self.entry_finder[el[1]] = entry
heapify(self.pq) # list of entries arranged in a heap
self.REMOVED = '<removed-task>' # placeholder for a removed task
def update_task_add(self, task, add_value):
priority = 0
if task in self.entry_finder:
entry = self.entry_finder.pop(task)
entry[-1] = self.REMOVED
priority = entry[0]
count = next(self.counter)
entry = [priority-add_value, count, task]
self.entry_finder[task] = entry
heappush(self.pq, entry)
def add_task(self, task, priority=0):
'Add a new task or update the priority of an existing task'
if task in self.entry_finder:
self.remove_task(task)
count = next(self.counter)
entry = [-priority, count, task]
self.entry_finder[task] = entry
heappush(self.pq, entry)
def remove_task(self, task):
'Mark an existing task as REMOVED. Raise KeyError if not found.'
entry = self.entry_finder.pop(task)
entry[-1] = self.REMOVED
def pop_task(self):
'Remove and return the lowest priority task. Raise KeyError if empty.'
while self.pq:
priority, count, task = heappop(self.pq)
if task is not self.REMOVED:
del self.entry_finder[task]
return task
raise KeyError('pop from an empty priority queue')
class SparseShieldPlusSolver(Solver):
def __init__(self, G, seeds, k, **params):
Solver.__init__(self, G, seeds, k, **params)
for node in seeds:
self.G.remove_node(node)
def net_shield(self):
G = self.G.to_undirected()
nodelist = list(G.nodes())
M = len(G)
indexes = list(range(M))
inverse_index = {}
for i in indexes:
inverse_index[nodelist[i]] = i
t1 = time.time()
A = nx.to_scipy_sparse_matrix(
G, nodelist=nodelist, weight=None, dtype='f')
W, V = eigsh(A, k=1, which='LM')
max_eig = W[0]
max_eigvec = V[:, 0].reshape((V.shape[0],))
self.log["Eigenvalue"] = max_eig
scores = 2*max_eig*(max_eigvec**2)
pk = PriorityQueue(zip(scores.tolist(), indexes))
S = set()
for _ in range(self.k):
next_best = pk.pop_task()
S.add(next_best)
for n in G.neighbors(nodelist[next_best]):
j = inverse_index[n]
if j not in S:
pk.update_task_add(
j, -2 * max_eigvec[next_best] * max_eigvec[j])
t2 = time.time()
self.log['Total time'] = t2-t1
return [nodelist[i] for i in S]
def run(self):
blocked = self.net_shield()
self.log['Blocked nodes'] = [int(node) for node in blocked]
| 3,424 | 31.311321 | 78 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/SparseShieldSolver.py | from Solver import *
import networkx as nx
import numpy as np
import time
import sys
import itertools
from scipy.sparse.linalg import eigsh
import os
from heapq import *
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class PriorityQueue:
def __init__(self, initlist):
self.counter = itertools.count() # unique sequence count
self.entry_finder = {} # mapping of tasks to entries
self.pq = []
for el in initlist:
entry = [-el[0], next(self.counter), el[1]]
self.pq.append(entry)
self.entry_finder[el[1]] = entry
heapify(self.pq) # list of entries arranged in a heap
self.REMOVED = '<removed-task>' # placeholder for a removed task
def update_task_add(self, task, add_value):
priority = 0
if task in self.entry_finder:
entry = self.entry_finder.pop(task)
entry[-1] = self.REMOVED
priority = entry[0]
count = next(self.counter)
entry = [priority-add_value, count, task]
self.entry_finder[task] = entry
heappush(self.pq, entry)
def add_task(self, task, priority=0):
'Add a new task or update the priority of an existing task'
if task in self.entry_finder:
self.remove_task(task)
count = next(self.counter)
entry = [-priority, count, task]
self.entry_finder[task] = entry
heappush(self.pq, entry)
def remove_task(self, task):
'Mark an existing task as REMOVED. Raise KeyError if not found.'
entry = self.entry_finder.pop(task)
entry[-1] = self.REMOVED
def pop_task(self):
'Remove and return the lowest priority task. Raise KeyError if empty.'
while self.pq:
priority, count, task = heappop(self.pq)
if task is not self.REMOVED:
del self.entry_finder[task]
return task
raise KeyError('pop from an empty priority queue')
class SparseShieldSolver(Solver):
def sparse_shield(self):
G = self.G.to_undirected()
nodelist = list(G.nodes())
M = len(G)
indexes = list(range(M))
inverse_index = {}
for i in indexes:
inverse_index[nodelist[i]] = i
t1 = time.time()
A = nx.to_scipy_sparse_matrix(
G, nodelist=nodelist, weight=None, dtype='f')
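        # Only the leading eigenpair is needed, so eigsh (k=1, largest magnitude) on the
        # sparse adjacency matrix keeps this variant scalable compared to a dense eigh.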
W, V = eigsh(A, k=1, which='LM')
max_eig = W[0]
max_eigvec = V[:, 0].reshape((V.shape[0],))
self.log["Eigenvalue"] = max_eig
scores = 2*max_eig*(max_eigvec**2)
pk = PriorityQueue(zip(scores.tolist(), indexes))
S = set()
for _ in range(self.k):
next_best = pk.pop_task()
S.add(next_best)
for n in G.neighbors(nodelist[next_best]):
j = inverse_index[n]
if j not in S:
pk.update_task_add(
j, -2 * max_eigvec[next_best] * max_eigvec[j])
t2 = time.time()
self.log['Total time'] = t2-t1
return [nodelist[i] for i in S]
def run(self):
blocked = self.sparse_shield()
self.log['Blocked nodes'] = [int(node) for node in blocked]
| 3,240 | 31.41 | 78 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/Generator.py | import networkx as nx
from networkx.algorithms import approximation
import random
import sys
import time
import os
import argparse
import numpy as np
import hashlib
from scipy.sparse import csr_matrix
import logging
class Generator:
def __init__(self, params):
self.params = params
self.generators = {
'powerlaw_cluster': lambda: nx.powerlaw_cluster_graph(params["n"], params["m"], params["p"]),
'grid': lambda: nx.convert_node_labels_to_integers(nx.grid_2d_graph(params['n'], params['n'])),
'path': lambda: nx.path_graph(params["n"]),
'binomial': lambda: nx.fast_gnp_random_graph(params['n'], params['p']),
'watts_strogatz': lambda: nx.watts_strogatz_graph(params['n'], params['k'], params['p']),
'karate': lambda: nx.karate_club_graph(),
'gaussian_random_partition': lambda: nx.gaussian_random_partition_graph(params['n'], params['s'], params['v'], params['p_in'], params['p_out'])
}
def gen_graph_id(self):
return str(self.get_static_hash(str(int(time.time())) + str(random.randint(10000, 99999)) + "_".join([str(self.params[p]) for p in self.params])))
def generate(self, number_of_graphs=1):
for i in range(number_of_graphs):
G = self.generators[self.params["graph_type"]]()
if self.params["graph_type"] != 'vk':
if self.params["graph_type"] not in ["gnutella", "stanford"]:
G = self.add_random_directions(G, self.params["both_directions"])
else:
if self.params["both_directions"]:
raise Exception("Not implemeted")
G = self.assign_weights(G, self.params["weight_scale"], self.params["random_weight"])
G.graph['graph_id'] = self.gen_graph_id()
G.graph.update(self.params)
yield G
@staticmethod # used in tests
def assign_weights(G, weight_scale, random_weight):
if random_weight:
for e in G.edges():
                G[e[0]][e[1]]['weight'] = np.random.random() * weight_scale
else:
for e in G.edges():
G[e[0]][e[1]]['weight'] = weight_scale
return G
@staticmethod
def add_random_directions(G, both=False):
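        # Turn an undirected graph into a DiGraph: either keep both directions for every
        # edge, or pick a single random direction with probability 1/2 each way.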
assert(not nx.is_directed(G))
dG = nx.DiGraph()
for e in G.edges():
if both:
dG.add_edge(e[0],e[1])
dG.add_edge(e[1],e[0])
for key in G[e[0]][e[1]]:
dG[e[0]][e[1]][key] = G[e[0]][e[1]][key]
dG[e[1]][e[0]][key] = G[e[0]][e[1]][key]
else:
if np.random.random() < 0.5:
dG.add_edge(e[0],e[1])
for key in G[e[0]][e[1]]:
dG[e[0]][e[1]][key] = G[e[0]][e[1]][key]
else:
dG.add_edge(e[1],e[0])
for key in G[e[1]][e[0]]:
dG[e[1]][e[0]][key] = G[e[0]][e[1]][key]
return dG
@staticmethod
def analyze_graph(G):
G.graph['directed'] = nx.is_directed(G)
G_und = G.to_undirected()
G.graph['connected_components'] = nx.number_connected_components(G_und)
G.graph['largest_component'] = len(max(nx.connected_components(G_und), key=len))
logging.info("Graph ID {}: components analyzed.".format(G.graph['graph_id']))
G.graph['average_clustering'] = approximation.average_clustering(G_und)
logging.info("Graph ID {}: clustering analyzed.".format(G.graph['graph_id']))
degrees = [d for n, d in G.degree()]
        G.graph['min_degree'] = min(degrees)
G.graph['max_degree'] = max(degrees)
G.graph['avg_degree'] = np.mean(degrees)
G.graph['std_degree'] = np.std(degrees)
G.graph['median_degree'] = np.median(degrees)
logging.info("Graph ID {}: degrees analyzed.".format(G.graph['graph_id']))
@staticmethod
def get_static_hash(string):
h = int(hashlib.md5(string.encode('utf-8')).hexdigest(), 16)
return h
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate graph with seeds")
parser.add_argument("graph_type", type=str)
parser.add_argument("graph_outfile", type=str)
parser.add_argument("seed_outfile", type=str)
parser.add_argument("-s", "--number_of_seeds", type=int, default=1)
parser.add_argument("-b", "--both_directions", type=int, default=1)
parser.add_argument("-w", "--weight_scale", type=float, default=0.3)
parser.add_argument("-p", "--other_params", type=str, nargs="*")
args = parser.parse_args()
other_params = {"graph_type": args.graph_type,
"both_directions": args.both_directions,
"weight_scale": args.weight_scale,
"random_weight": 1}
if args.other_params:
for i in range(int(len(args.other_params)/2)):
if args.other_params[2*i+1].isdigit():
other_params[args.other_params[2*i]] = int(args.other_params[2*i+1])
else:
other_params[args.other_params[2*i]] = args.other_params[2*i+1]
z = dict(other_params)
gen = Generator(z)
G = next(gen.generate())
nx.write_gpickle(G, args.graph_outfile)
n = args.number_of_seeds
seeds = np.random.choice([node for node in G.nodes()], n, replace=False)
np.savetxt(args.seed_outfile, seeds, fmt="%1u")
print("Done.")
| 5,605 | 43.141732 | 155 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/SetSelector.py | import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import helpers
import time
import logging
from collections import defaultdict, Counter
class SetSelector():
def __init__(self, ranking, is_weighted=True):
self.ranking = ranking
self.is_weighted = is_weighted
self.log = {}
@staticmethod
def get_best_score_mincut(scores, selected_nodes):
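        # Greedy choice: benefit (covered sample weight) per *newly added* node; nodes
        # already in selected_nodes cost nothing (see get_cut_length_cost).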
best_key = None
best_score = -1
for s in scores:
score = s[1]/SetSelector.get_cut_length_cost(s[0], selected_nodes) # score per node
if score > best_score:
best_score = score
best_key = s[0]
return best_key
@staticmethod
def get_updated_ranking(k, ranking, selected_nodes):
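        # Drop candidate cuts that no longer fit in the remaining budget, and drop samples
        # already covered by cuts fully contained in selected_nodes.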
vacant_node_set_size = k - len(selected_nodes)
updated_ranking = defaultdict(lambda: [])
sample_is_already_blocked = defaultdict(lambda: False)
for key in ranking:
if SetSelector.get_cut_length_cost(key, selected_nodes) > vacant_node_set_size:
continue
if helpers.set_in_set(key, selected_nodes):
for sample_id in ranking[key]:
sample_is_already_blocked[sample_id] = True
else:
for sample_id in ranking[key]:
if not sample_is_already_blocked[sample_id]:
updated_ranking[key].append(sample_id)
return updated_ranking
@staticmethod
def get_cut_length_cost(cut, selected_nodes):
return len([1 for n in cut if n not in selected_nodes])
@staticmethod
def build_scores(ranking, selected_nodes):
return SetSelector.build_weighted_scores(ranking, selected_nodes, defaultdict(lambda: 1))
@staticmethod
def build_weighted_scores(ranking, selected_nodes, weights_per_sample):
scores = []
for key in ranking:
unique_sample_set = set()
weight_of_sample_set = 0
for key2 in ranking:
if helpers.set_in_set(key2, key): # including itself
for n in ranking[key2]:
if n not in unique_sample_set:
weight_of_sample_set += weights_per_sample[n]
unique_sample_set.add(n)
scores.append((key, weight_of_sample_set))
return scores
def get_best_nodes(self, k, strategy="basic"):
selected_nodes = set()
ranking = dict(self.ranking)
it = 0
while len(selected_nodes) < k:
it += 1
ranking = SetSelector.get_updated_ranking(k, ranking, selected_nodes)
if self.is_weighted:
sample_weights = self.get_sample_weights()
scores = SetSelector.build_weighted_scores(ranking, selected_nodes, sample_weights)
else:
scores = SetSelector.build_scores(ranking, selected_nodes)
new_nodes = SetSelector.get_best_score_mincut(scores, selected_nodes)
            if new_nodes is None:
break
for n in new_nodes:
selected_nodes.add(n)
self.log["Scores are weighted"] = self.is_weighted
self.log["Blocking nodes selection iterations"] = it
return selected_nodes
def set_sampled_nodes_weights(self, p):
# for exact estimator this is activation probabilities of sample nodes
self.sampled_nodes_weights = p
def set_sample_to_node_index(self, i):
self.sample_to_node_index = i
def get_positive_samples(self, blocked_set):
return set(helpers.flatten([self.ranking[key] for key in self.ranking if helpers.set_in_set(key, blocked_set)]))
def get_positive_node_counts(self, blocked_set):
return Counter([self.sample_to_node_index[sample] for sample in self.get_positive_samples(blocked_set)])
def get_predicted_normalized(self, iteration, blocked_set):
return len(self.get_positive_samples(blocked_set))/(iteration+1)
def get_predicted_normalized_per_node(self, blocked_set):
all_samples_per_node = Counter(self.sample_to_node_index)
positive_samples_per_node = self.get_positive_node_counts(blocked_set)
result = 0
for node in positive_samples_per_node:
sampled_nodes_weights = self.get_sampled_nodes_weights()
result += positive_samples_per_node[node]/all_samples_per_node[node]*sampled_nodes_weights[node]
return result
def get_sampled_nodes_weights(self):
return self.sampled_nodes_weights
def get_sample_weights(self):
return [self.sampled_nodes_weights[n] for n in self.sample_to_node_index]
| 4,718 | 38.991525 | 120 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/Solver.py | import time
class Solver:
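    # Base class for the immunization solvers: validates the (graph, seeds, k) instance,
    # keeps a private copy of the graph, and expects subclasses to implement run() and
    # record their results (e.g. 'Blocked nodes', 'Total time') in self.log.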
def __init__(self, G, seeds, k, **params):
if len(G) == 0:
raise Exception("Graph can not be empty")
if len(seeds) == 0:
raise Exception("Seeds can not be empty")
if k > len(G) - len(seeds):
raise Exception("Seeds can not be blocked: too large k")
if k == 0:
raise Exception("k should be greater than 0")
self.G = G.copy()
self.seeds = [int(node) for node in seeds]
self.k = int(k)
self.log = {}
self.log['created'] = time.time()
self.params = params
self.clear()
def clear(self):
pass
def get_name(self):
return self.__class__.__name__
| 730 | 27.115385 | 68 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/DomSolver.py | '''
The class implements DAVA - the seed-aware immunization algorithm based on dominator trees.
'''
import networkx as nx
import time
from collections import defaultdict
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import Solver as slv
from functools import reduce
import math
import numpy as np
class DomSolver(slv.Solver):
def clear(self):
for e in self.G.edges(data=True):
if e[2]['weight'] == 1:
e[2]['weight'] = 0.99999 # No p=1 allowed due to probability calculation along shortest path
self.create_superseed_and_update_weights()
def create_superseed_and_update_weights(self):
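        # Merge all seeds into a single 'superseed' node: its edge weight to each neighbour
        # is the probability that at least one seed activates it (see get_total_weight), and
        # all weights are then mapped to -log(p) so shortest paths follow most-likely paths.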
self.superseed_index = len(self.G)
while self.superseed_index in self.G:
self.superseed_index += 1
neighbors = defaultdict(lambda: [])
for seed in self.seeds:
for n in self.G.neighbors(seed):
neighbors[n].append(self.G[seed][n]['weight'])
new_edges = [(self.superseed_index, n, DomSolver.get_total_weight(neighbors[n])) for n in neighbors]
self.G.add_weighted_edges_from(new_edges)
self.G = self.G.subgraph((set(self.G.nodes()) - set(self.seeds)) | set([self.superseed_index])).copy()
for edge in self.G.edges():
self.G[edge[0]][edge[1]]['weight'] = -math.log(self.G[edge[0]][edge[1]]['weight'])
@staticmethod
def get_total_weight(list_of_probabilities):
return 1. - reduce(lambda x, y: x*y, [(1.-p) for p in list_of_probabilities])
def run(self):
t1 = time.time()
blocked = []
extra_time = 0
if not self.params.get("fast", False):
for iteration in range(self.k):
self.build_domtree()
if iteration == 0:
extra_time += self.save_tree_stats_return_time("first it")
if iteration == self.k - 1:
extra_time += self.save_tree_stats_return_time("last it")
blocked += self.get_best_nodes(1)
self.G.remove_node(blocked[-1])
else:
self.build_domtree()
extra_time += self.save_tree_stats_return_time("first it")
blocked = self.get_best_nodes(self.k)
t2 = time.time()
self.log['Total time'] = t2 - t1 - extra_time
self.log['Blocked nodes'] = blocked
def save_tree_stats_return_time(self, prefix):
t1 = time.time()
g = self.domtree
sp = nx.single_source_shortest_path_length(g,self.superseed_index)
self.log['tree depth ' + prefix] = max([sp[n] for n in sp])
self.log['first level node fraction ' + prefix] = g.degree(self.superseed_index)/len(self.G)
first_level_degrees = [g.out_degree(n) for n in g.neighbors(self.superseed_index)]
self.log['second level node fraction ' + prefix] = sum(first_level_degrees)/len(self.G)
self.log['second level avg degree ' + prefix] = 0 if len(first_level_degrees) == 0 else np.mean(first_level_degrees)
t2 = time.time()
return t2 - t1
def build_domtree(self):
tree_dict = nx.algorithms.dominance.immediate_dominators(self.G, self.superseed_index)
self.domtree = nx.DiGraph()
self.domtree.add_node(self.superseed_index)
self.domtree.add_edges_from([(edge[1],edge[0]) for edge in tree_dict.items() if edge[0] != edge[1]])
probabilities_from_root = nx.single_source_dijkstra_path_length(self.G, self.superseed_index)
#probability (v,u) = p(u)/p(v) from root
for edge in self.domtree.edges():
if edge[0] == self.superseed_index:
probability = math.exp(-probabilities_from_root[edge[1]])
else:
probability = math.exp(-probabilities_from_root[edge[1]]+probabilities_from_root[edge[0]])
self.domtree[edge[0]][edge[1]]['weight'] = probability
def traverseTreeRec(self, node):
benefit = 1
for n in self.domtree.neighbors(node):
benefit += self.traverseTreeRec(n)*self.domtree[node][n]['weight']
return benefit
def get_rank(self):
rank = []
if self.k > self.domtree.degree(self.superseed_index):
self.log['error'] = "Problem is trivial"
if self.domtree.degree(self.superseed_index) == 0:
return [(0,np.random.choice([n for n in self.G.nodes() if n != self.superseed_index and n not in self.seeds], replace=False))]
return [(0, next(self.domtree.neighbors(self.superseed_index)))]
for n in self.domtree.neighbors(self.superseed_index):
benefit = self.traverseTreeRec(n)*self.domtree[self.superseed_index][n]['weight']
rank.append((benefit, n))
return rank
def get_best_nodes(self, number_of_nodes):
rank = self.get_rank()
return [int(a[1]) for a in sorted(rank)[-number_of_nodes:]]
| 4,932 | 43.044643 | 142 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/DegreeSolver.py | import networkx as nx
import time
from Solver import *
class DegreeSolver(Solver):
def run(self):
t1 = time.time()
degrees = [(node, self.G.degree([node])[node]) for node in self.G.nodes() if node not in self.seeds]
blocked = []
degrees.sort(key=lambda t: t[1])
for i in range(self.k):
blocked.append(degrees.pop()[0])
t2 = time.time()
self.log['Total time'] = (t2-t1)
self.log['Blocked nodes'] = [int(node) for node in blocked]
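# Usage sketch (illustrative; graph construction and seed selection live elsewhere in the pipeline):
#   solver = DegreeSolver(G, seeds=[0, 1], k=5)
#   solver.run()
#   print(solver.log['Blocked nodes'], solver.log['Total time'])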
| 512 | 27.5 | 108 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/NetShapeSolver.py | import networkx as nx
import numpy as np
from scipy.linalg import eigh
import time
import sys
import os
import math
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from Solver import *
class NetShapeSolver(Solver):
def clear(self):
        warnings.filterwarnings('ignore')
self.epsilon = self.params['epsilon']
def transform_graph_to_matrix(self):
self.nodelist = [n for n in self.G.nodes()]
self.inverse_nodelist = {}
for i in range(len(self.nodelist)):
self.inverse_nodelist[self.nodelist[i]] = i
F = nx.to_numpy_matrix(self.G, nodelist=self.nodelist, weight='weight')
return F
def init_variables(self):
self.F = self.transform_graph_to_matrix()
self.Delta = self.get_delta(self.F)
self.R = np.sqrt(self.k)*np.max(np.abs(self.Delta))
self.T = int(math.ceil((self.R/self.epsilon)**2))
self.x = np.zeros(len(self.G))
self.x_star = self.x.copy()
def get_delta(self, F):
delta = -F.copy()
for s in self.seeds:
node_index = self.inverse_nodelist[s]
delta[node_index, :] = 0
return delta
def calculate_x_star(self):
self.init_variables()
for iteration in range(1,self.T+1):
self.perform_iteration(iteration)
return self.x_star
def get_hazard_result(self):
return np.multiply((1-(1/self.T)*self.get_x_matrix(self.x_star)), self.F), self.x_star
def perform_iteration(self, iteration):
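        # One projected-subgradient step: the subgradient of the largest eigenvalue of the
        # symmetrised hazard matrix is u*u^T (u = leading eigenvector), the step size
        # shrinks as R/sqrt(iteration), and the update is projected back onto the feasible set.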
M = self.F + np.multiply(self.get_x_matrix(self.x), self.Delta)
M2 = 1/2*(M+M.transpose())
N = M2.shape[0]
W, V = eigh(M2, eigvals=(N-1, N-1), type=1, overwrite_a=True)
max_eig = np.real(W[0])
max_eigvec = V[:, 0].reshape(N, 1)
u = max_eigvec
subgrad_step = np.dot(u, u.transpose())
Y = np.multiply(self.get_x_matrix(self.x), self.Delta) - self.R/np.sqrt(iteration) * subgrad_step
self.x = self.get_projection(self.Delta, Y)
self.x_star += np.real(self.x)
return max_eig
@staticmethod
def get_x_matrix(x):
x = np.reshape(x, (len(x), 1)).copy()
return np.repeat(x, x.shape[0], axis=1)
def get_projection(self, delta, y):
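        # Projection of the per-row variables onto the box [0, 1] under a budget of k:
        # the breakpoints mu are sorted, a threshold z is located once the budget is
        # exhausted, and the resulting x is clipped back into the unit interval.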
delta_prime = self.get_delta_prime(delta)
y_prime = self.get_y_prime(y, delta, delta_prime)
N = len(self.G)
mu1 = 2*np.multiply(y_prime, delta_prime)
mu2 = mu1-2*delta_prime**2
mu = np.concatenate((mu1, mu2))
pi = np.flip(np.argsort(mu), axis=0)
assert(mu[pi[0]] >= mu[pi[-1]])
d = 0
s = 0
i = 0
while s <= self.k and mu[pi[i]] >= 0:
if pi[i] < N:
if delta_prime[pi[i]] == 0:
i += 1
continue
d += 1/(2*delta_prime[pi[i]]**2)
else:
if delta_prime[pi[i]-N] == 0:
i += 1
continue
d -= 1/(2*delta_prime[pi[i]-N]**2)
if mu[pi[i]] != mu[pi[i+1]]:
s += d * (mu[pi[i]] - mu[pi[i+1]])
i += 1
z = np.max((0, mu[pi[i]] + (s-self.k)/d))
x_prime = 2 * np.multiply(delta_prime, y_prime) - z
x_prime = np.multiply(x_prime, np.reciprocal(2*delta_prime**2))
x_prime = np.minimum(x_prime, 1)
x_prime = np.maximum(x_prime, 0)
np.nan_to_num(x_prime, copy=False)
return x_prime
@staticmethod
def get_delta_prime(delta):
return np.ravel(np.sqrt(np.sum(np.power(delta, 2), axis=1)))
@staticmethod
def get_y_prime(y, delta, delta_prime):
y_prime = np.ravel(np.sum(np.multiply(y, delta), axis=1))
dp = delta_prime.copy()
dp[dp == 0] = 1
y_prime /= dp
return y_prime
def get_blocked(self):
node_indexes = np.argsort(self.x_star)[-self.k:]
return [self.nodelist[i] for i in node_indexes]
def run(self):
# check if there is at least one edge from seeds
check = False
for s in self.seeds:
for n in self.G.neighbors(s):
check = True
break
if check:
break
t1 = time.time()
if not check:
self.log['Error'] = "Trivial problem: no paths from seeds"
blocked = [n for n in self.G if n not in self.seeds][:self.k]
else:
self.calculate_x_star()
blocked = self.get_blocked()
t2 = time.time()
self.log['Total time'] = (t2-t1)
self.log['Blocked nodes'] = [int(node) for node in blocked]
| 4,674 | 31.465278 | 105 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/helpers/runners.py | from os import system
from joblib import Parallel, delayed
def run_against_config(results_path, nodes_to_cut, algorithm_name, graph_file, seed_file, just_solve):
output_name = results_path + "result_" + \
str(nodes_to_cut) + "_" + algorithm_name + "_" + \
graph_file.replace('.pkl', '') + ".json"
script_to_run = "python run_solver.py graphs/" + \
graph_file + " seeds/" + seed_file + " "
arguments = str(nodes_to_cut) + " " + \
algorithm_name + " --outfile " + output_name + \
" --just_solve " + str(1 if just_solve else 0)
full_command = script_to_run + arguments
print(full_command)
system(full_command)
def run_solver_against_configs(results_path='results/', graph_file='fromData.pkl', seed_file='seed.csv', startNumber=10, endNumber=1000, step=10, algorithms_to_run=['Random', 'Degree', 'SparseShield', 'Dom'], just_solve=True, num_threads = 22):
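    # Sweeps the blocking budget over range(startNumber, endNumber, step) and launches one
    # run_solver.py subprocess per (budget, algorithm) pair, parallelised with joblib.
    # Usage sketch (illustrative; assumes the graphs/ and seeds/ layout used by run_against_config):
    #   run_solver_against_configs(results_path='results/', graph_file='fromData.pkl',
    #                              seed_file='seed.csv', startNumber=10, endNumber=100,
    #                              step=10, algorithms_to_run=['SparseShield'])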
configs = list([(nodes_to_cut, algorithm_name) for nodes_to_cut in range(startNumber, endNumber, step) for algorithm_name in algorithms_to_run])
Parallel(n_jobs=num_threads)(
delayed(run_against_config)(results_path, nodes_to_cut, algorithm_name, graph_file, seed_file, just_solve) for (nodes_to_cut, algorithm_name) in configs)
| 1,322 | 54.125 | 244 | py |
CONTAIN | CONTAIN-main/SparseShield_NIvsHS/Scripts/helpers/graph_builder_helpers.py | import networkx as nx
import pickle
import json
import os
from os import listdir
from os.path import isfile, join
from networkx.utils import open_file
user_list_property = 'UniqueUsers'
user_count_property = 'UniqueUsersCount'
number_of_nodes = 2789474.0
active_factor = 0.5
active_multiplier = active_factor * (1 / (number_of_nodes))
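# Every edge read from a dump file gets the same weight: active_factor scaled by the
# file's UniqueUsersCount and normalised by the total number of nodes (see parse_file).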
def get_files(data_folder):
only_files = [f for f in listdir(data_folder) if isfile(join(data_folder, f))]
print(only_files[:3])
return only_files
def parse_file(file_name, data_folder, G):
with open(data_folder + '/' + file_name, 'r') as json_file:
data = json.load(json_file)
user_list = data[user_list_property]
initial_user = int(file_name.replace('dump', '').replace('.json', ''))
user_count = int(data[user_count_property]) + 0.0
edge_w = active_multiplier * user_count
for user_id in user_list:
G.add_edge(initial_user, user_id, weight = edge_w )
def generate_graph_from_files(data_folder, graph_dump_path):
G = nx.DiGraph()
only_files = get_files(data_folder)
for file_name in only_files:
parse_file(file_name, data_folder, G)
save_graph_to_file(G, graph_dump_path)
print(len(list(G.nodes)))
print(len(list(G.edges)))
return G
def load_graph_from_file(graph_dump_path):
G_loaded = None
with open(graph_dump_path, 'rb') as f:
G_loaded = pickle.load(f)
return G_loaded
def save_graph_to_file(G, save_path):
with open(save_path, "wb") as output_file:
pickle.dump(G, output_file, protocol=pickle.HIGHEST_PROTOCOL)
def get_nodes_by_degree(G):
degrees = {}
for (node, degree) in G.degree():
if degree in degrees:
degrees[degree].append(node)
else:
degrees[degree] = [node]
return degrees
def save_graph_named_by_size(G, graph_dump_path, explicit_name = None):
    file_name = os.path.basename(graph_dump_path)
save_path = graph_dump_path.replace(file_name, '')
if (explicit_name == None):
num_nodes = len(list(G.nodes))
num_edges = len(list(G.edges))
save_path += "[" + str(num_nodes) + "][" + str(num_edges) + "]" + file_name
else:
save_path = explicit_name
save_graph_to_file(G, save_path)
def get_stats_for_nodes(G):
degrees = get_nodes_by_degree(G)
degree_values = list(degrees.keys())
max_degree = max(degree_values) + 0.0
min_degree = min(degree_values) + 0.0
avg_degree = (sum(degree_values) + 0.0)/(len(degree_values) + 0.0)
return (degrees, max_degree, min_degree, avg_degree)
def analyze_graph(degrees, max_degree, min_degree, avg_degree, high_ration, low_ratio, avg_ratio, above_avg_ratio):
high_degree_count = sum([len(nodes) if (degree > high_ration * max_degree) else 0 for (degree, nodes) in degrees.items()])
low_degree_count = sum([len(nodes) if (degree < low_ratio * min_degree) else 0 for (degree, nodes) in degrees.items()])
avg_degree_count = sum([len(nodes) if (abs(avg_degree - degree) / avg_degree < avg_ratio) else 0 for (degree, nodes) in degrees.items()])
nodes_above_avg_degree_count = sum([len(nodes) if (degree > above_avg_ratio * avg_degree) else 0 for (degree, nodes) in degrees.items()])
return (high_degree_count, low_degree_count, avg_degree_count, nodes_above_avg_degree_count)
def print_stats(high_degree_count, low_degree_count, avg_degree_count, nodes_above_avg_degree_count, high_ration, low_ratio, avg_ratio, above_avg_ratio, max_degree, min_degree, avg_degree, G):
print(high_degree_count, "nodes having the degree at least ",high_ration, " of the maximum degree which is", max_degree)
print(low_degree_count, "nodes having the degree at most ", low_ratio," of the minimum degree which is", min_degree)
print(avg_degree_count, "nodes having the degree at around ", avg_ratio, " of the avg degree which is", avg_degree)
print(nodes_above_avg_degree_count, "nodes having the degree at least", above_avg_ratio, " of the avg degree which is", above_avg_ratio * avg_degree)
print(len(G.nodes), "nodes in total") | 4,138 | 36.972477 | 192 | py |
RAML | RAML-master/incremental/main.py | from tqdm import tqdm
import network
import utils
import os
import random
import argparse
import numpy as np
import torch.nn.functional as F
from torch.utils import data
from datasets import VOCSegmentation, Cityscapes, cityscapes
from utils import ext_transforms as et
from metrics import StreamSegMetrics
import torch
import torch.nn as nn
from utils.visualizer import Visualizer
from PIL import Image
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sklearn.metrics as Metrics
from torch import Tensor
from typing import Tuple
def get_argparser():
parser = argparse.ArgumentParser()
# Datset Options
parser.add_argument("--data_root", type=str, default='../data/cityscapes',
help="path to Dataset")
parser.add_argument("--dataset", type=str, default='cityscapes',
choices=['voc', 'cityscapes'], help='Name of dataset')
parser.add_argument("--num_classes", type=int, default=256,
help="num classes (default: None)")
parser.add_argument("--metric_dim", type=int, default=None,
help="num classes (default: None)")
# Deeplab Options
parser.add_argument("--model", type=str, default='deeplabv3plus_metirc_resnet101',
choices=['deeplabv3_resnet50', 'deeplabv3plus_resnet50',
'deeplabv3_resnet101', 'deeplabv3plus_resnet101',
'deeplabv3_mobilenet', 'deeplabv3plus_mobilenet',
'deeplabv3plus_metirc_resnet101'], help='model name')
parser.add_argument("--separable_conv", action='store_true', default=False,
help="apply separable conv to decoder and aspp")
parser.add_argument("--output_stride", type=int, default=16, choices=[8, 16])
# Train Options
parser.add_argument("--finetune", action='store_true', default=False)
parser.add_argument("--test_only", action='store_true', default=False)
parser.add_argument("--save_val_results", action='store_true', default=False,
help="save segmentation results to \"./results\"")
parser.add_argument("--total_itrs", type=int, default=30000,
help="epoch number (default: 30k)")
parser.add_argument("--lr", type=float, default=0.1,
help="learning rate (default: 0.01)")
parser.add_argument("--lr_policy", type=str, default='poly', choices=['poly', 'step'],
help="learning rate scheduler policy")
parser.add_argument("--step_size", type=int, default=1000)
parser.add_argument("--crop_val", action='store_true', default=False,
help='crop validation (default: True)')
parser.add_argument("--batch_size", type=int, default=6,
help='batch size (default: 16)')
parser.add_argument("--val_batch_size", type=int, default=4,
help='batch size for validation (default: 4)')
parser.add_argument("--crop_size", type=int, default=768)
parser.add_argument("--ckpt", default=None, type=str,
help="restore from checkpoint")
parser.add_argument("--continue_training", action='store_true', default=False)
parser.add_argument("--loss_type", type=str, default='cross_entropy',
choices=['cross_entropy', 'focal_loss'], help="loss type (default: False)")
parser.add_argument("--gpu_id", type=str, default='0,1',
help="GPU ID")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help='weight decay (default: 1e-4)')
parser.add_argument("--random_seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--print_interval", type=int, default=10,
help="print interval of loss (default: 10)")
parser.add_argument("--val_interval", type=int, default=100,
help="epoch interval for eval (default: 100)")
parser.add_argument("--download", action='store_true', default=False,
help="download datasets")
parser.add_argument("--name", type=str, default='',help="download datasets")
parser.add_argument("--output_dir", type=str, default='output', help="output path")
# PASCAL VOC Options
parser.add_argument("--year", type=str, default='2012',
choices=['2012_aug', '2012', '2011', '2009', '2008', '2007'], help='year of VOC')
# Visdom options
parser.add_argument("--enable_vis", action='store_true', default=False,
help="use visdom for visualization")
parser.add_argument("--vis_port", type=str, default='13570',
help='port for visdom')
parser.add_argument("--vis_env", type=str, default='main',
help='env for visdom')
parser.add_argument("--vis_num_samples", type=int, default=8,
help='number of samples for visualization (default: 8)')
return parser
class BinaryDiceLoss(nn.Module):
def __init__(self, smooth=1, p=2, reduction='mean'):
super(BinaryDiceLoss, self).__init__()
self.smooth = smooth
self.p = p
self.reduction = reduction
def forward(self, predict, target):
assert predict.shape[0] == target.shape[0], "predict & target batch size don't match"
predict = predict.contiguous().view(predict.shape[0], -1)
target = target.contiguous().view(target.shape[0], -1)
num = torch.sum(torch.mul(predict, target), dim=1) + self.smooth
den = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1) + self.smooth
loss = 1 - num / den
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
elif self.reduction == 'none':
return loss
else:
raise Exception('Unexpected reduction {}'.format(self.reduction))
class MyDiceLoss(nn.Module):
def __init__(self, ignore_index=255):
super().__init__()
self.dice_criterion = BinaryDiceLoss()
self.ignore_index = ignore_index
def forward(self, logit, label_lst, class_lst):
loss = 0.0
for b in range(logit.shape[0]):
logit_b = logit[b][torch.where(class_lst[b] != self.ignore_index)]
label_lst_b = label_lst[b][torch.where(class_lst[b] != self.ignore_index)]
if logit_b.shape[0]:
loss += self.dice_criterion(logit_b, label_lst_b)
return loss / logit.shape[0]
class CDiceLoss(nn.Module):
def __init__(self, known_class=16, ignore_index=255):
super().__init__()
self.dice_criterion = BinaryDiceLoss()
self.bce_criterion = nn.BCELoss()
self.ignore_index = ignore_index
self.class_num=known_class
print('finetune with '+str(known_class)+" classes")
def forward(self, logit, label_lst, class_lst):
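        # Three terms: loss1 fits the first `known_class` channels to their ground-truth
        # masks (Dice + BCE); loss2 keeps the remaining channels from collapsing to all
        # zeros; loss3 penalises overlap between every pair of channels (1 - Dice loss).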
loss1 = torch.FloatTensor([0.0]).to(logit.device)
for i in range(self.class_num):
loss1 += (self.dice_criterion(logit[:, i], label_lst[:, i]) + self.bce_criterion(logit[:, i], label_lst[:, i].float()))
loss1 /= self.class_num
loss2 = 0.0
for i in range(self.class_num, logit.shape[1]):
loss2 += -torch.log((torch.mean(logit[:, i]) * 50).clamp(0, 1))
loss2 /= (logit.shape[1] - self.class_num)
loss3 = 0.0
num3 = 0
for i in range(logit.shape[1]):
for j in range(logit.shape[1]):
if i == j: continue
dice_loss = self.dice_criterion(logit[:, i], logit[:, j])
loss3 += (1.0 - dice_loss)
num3 += 1
loss3 = loss3 / num3
loss = (loss1 + loss2 + loss3) * 0.1
return {
'loss': loss,
'loss1': loss1,
'loss2': loss2,
'loss3': loss3,
}
def get_dataset(opts):
""" Dataset And Augmentation
"""
if opts.dataset == 'voc':
train_transform = et.ExtCompose([
#et.ExtResize(size=opts.crop_size),
et.ExtRandomScale((0.5, 2.0)),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size), pad_if_needed=True),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
if opts.crop_val:
val_transform = et.ExtCompose([
et.ExtResize(opts.crop_size),
et.ExtCenterCrop(opts.crop_size),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
else:
val_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='train', download=opts.download, transform=train_transform)
val_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='val', download=False, transform=val_transform)
if opts.dataset == 'cityscapes':
train_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),
et.ExtColorJitter( brightness=0.5, contrast=0.5, saturation=0.5 ),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
val_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = Cityscapes(root=opts.data_root,
split='train', transform=train_transform)
val_dst = Cityscapes(root=opts.data_root,
split='val', transform=val_transform)
return train_dst, val_dst
def save_ckpt(batch_idx, model, optimizer, scheduler, path):
""" save current model
"""
torch.save({
"batch_idx": batch_idx,
"model_state": model.module.state_dict(),
"optimizer_state": optimizer.state_dict(),
"scheduler_state": scheduler.state_dict(),
}, path)
print("Model saved as %s" % path)
def visualize(image, label, logit, label_lst, class_lst, save_path=None, denorm=None):
# logit: (256, H, W)
if not isinstance(image, np.ndarray):
image = image.detach().cpu().numpy()
label = label.detach().cpu().numpy()
logit = logit.detach().cpu().numpy()
label_lst = label_lst.detach().cpu().numpy()
class_lst = class_lst.detach().cpu().numpy()
if denorm:
image = (denorm(image) * 255).transpose(1, 2, 0).astype(np.uint8)
_, axarr = plt.subplots(2, (1+logit.shape[0]), figsize=(5*(1+logit.shape[0]), 10))
axarr[0][0].imshow(image)
label[label == 255] = 0
axarr[1][0].imshow(label)
for i in range(logit.shape[0]):
if i < label_lst.shape[0]:
axarr[0][1+i].imshow(label_lst[i])
axarr[1][i+1].imshow((logit[i] >= 0.5).astype(np.uint8))
# _, axarr = plt.subplots(16, 32, figsize=(40, 20))
# for i in range(label.shape[0]):
# axarr[i//16][(i%16)*2].imshow(label[i])
# axarr[i//16][(i%16)*2].set_xticks([])
# axarr[i//16][(i%16)*2].set_yticks([])
# for i in range(logit.shape[0]):
# axarr[i//16][(i%16)*2+1].imshow((logit[i] >= 0.5).astype(np.uint8))
# axarr[i//16][(i%16)*2+1].set_xticks([])
# axarr[i//16][(i%16)*2+1].set_yticks([])
# label[label == 255] = 19
# C = logit.shape[0]
# logit = np.argmax(logit, axis=0)
# mask = np.zeros_like(logit)
# for c in range(C):
# t = class_lst[c]
# if t == 255: t = 19
# temp = (logit == c).astype(np.uint8)
# mask = np.ones_like(logit) * t * temp + mask * (1 - temp)
# _, axarr = plt.subplots(1, 3, figsize=(15, 5))
# axarr[0].imshow(image)
# axarr[1].imshow(label)
# axarr[2].imshow(mask)
if save_path:
plt.savefig(save_path)
else:
plt.show()
plt.close()
def val(opts, model, val_loader, device):
metrics = StreamSegMetrics(19)
val_save_dir = os.path.join(opts.output_dir, 'val')
os.makedirs(val_save_dir, exist_ok=True)
model.eval()
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
for batch_idx, (images, labels, _, _, _) in tqdm(enumerate(val_loader)):
images = images.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.long)
outputs, _, _, _ = model(images)
outputs = torch.argmax(outputs, dim=1)[0].detach().cpu().numpy() # (H, W)
#print(labels.shape, outputs.shape)
metrics.update(labels[0].detach().cpu().numpy(), outputs)
score = metrics.get_results()
print(str(opts.num_classes)+' classes')
print(metrics.to_str(score))
def train_stage1(opts, model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print):
ce_criterion = utils.CrossEntropyLoss(ignore_index=255, size_average=True)
#l2_criterion = nn.MSELoss().to(device)
model.train()
epoch_records = {}
cur_itr = 0
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
val_save_dir = os.path.join(opts.output_dir, 'val')
os.makedirs(val_save_dir, exist_ok=True)
while True:
for batch_idx, (images, labels, labels_true, labels_lst, class_lst) in enumerate(train_loader):
images = images.to(device, dtype=torch.float32)
labels_lst = labels_lst.to(device, dtype=torch.long)
class_lst = class_lst.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
labels = labels.to(device, dtype=torch.long)
outputs, _, _, res_images = model(images)
#logits = torch.sigmoid(logits)
# loss = criterion(logits, labels_lst[:, :masks.shape[1]] * masks, class_lst)
#loss = criterion(logits, labels_lst, class_lst)
loss_seg = ce_criterion(outputs, labels, None)
#masks = ((labels.unsqueeze(dim=1)) != 255).float()
#loss_l2 = l2_criterion(res_images, images) * 0.01
#loss['loss'] += (loss_seg + loss_l2)
##loss['loss_l2'] = loss_l2
if ("seg" not in epoch_records): epoch_records["seg"]=[]
epoch_records["seg"].append(loss_seg.cpu().data.numpy())
#loss_ce = ce_criterion(outputs, labels, None)
#epoch_records['loss_ce'].append(loss_ce.item())
#loss = loss + loss_ce
optimizer.zero_grad()
loss_seg.backward()
optimizer.step()
if batch_idx % 10 == 0:
context = f"Iters {cur_itr}\t"
for key, value in epoch_records.items():
context += f"{key}: {np.mean(value):.4f}\t"
printer(context)
epoch_records = {}
if cur_itr % 1000 == 0:
val(opts, model, val_loader, device)
#for _, (images, labels, labels_true, labels_lst, class_lst) in enumerate(val_loader):
# if np.random.uniform(0, 1) < 0.9: continue
'''
for b in range(images.shape[0]):
visualize(images[b], labels_true[b], logits[b], labels_lst[b], class_lst[b], save_path=os.path.join(val_save_dir, f'{cur_itr}_{b}.png'), denorm=denorm)
# break
'''
model.train()
cur_itr += 1
if cur_itr >= opts.total_itrs:
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'final.pth'))
return epoch_records
scheduler.step()
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'{cur_itr}.pth'))
def train(opts, model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print):
ce_criterion = utils.CrossEntropyLoss(ignore_index=255, size_average=True)
l2_criterion = nn.MSELoss().to(device)
model.train()
epoch_records = {}
cur_itr = 0
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
val_save_dir = os.path.join(opts.output_dir, 'val')
os.makedirs(val_save_dir, exist_ok=True)
while True:
for batch_idx, (images, labels, labels_true, labels_lst, class_lst) in enumerate(train_loader):
images = images.to(device, dtype=torch.float32)
labels_lst = labels_lst.to(device, dtype=torch.long)
class_lst = class_lst.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
labels = labels.to(device, dtype=torch.long)
outputs, logits, _, res_images = model(images)
#logits = torch.sigmoid(logits)
# loss = criterion(logits, labels_lst[:, :masks.shape[1]] * masks, class_lst)
loss = criterion(logits, labels_lst, class_lst)
loss_seg = ce_criterion(outputs, labels, None)
masks = ((labels.unsqueeze(dim=1)) != 255).float()
loss_l2 = l2_criterion(res_images, images) * 0.01
loss['loss'] += loss_l2
loss['loss'] += loss_seg
loss['loss_seg'] = loss_seg
loss['loss_l2'] = loss_l2
for key, value in loss.items():
if key not in epoch_records:
epoch_records[key] = []
epoch_records[key].append(value.item())
#loss_ce = ce_criterion(outputs, labels, None)
#epoch_records['loss_ce'].append(loss_ce.item())
#loss = loss + loss_ce
optimizer.zero_grad()
loss['loss'].backward()
optimizer.step()
if batch_idx % 10 == 0:
context = f"Iters {cur_itr}\t"
for key, value in epoch_records.items():
context += f"{key}: {np.mean(value):.4f}\t"
printer(context)
epoch_records = {}
if cur_itr % 500 == 0:
val(opts, model, val_loader, device)
#for _, (images, labels, labels_true, labels_lst, class_lst) in enumerate(val_loader):
# if np.random.uniform(0, 1) < 0.9: continue
for b in range(images.shape[0]):
visualize(images[b], labels_true[b], logits[b], labels_lst[b], class_lst[b], save_path=os.path.join(val_save_dir, f'{cur_itr}_{b}.png'), denorm=denorm)
# break
model.train()
cur_itr += 1
if cur_itr >= opts.total_itrs:
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'final.pth'))
return epoch_records
scheduler.step()
save_ckpt(batch_idx, model, optimizer, scheduler, os.path.join(opts.output_dir, f'{cur_itr}.pth'))
# if batch_idx % 10 == 0:
# val(opts, model, val_loader, device)
# model.train()
import torch
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel._functions import Scatter
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
r"""
Slices tensors into approximately equal chunks and
distributes them across given GPUs. Duplicates
references to objects that are not tensors.
"""
def scatter_map(obj):
if isinstance(obj, torch.Tensor):
try:
return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
except Exception:
print('obj', obj.size())
print('dim', dim)
print('chunk_sizes', chunk_sizes)
quit()
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(scatter_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
return list(map(list, zip(*map(scatter_map, obj))))
if isinstance(obj, dict) and len(obj) > 0:
return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
return [obj for targets in target_gpus]
# After scatter_map is called, a scatter_map cell will exist. This cell
# has a reference to the actual function scatter_map, which has references
# to a closure that has a reference to the scatter_map cell (because the
# fn is recursive). To avoid this reference cycle, we set the function to
# None, clearing the cell
try:
return scatter_map(inputs)
finally:
scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0):
"""Scatter with support for kwargs dictionary"""
inputs = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else []
kwargs = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
class BalancedDataParallel(DataParallel):
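    # DataParallel variant with an unbalanced split: the first device receives a batch
    # slice of size gpu0_bsz (possibly 0) while the remaining samples are divided evenly
    # across the other GPUs, leaving headroom on GPU 0 where the outputs are gathered.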
def __init__(self, gpu0_bsz, *args, **kwargs):
self.gpu0_bsz = gpu0_bsz
super().__init__(*args, **kwargs)
def forward(self, *inputs, **kwargs):
if not self.device_ids:
return self.module(*inputs, **kwargs)
if self.gpu0_bsz == 0:
device_ids = self.device_ids[1:]
else:
device_ids = self.device_ids
inputs, kwargs = self.scatter(inputs, kwargs, device_ids)
if len(self.device_ids) == 1:
return self.module(*inputs[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids)
if self.gpu0_bsz == 0:
replicas = replicas[1:]
outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
return self.gather(outputs, self.output_device)
def parallel_apply(self, replicas, device_ids, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs, device_ids)
def scatter(self, inputs, kwargs, device_ids):
bsz = inputs[0].size(self.dim)
num_dev = len(self.device_ids)
gpu0_bsz = self.gpu0_bsz
bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1)
if gpu0_bsz < bsz_unit:
chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1)
delta = bsz - sum(chunk_sizes)
for i in range(delta):
chunk_sizes[i + 1] += 1
if gpu0_bsz == 0:
chunk_sizes = chunk_sizes[1:]
else:
return super().scatter(inputs, kwargs, device_ids)
return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
def main():
print(torch.version.cuda)
opts = get_argparser().parse_args()
if opts.dataset.lower() == 'voc':
opts.num_classes = 21
elif opts.dataset.lower() == 'cityscapes':
opts.num_classes = 19
opts.num_classes = 256
os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
device = torch.device(f'cuda:0' if torch.cuda.is_available() else 'cpu')
print("Device: %s" % device)
# Setup random seed
torch.manual_seed(opts.random_seed)
np.random.seed(opts.random_seed)
random.seed(opts.random_seed)
# Setup dataloader
if opts.dataset=='voc' and not opts.crop_val:
opts.val_batch_size = 1
train_dst, val_dst = get_dataset(opts)
train_loader = data.DataLoader(
train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=8)
val_loader = data.DataLoader(
val_dst, batch_size=opts.val_batch_size, shuffle=False, num_workers=8)
print("Dataset: %s, Train set: %d, Val set: %d" %
(opts.dataset, len(train_dst), len(val_dst)))
# Set up model
model_map = {
'deeplabv3_resnet50': network.deeplabv3_resnet50,
'deeplabv3plus_resnet50': network.deeplabv3plus_resnet50,
'deeplabv3_resnet101': network.deeplabv3_resnet101,
'deeplabv3plus_resnet101': network.deeplabv3plus_resnet101,
'deeplabv3_mobilenet': network.deeplabv3_mobilenet,
'deeplabv3plus_mobilenet': network.deeplabv3plus_mobilenet,
'deeplabv3plus_metirc_resnet101': network.deeplabv3plus_metirc_resnet101
}
remain_class = 19 - len(train_dst.unknown_target)
print('class num : '+str(remain_class))
opts.num_classes=remain_class
model = model_map[opts.model](num_classes=remain_class, output_stride=opts.output_stride, metric_dim=opts.metric_dim, finetune=False)
if opts.separable_conv and 'plus' in opts.model:
network.convert_to_separable_conv(model.classifier)
utils.set_bn_momentum(model.backbone, momentum=0.01)
# # Set up metrics
# metrics = StreamSegMetrics(opts.num_classes)
# Set up optimizer
if (opts.finetune):
optimizer = torch.optim.SGD(params=[
{'params': model.backbone.parameters(), 'lr': 0.1*opts.lr},
{'params': model.classifier.parameters(), 'lr': opts.lr},
], lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
else:
optimizer = torch.optim.SGD(params=[
{'params': model.backbone.parameters(), 'lr': 0.1*opts.lr},
{'params': model.classifier.parameters(), 'lr': opts.lr},
], lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
if opts.lr_policy=='poly':
scheduler = utils.PolyLR(optimizer, opts.total_itrs, power=0.9)
elif opts.lr_policy=='step':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)
#criterion = MyDiceLoss(ignore_index=255).to(device)
criterion = CDiceLoss(remain_class).to(device)
utils.mkdir(opts.output_dir)
# Restore
if opts.ckpt is not None and os.path.isfile(opts.ckpt):
# https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402, @PytaichukBohdan
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
model_state_dict = model.state_dict()
checkpoint_state_dict = checkpoint["model_state"]
for key in checkpoint_state_dict:
if model_state_dict[key].shape != checkpoint_state_dict[key].shape:
print(key)
continue
model_state_dict[key] = checkpoint_state_dict[key]
model.load_state_dict(model_state_dict)
#model.load_state_dict(checkpoint["model_state"])
#model = nn.DataParallel(model)
device_ids=list(map(int, opts.gpu_id.split(',')))
#torch.cuda.set_device(device_ids[0])
print(device_ids)
#model = nn.DataParallel(model, device_ids=list(map(int, opts.gpu_id.split(','))))
model = BalancedDataParallel(2, model, dim=0, device_ids=[0,1])
#model = BalancedDataParallel(2, model, dim=0, device_ids=list(map(int, opts.gpu_id.split(','))))
model.to(device)
if opts.continue_training:
optimizer.load_state_dict(checkpoint["optimizer_state"])
scheduler.load_state_dict(checkpoint["scheduler_state"])
print("Training state restored from %s" % opts.ckpt)
print("Model restored from %s" % opts.ckpt)
del checkpoint # free memory
else:
print("[!] Retrain")
#model = nn.DataParallel(model)
model = BalancedDataParallel(2, model, dim=0, device_ids=[0,1])
model.to(device)
if (opts.finetune):
train(opts, model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print)
else:
train_stage1(opts, model, train_loader, val_loader, None, optimizer, scheduler, device, printer=print)
if __name__ == '__main__':
main()
| 28,621 | 42.170437 | 171 | py |
RAML | RAML-master/incremental/main_metric.py | from tqdm import tqdm
import network
import utils
import os
import random
import argparse
import numpy as np
import torch.nn.functional as F
from torch.utils import data
from datasets import VOCSegmentation, Cityscapes, cityscapes, Cityscapes_Novel
from utils import ext_transforms as et
from metrics import StreamSegMetrics
import torch
import torch.nn as nn
from utils.visualizer import Visualizer
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
import sklearn.metrics as Metrics
from torch import Tensor
from typing import Tuple
from sklearn.metrics import f1_score
import cv2
from copy import deepcopy
def convert_label_to_similarity(normed_feature: Tensor, label: Tensor) -> Tuple[Tensor, Tensor]:
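    # Pairwise cosine similarities of the (already normalised) features; the upper
    # triangle is split into within-class (positive) and between-class (negative) pairs.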
similarity_matrix = normed_feature @ normed_feature.transpose(1, 0)
label_matrix = label.unsqueeze(1) == label.unsqueeze(0)
positive_matrix = label_matrix.triu(diagonal=1)
negative_matrix = label_matrix.logical_not().triu(diagonal=1)
similarity_matrix = similarity_matrix.view(-1)
positive_matrix = positive_matrix.view(-1)
negative_matrix = negative_matrix.view(-1)
return similarity_matrix[positive_matrix], similarity_matrix[negative_matrix]
class CircleLoss(nn.Module):
def __init__(self, m: float, gamma: float) -> None:
super(CircleLoss, self).__init__()
self.m = m
self.gamma = gamma
self.soft_plus = nn.Softplus()
def forward(self, sp: Tensor, sn: Tensor) -> Tensor:
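        # Circle-loss style objective: positive similarities are pushed above 1 - m and
        # negative similarities below m, with self-paced weights ap/an and scale gamma,
        # combined through a softplus of the two log-sum-exp terms.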
ap = torch.clamp_min(- sp.detach() + 1 + self.m, min=0.)
an = torch.clamp_min(sn.detach() + self.m, min=0.)
delta_p = 1 - self.m
delta_n = self.m
logit_p = - ap * (sp - delta_p) * self.gamma
logit_n = an * (sn - delta_n) * self.gamma
loss = self.soft_plus(torch.logsumexp(logit_n, dim=0) + torch.logsumexp(logit_p, dim=0))
return loss
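# Usage sketch (illustrative only; shapes and hyper-parameters here are assumptions):
#   feat = F.normalize(torch.randn(8, 128), dim=1)
#   lbl = torch.randint(0, 3, (8,))
#   sp, sn = convert_label_to_similarity(feat, lbl)
#   loss = CircleLoss(m=0.25, gamma=80)(sp, sn)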
def get_argparser():
parser = argparse.ArgumentParser()
# Datset Options
parser.add_argument("--data_root", type=str, default='../data/cityscapes',
help="path to Dataset")
parser.add_argument("--dataset", type=str, default='cityscapes',
choices=['voc', 'cityscapes'], help='Name of dataset')
parser.add_argument("--num_classes", type=int, default=256,
help="num classes (default: None)")
parser.add_argument("--metric_dim", type=int, default=None,
help="num classes (default: None)")
# Deeplab Options
parser.add_argument("--model", type=str, default='deeplabv3plus_metirc_resnet101',
choices=['deeplabv3_resnet50', 'deeplabv3plus_resnet50',
'deeplabv3_resnet101', 'deeplabv3plus_resnet101',
'deeplabv3_mobilenet', 'deeplabv3plus_mobilenet',
'deeplabv3plus_metirc_resnet101'], help='model name')
parser.add_argument("--separable_conv", action='store_true', default=False,
help="apply separable conv to decoder and aspp")
parser.add_argument("--output_stride", type=int, default=16, choices=[8, 16])
# Train Options
parser.add_argument("--test_only", action='store_true', default=False)
parser.add_argument("--save_val_results", action='store_true', default=False,
help="save segmentation results to \"./results\"")
parser.add_argument("--total_itrs", type=int, default=10000,
help="epoch number (default: 30k)")
parser.add_argument("--lr", type=float, default=0.1,
help="learning rate (default: 0.01)")
parser.add_argument("--lr_policy", type=str, default='poly', choices=['poly', 'step'],
help="learning rate scheduler policy")
parser.add_argument("--step_size", type=int, default=10000)
parser.add_argument("--crop_val", action='store_true', default=False,
help='crop validation (default: True)')
parser.add_argument("--batch_size", type=int, default=4,
help='batch size (default: 16)')
parser.add_argument("--val_batch_size", type=int, default=1,
help='batch size for validation (default: 4)')
parser.add_argument("--crop_size", type=int, default=512)
parser.add_argument("--ckpt", default="output/final.pth", type=str,
help="restore from checkpoint")
parser.add_argument("--continue_training", action='store_true', default=False)
parser.add_argument("--loss_type", type=str, default='cross_entropy',
choices=['cross_entropy', 'focal_loss'], help="loss type (default: False)")
parser.add_argument("--gpu_id", type=str, default='0',
help="GPU ID")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help='weight decay (default: 1e-4)')
parser.add_argument("--random_seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--print_interval", type=int, default=10,
help="print interval of loss (default: 10)")
parser.add_argument("--val_interval", type=int, default=100,
help="epoch interval for eval (default: 100)")
parser.add_argument("--download", action='store_true', default=False,
help="download datasets")
parser.add_argument("--name", type=str, default='',help="download datasets")
parser.add_argument("--output_dir", type=str, default='output_metric', help="output path")
parser.add_argument("--novel_dir", type=str, default='./novel/', help="novel path")
parser.add_argument("--test_mode", type=str, default='16_3', choices=['16_1','16_3','12','14'],
help="test mode")
# PASCAL VOC Options
parser.add_argument("--year", type=str, default='2012',
choices=['2012_aug', '2012', '2011', '2009', '2008', '2007'], help='year of VOC')
# Visdom options
parser.add_argument("--enable_vis", action='store_true', default=False,
help="use visdom for visualization")
parser.add_argument("--vis_port", type=str, default='13570',
help='port for visdom')
parser.add_argument("--vis_env", type=str, default='main',
help='env for visdom')
parser.add_argument("--vis_num_samples", type=int, default=8,
help='number of samples for visualization (default: 8)')
return parser
def get_dataset(opts):
""" Dataset And Augmentation
"""
if opts.dataset == 'voc':
train_transform = et.ExtCompose([
#et.ExtResize(size=opts.crop_size),
et.ExtRandomScale((0.5, 2.0)),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size), pad_if_needed=True),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
if opts.crop_val:
val_transform = et.ExtCompose([
et.ExtResize(opts.crop_size),
et.ExtCenterCrop(opts.crop_size),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
else:
val_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='train', download=opts.download, transform=train_transform)
val_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='val', download=False, transform=val_transform)
if opts.dataset == 'cityscapes':
train_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),
et.ExtColorJitter( brightness=0.5, contrast=0.5, saturation=0.5 ),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
val_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = Cityscapes(root=opts.data_root,
split='train', transform=train_transform)
val_dst = Cityscapes(root=opts.data_root,
split='val', transform=val_transform)
return train_dst, val_dst
def save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, path):
""" save current model
"""
torch.save({
"batch_idx": batch_idx,
"model_state": model.module.state_dict(),
"metric_model": metric_model.state_dict(),
"optimizer_state": optimizer.state_dict(),
"scheduler_state": scheduler.state_dict(),
}, path)
print("Model saved as %s" % path)
def get_spilt_center(feature,target,metric_model,label,device):
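    # Split the pixels of `label` into connected components and return one
    # embedding per sufficiently large component (>= 100 px): features are
    # averaged over the component and mapped through metric_model.forward_feature.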
_, H, W, C = feature.shape
    feature = feature.view(H,W,C) # (H, W, C)
    target = target.view(H,W) # (H, W)
#feature = feature[target==label] # (N, M)
now_sum = torch.zeros(C,).to(device)
mask = target == label
print(mask.shape)
now_center_embedding=[]
mask = mask.cpu().data.numpy()
mask = mask.astype(np.uint8)
num_object, connect = cv2.connectedComponents(mask)
#novel_sum=0
for k in range(num_object):
now_connect = (connect == k)[np.newaxis, ...].astype(np.uint8)
#now_mask = mask[now_connect]
now_mask = now_connect * mask
print(np.sum(now_mask))
if (np.sum(now_mask)<100): continue
print(now_mask.shape)
print(feature.shape)
now_feature=feature[now_mask==1]
print(now_feature.shape)
now_feature=now_feature.view(-1,C)
now_feature=torch.sum(now_feature,dim=0)/np.sum(now_mask)
#now_feature=torch.Tensor(now_feature).to(device)
now_embedding=metric_model.forward_feature(now_feature.unsqueeze(dim=0))[0].detach().cpu().numpy() # (128,)
now_center_embedding.append(now_embedding)
return now_center_embedding
def get_all_center(feature,target,metric_model,label):
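    # Average the features of every pixel labelled `label` and return the
    # corresponding 128-d embedding from metric_model.forward_feature.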
_, H, W, C = feature.shape
feature = feature.view(-1,C) # (H*W, M)
target = target.flatten() # (H*W)
feature = feature[target==label] # (N, M)
feature = torch.sum(feature, dim=0)
novel_sum = torch.sum(target == label)
now_center = feature / novel_sum
now_center_embedding = metric_model.forward_feature(now_center.unsqueeze(dim=0))[0].detach().cpu().numpy() # (128,)
return now_center_embedding
def generate_novel(novel_path_name, unknown_list, model, metric_model, device):
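    # Build one prototype embedding per unknown class in `unknown_list`: each
    # few-shot image contributes a class-mean embedding (per-component means if
    # the class is in spilt_list), and the prototype is the mean over images.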
model.eval()
metric_model.eval()
novel_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
center_embedding = {}
spilt_list=[]
with torch.no_grad():
for x in unknown_list: # [13, 14, 15]
print('generate novel: '+str(x))
center=[]
novel_dst = Cityscapes_Novel(novel_path=novel_path_name, novel_no=x, transform=novel_transform)
novel_loader = data.DataLoader(novel_dst, batch_size=1, shuffle=False, num_workers=4)
novel_sum = 0
for (image, target) in novel_loader:
assert image.shape[0] == 1
image = image.to(device)
target = target.to(device,dtype=torch.long)
_,_,feature,_ = model(image)
target = F.interpolate(target.unsqueeze(dim=1).float(), size=feature.shape[-2:], mode='nearest')[:, 0]
feature = feature.permute(0, 2, 3, 1) # (1, H, W, M)
_, H, W, C = feature.shape
if (x in spilt_list):
now_center_embedding=get_spilt_center(feature,target,metric_model,x,device)
for now_center in now_center_embedding:
center.append(now_center)
else:
now_center_embedding=get_all_center(feature,target,metric_model,label=x)
center.append(now_center_embedding)
#center = center / novel_sum # (M,)
center=np.array(center)
print(center.shape)
'''
random select novel
np.random.seed(333333)
a = np.random.choice(100,1,False)
center=center[a]
print(center.shape)
'''
center=np.mean(center,axis=0)
center_embedding[x] = deepcopy(center)
return center_embedding
'''
def generate_novel(novel_path_name, unknown_list, model, metric_model, device):
model.eval()
metric_model.eval()
novel_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
center_embedding = {}
with torch.no_grad():
for x in unknown_list: # [13, 14, 15]
print('generate novel: '+str(x))
center=None
novel_dst = Cityscapes_Novel(novel_path=novel_path_name, novel_no=x, transform=novel_transform)
novel_loader = data.DataLoader(novel_dst, batch_size=1, shuffle=False, num_workers=4)
novel_sum = 0
for (image, target) in novel_loader:
assert image.shape[0] == 1
image = image.to(device)
target = target.to(device,dtype=torch.long)
_,_,feature,_ = model(image)
target = F.interpolate(target.unsqueeze(dim=1).float(), size=feature.shape[-2:], mode='nearest')[:, 0]
feature = feature.permute(0, 2, 3, 1) # (1, H, W, M)
_, H, W, C = feature.shape
feature = feature.view(-1, C) # (H*W, M)
target = target.flatten() # (H*W)
feature = feature[target==x] # (N, M)
feature = torch.sum(feature, dim=0)
if center is None:
center = torch.zeros(C,).to(device)
center += feature
novel_sum += torch.sum(target == x)
center = center / novel_sum # (M,)
center_embedding[x] = metric_model.forward_feature(center.unsqueeze(dim=0))[0].detach().cpu().numpy() # (128,)
return center_embedding
'''
def cosine_similarity(x,y):
num = x.dot(y.T)
denom = np.linalg.norm(x) * np.linalg.norm(y)
return num / denom
from copy import deepcopy
def concat_logits(logits, thereshold=100, erode=True, tag=None):
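    # Fuse the meta-channel logits selected by `tag` into one binary mask,
    # optionally clean it with a morphological closing (dilate then erode), and
    # drop connected components smaller than `thereshold` pixels. Returns the
    # cleaned mask, the ids of the surviving components and the component map.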
if (isinstance(tag,list)):
mask = np.array(tag)
logits = np.transpose(logits)
logits = logits * mask
logits = np.transpose(logits)
logits = (logits >= 0.5).astype(np.uint8)
logits = np.sum(logits,axis=0)
logits[logits>=1]=1
mask = logits == 1
logits = logits.astype(np.uint8)
    if erode:
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
logits = cv2.dilate(logits, kernel)
logits = cv2.erode(logits, kernel)
#print(logits.shape)
num_object, connect = cv2.connectedComponents(logits)
region_list = []
for k in range(1,num_object):
now_connect = (connect == k)[np.newaxis, ...].astype(np.uint8)
#now_sum = np.sum(now_connect)
#print(now_sum)
if (np.sum(now_connect) < thereshold):
mask[connect == k] = 0
continue
region_list.append(k)
logits = logits * mask
return logits, region_list, connect
def check_novel_logit(opts,model,metric_model,class_no,meta_channel_num,device,beta=0.15):
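    # Determine which meta channels fire for novel class `class_no`: a channel is
    # tagged with 1 when its thresholded logit covers more than `beta` of the
    # class mask on any of the few-shot images.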
model.eval()
metric_model.eval()
novel_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
center_embedding = {}
spilt_list=[]
channel_tag=[0]*meta_channel_num
with torch.no_grad():
print('generate novel: '+str(class_no))
center=[]
novel_dst = Cityscapes_Novel(novel_path=opts.novel_dir, novel_no=class_no, transform=novel_transform)
novel_loader = data.DataLoader(novel_dst, batch_size=1, shuffle=False, num_workers=4)
novel_sum = 0
for (image, target) in novel_loader:
assert image.shape[0] == 1
#image, target = novel_transform(image,target)
image = image.to(device)
target = target.to(device,dtype=torch.long)
output,logit,feature,_ = model(image)
output = torch.argmax(output[0], dim=0).detach().cpu().numpy()
mask = target == class_no
target = F.interpolate(target.unsqueeze(dim=1).float(), size=feature.shape[-2:], mode='nearest')[:, 0]
#print(target.shape)
#print(mask.shape)
logit = logit[0, (-meta_channel_num):]
#print(logit.shape)
logit = logit * mask
mask = mask.data.cpu().numpy()
all_sum=np.sum(mask)
logit = logit.detach().cpu().numpy()
logit = (logit >= 0.5).astype(np.uint8)
for x in range(logit.shape[0]):
if (np.sum(logit[x])>all_sum*beta): channel_tag[x]=1
#print(logit.shape)
#for x in range(channel_num):
#print(image.shape)
#image= denorm(image.detach().cpu().numpy())[0] * 255
#print(image.shape)
image = (denorm(image.detach().cpu().numpy())[0] * 255).transpose(1, 2, 0).astype(np.uint8)
'''
plt.imshow(image)
plt.show()
plt.close()
_, axarr = plt.subplots(1, logit.shape[0], figsize=(5*logit.shape[0], 5))
for i in range(logit.shape[0]):
now_logit=cv2.resize(logit[i], output.shape[::-1], interpolation=cv2.INTER_NEAREST)
axarr[i].imshow(image)
axarr[i].imshow(now_logit, alpha=0.5)
plt.show()
plt.close()
'''
'''
feature = feature.permute(0, 2, 3, 1) # (1, H, W, M)
_, H, W, C = feature.shape
if (x in spilt_list):
now_center_embedding=get_spilt_center(feature,target,metric_model,label=x)
for now_center in now_center_embedding:
center.append(now_center)
else:
now_center_embedding=get_all_center(feature,target,metric_model,label=x)
center.append(now_center_embedding)
'''
#center = center / novel_sum # (M,)
'''
center=np.array(center)
print(center.shape)
center=np.mean(center,axis=0)
center_embedding[x] = deepcopy(center)
'''
return channel_tag
def val(opts, model, metric_model, train_loader, val_loader, device,):
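    # Evaluate on the validation set. The closed-set prediction is scored with
    # metrics16; regions proposed by the meta channels are then matched to the
    # novel prototypes by cosine similarity (>= 0.8), relabelled accordingly and
    # scored with metrics19. Returns the unknown-class mean IoU.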
remain_class = 19 - len(Cityscapes.unknown_target)
metrics16 = StreamSegMetrics(19)
metrics19 = StreamSegMetrics(19, remain_class)
model.eval()
metric_model.eval()
if opts.save_val_results:
if not os.path.exists('results_1'):
os.mkdir('results_1')
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
img_id = 0
# val_save_dir = os.path.join(opts.output_dir, 'val')
# os.makedirs(val_save_dir, exist_ok=True)
# denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
if (opts.test_mode == '16_1'):
center_embedding = generate_novel(opts.novel_dir, [13], model, metric_model, device) # {13: (128,), 14: (128,), 15: (128,)}
else:
center_embedding = generate_novel(opts.novel_dir, Cityscapes.unknown_target, model, metric_model, device) # {13: (128,), 14: (128,), 15: (128,)}
#using when 16+1 setting
#center_embedding = generate_novel(opts.novel_dir, [13], model, metric_model, device) # {13: (128,), 14: (128,), 15: (128,)}
name=['sky','person','rider','car','truck','bus','train','motorcycle','bicycle']
meta_channel_num=20-remain_class
all_tag=[0]*meta_channel_num
if (opts.test_mode == '16_1'):
for x in [13]:
novel_tag=check_novel_logit(opts, model,metric_model,x, meta_channel_num=meta_channel_num, device=device)
for y in range(meta_channel_num):
if (novel_tag[y]==1): all_tag[y]=1
else:
for x in Cityscapes.unknown_target:
novel_tag=check_novel_logit(opts, model,metric_model,x, meta_channel_num=meta_channel_num, device=device)
for y in range(meta_channel_num):
if (novel_tag[y]==1): all_tag[y]=1
#using when 16+1 setting
'''
for x in [13]:
novel_tag=check_novel_logit(opts, model,metric_model,x, meta_channel_num=meta_channel_num, device=device)
for y in range(meta_channel_num):
if (novel_tag[y]==1): all_tag[y]=1
'''
#all_tag = np.array(all_tag)
print(all_tag)
miou_all=[]
miou_unknown=[]
for _, (images, labels, labels_true, _, _) in tqdm(enumerate(val_loader)):
assert images.shape[0] == 1
with torch.no_grad():
images = images.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
outputs, logits, features, _ = model(images) # outputs: (1, 16, H, W), logits: (1, 20, H, W), features: (1, 256, H/4, W/4)
known_class = outputs.shape[1]
h,w=outputs.shape[2],outputs.shape[3]
#outputs = logits[:,0:known_class,:,:].clone()
logits = F.interpolate(logits, size=features.shape[-2:], mode='bilinear', align_corners=False) # (1, 20, H/4, W/4)
features = features[0].detach().cpu().numpy() # (256, H/4, W/4)
outputs = torch.argmax(outputs, dim=1)[0].detach().cpu().numpy() # (H, W)
metrics16.update(labels[0].detach().cpu().numpy(), outputs)
outputs19 = deepcopy(outputs)
# in 16 + 3 setting and 16 + 1 setting
if ('16' in opts.test_mode):
outputs19[outputs19 == 13] = 16
outputs19[outputs19 == 14] = 17
outputs19[outputs19 == 15] = 18
# in 12 + 7 setting 10->12 11,12->10,11
if ('12' in opts.test_mode):
outputs19[outputs19 == 11] = 12
outputs19[outputs19 == 10] = 11
#in 14 + 5 setting unknown_target = [10,13,14,15,16]
# 11 -> 10 12 -> 11 17 -> 12 18 -> 13
if ('14' in opts.test_mode):
outputs19[outputs19 == 13] = 18
outputs19[outputs19 == 12] = 17
outputs19[outputs19 == 11] = 12
outputs19[outputs19 == 10] = 11
logits = logits[0].detach().cpu().numpy() # (20, H/4, W/4)
logits = logits[known_class:] # (3, H/4, W/4)
# concat inference
logits, region, connect = concat_logits(logits, thereshold=250, tag=all_tag)
for k in region:
mask = (connect == k)[np.newaxis, ...].astype(np.uint8) # (1, H/4, W/4)
embedding = (features * mask).reshape(features.shape[0], -1).sum(axis=-1) # (256,)
embedding = embedding / np.sum(mask)
embedding = torch.Tensor(embedding).unsqueeze(dim=0).to(device, dtype=torch.float32) # (1, 256)
embedding = metric_model.forward_feature(embedding)[0].cpu().detach().numpy() # (128,)
tmp_key, tmp_cos = None, None
for key, value in center_embedding.items():
cos = cosine_similarity(embedding, value)
if cos >= 0.8:
if tmp_cos is None or cos > tmp_cos:
tmp_key = key
tmp_cos = cos
if tmp_key is not None:
mask = cv2.resize(mask[0], outputs19.shape[::-1], interpolation=cv2.INTER_NEAREST)
outputs19 = mask * tmp_key + outputs19 * (1 - mask)
'''
# default inference
logits = (logits >= 0.5).astype(np.uint8) # (3, H/4, W/4)
for c in range(logits.shape[0]):
logit = logits[c] # (H/4, W/4)
#Hl, Wl = logit.shape
#logit = cv2.resize(logit, (Wl//4, Hl//4), interpolation=cv2.INTER_NEAREST)
num_object, connect = cv2.connectedComponents(logit)
#connect = cv2.resize(connect, (Wl, Hl), interpolation=cv2.INTER_NEAREST)
for k in range(1, num_object+1):
mask = (connect == k)[np.newaxis, ...].astype(np.uint8) # (1, H/4, W/4)
if np.sum(mask) < 100: continue
embedding = (features * mask).reshape(features.shape[0], -1).sum(axis=-1) # (256,)
embedding = embedding / np.sum(mask)
embedding = torch.Tensor(embedding).unsqueeze(dim=0).to(device, dtype=torch.float32) # (1, 256)
embedding = metric_model.forward_feature(embedding)[0].cpu().detach().numpy() # (128,)
tmp_key, tmp_cos = None, None
for key, value in center_embedding.items():
cos = cosine_similarity(embedding, value)
if cos >= 0.75:
if tmp_cos is None or cos > tmp_cos:
tmp_key = key
tmp_cos = cos
if tmp_key is not None:
mask = cv2.resize(mask[0], outputs19.shape[::-1], interpolation=cv2.INTER_NEAREST)
outputs19 = mask * tmp_key + outputs19 * (1 - mask)
'''
#using in 16+3 setting
if ('16' in opts.test_mode):
for x in range(13,16):
labels_true[labels_true==x]+=103
outputs19[outputs19==x]+=103
labels_true[labels_true==(x+3)]-=3
outputs19[outputs19==(x+3)]-=3
for x in range(116,119):
labels_true[labels_true==x]-=100
outputs19[outputs19==x]-=100
if (opts.test_mode == '16_1'):
for x in range(17,19):
labels_true[labels_true==x] = 255
# using in 12 + 7 setting 10->12 11,12->10,11
if ('12' in opts.test_mode):
labels_true[labels_true==10] = 112
outputs19[outputs19==10] =112
labels_true[labels_true == 11] = 10
outputs19[outputs19==11] = 10
labels_true[labels_true == 12] = 11
outputs19[outputs19 == 12] = 11
labels_true[labels_true==112] -= 100
outputs19[outputs19==112] -= 100
'''
labels_true[labels_true==10] = 112
outputs19[outputs19==10] =112
labels_true[labels_true == 11] = 10
outputs19[outputs19==11] = 10
labels_true[labels_true == 12] = 11
outputs19[outputs19 == 12] = 11
labels_true[labels_true==112] -= 100
outputs19[outputs19==112] -= 100
'''
#in 14 + 5 setting unknown_target = [10,13,14,15,16]
# 11 -> 10 12 -> 11 17 -> 12 18 -> 13
# 10 -> 14 ,13 ->15
if ('14' in opts.test_mode):
labels_true[labels_true == 10] = 114
outputs19[outputs19 == 10] = 114
for x in range(13,17):
labels_true[labels_true == x] = 100+2+x
outputs19[outputs19 == x] = 100+2+x
for x in range(11,13):
labels_true[labels_true == x] = x-1
outputs19[outputs19 == x] = x-1
for x in range(17,19):
labels_true[labels_true == x] = x-5
outputs19[outputs19 == x] = x-5
for x in range(114,119):
labels_true[labels_true == x] -=100
outputs19[outputs19 == x] -=100
metrics19.update(labels_true[0].detach().cpu().numpy(), outputs19)
'''
for x in range(13,16):
labels_true[labels_true==x]+=103
outputs19[outputs19==x]+=103
labels_true[labels_true==(x+3)]-=3
outputs19[outputs19==(x+3)]-=3
for x in range(116,119):
labels_true[labels_true==x]-=100
outputs19[outputs19==x]-=100
'''
'''
now_all_IoU = metrics19.get_results()['Mean IoU']
now_unkown_IoU = metrics19.get_results()['Unknown IoU']
miou_all.append(now_all_IoU)
miou_unknown.append(now_unkown_IoU)
metrics19.reset()
'''
#print(labels_true.shape)
#print(outputs19.shape)
if opts.save_val_results:
assert images.shape[0] == 1
target = labels_true[0].detach().cpu().numpy()
image = images[0].detach().cpu().numpy()
pred = outputs19
#pred = pred.reshape(h,w)
image = (denorm(image) * 255).transpose(1, 2, 0).astype(np.uint8)
target = train_loader.dataset.decode_target(target).astype(np.uint8)
pred = train_loader.dataset.decode_target(pred).astype(np.uint8)
#scores = (255 * scores).squeeze().astype(np.uint8)
Image.fromarray(image).save('results_1/%d_image.png' % img_id)
Image.fromarray(target).save('results_1/%d_target.png' % img_id)
Image.fromarray(pred).save('results_1/%d_pred.png' % img_id)
#Image.fromarray(scores).save('results/%d_scores.png' % img_id)
# np.save('results/%d_dis_sum.npy' % img_id, dis_sum_map
img_id += 1
score16 = metrics16.get_results()
score19 = metrics19.get_results()
now_IoU = score19['Unknown IoU']
print('16 classes')
print(metrics16.to_str(score16))
print()
print('19 classes')
print(metrics19.to_str(score19))
'''
for x in range(0,100):
print(x,miou_all[x],miou_unknown[x])
'''
return now_IoU
def train(opts, model, metric_model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print):
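    # Train only the metric head: the segmentation model stays frozen in eval
    # mode, the loss combines CircleLoss on pairwise similarities (weighted 0.1)
    # with cross-entropy on the auxiliary classifier, and the checkpoint with the
    # best unknown-class IoU from val() is kept as best.pth.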
ce_criterion = nn.CrossEntropyLoss().to(device)
model.eval()
metric_model.train()
epoch_records = {'f1': []}
cur_itr = 0
best_IoU = 0
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
#val_save_dir = os.path.join(opts.output_dir, 'val')
#os.makedirs(val_save_dir, exist_ok=True)
while True:
for batch_idx, (images, labels, labels_true, labels_lst, class_lst) in enumerate(train_loader):
images = images.to(device, dtype=torch.float32)
labels_lst = labels_lst.to(device, dtype=torch.long)
class_lst = class_lst.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
labels = labels.to(device, dtype=torch.long)
_, _, features, _ = model(images)
labels_lst = F.interpolate(labels_lst.float(), size=features.shape[-2:], mode='nearest')
new_features, new_labels, logits = metric_model(features, labels_lst)
cir_loss = criterion(*convert_label_to_similarity(new_features, new_labels)) * 0.1
ce_loss = ce_criterion(logits, new_labels.long())
loss = {
'loss': cir_loss + ce_loss,
'cir_loss': cir_loss,
'ce_loss': ce_loss,
}
for key, value in loss.items():
if key not in epoch_records:
epoch_records[key] = []
epoch_records[key].append(value.item())
optimizer.zero_grad()
loss['loss'].backward()
optimizer.step()
f1 = f1_score(new_labels.detach().cpu().numpy(),
torch.argmax(logits, dim=1).detach().cpu().numpy(),
average='macro')
epoch_records['f1'].append(f1)
if batch_idx % 100 == 0:
context = f"Iters {cur_itr}\t"
for key, value in epoch_records.items():
context += f"{key}: {np.mean(value):.4f}\t"
printer(context)
epoch_records = {'f1': []}
if cur_itr and cur_itr % 1000 == 0:
now_IoU = val(opts, model, metric_model, train_loader, val_loader, device)
if (now_IoU > best_IoU):
best_IoU = now_IoU
save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, os.path.join(opts.output_dir, f'best.pth'))
print('best IoU :'+str(best_IoU))
model.eval()
metric_model.train()
cur_itr += 1
if cur_itr >= opts.total_itrs:
save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, os.path.join(opts.output_dir, f'final.pth'))
val(opts, model, metric_model, train_loader, val_loader, device)
return epoch_records
scheduler.step()
save_ckpt(batch_idx, model, metric_model, optimizer, scheduler, os.path.join(opts.output_dir, f'{cur_itr}.pth'))
from dropblock import DropBlock2D
class MetricModel(nn.Module):
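    # Two-layer MLP that projects 256-d region features into a 128-d metric space,
    # plus a bias-free linear classifier over the known classes. DropBlock is
    # applied to the label maps so each class yields several partially-masked
    # region samples per batch.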
def __init__(self, known_class):
super().__init__()
self.model = nn.Sequential(
nn.Linear(256, 128),
nn.ReLU(inplace=True),
nn.Linear(128, 128))
self.classifier = nn.Linear(128, known_class, bias=False)
self.known_class = known_class
self.dropblock = DropBlock2D(block_size=3, drop_prob=0.3)
def forward(self, feature, label_lst):
# feature: (B, 256, H, W)
# label_lst: (B, 17, H, W)
label_lst = label_lst[:, :self.known_class]
new_feature, new_label = [], []
for _ in range(self.known_class):
tmp_label_lst = self.dropblock(label_lst) # (B, 16, H, W)
for c in range(tmp_label_lst.shape[1]):
tmp_feature = (feature * tmp_label_lst[:, c:c+1, :, :]).view(feature.shape[0], feature.shape[1], -1) # (B, 256, H*W)
tmp_feature = tmp_feature.sum(dim=-1) # (B, 256)
tmp_num = tmp_label_lst[:, c:c+1, :, :].view(tmp_label_lst.shape[0], -1) # (B, H*W)
tmp_num = tmp_num.sum(dim=-1) # (B,)
keep_ind = tmp_num != 0
if keep_ind.shape[0]:
tmp_feature = tmp_feature[keep_ind]
tmp_num = tmp_num[keep_ind]
tmp_feature = tmp_feature / tmp_num.unsqueeze(dim=1) # (B, 256)
new_feature.append(tmp_feature)
new_label.append(torch.ones(tmp_feature.shape[0])*c)
new_feature = torch.cat(new_feature, dim=0) # (N, 256)
new_feature = self.model(new_feature) # (N, 128)
new_label = torch.cat(new_label, dim=0).to(feature.device) # (N,)
logit = self.classifier(new_feature) # (N, 16)
return F.normalize(new_feature), new_label.long(), logit
def forward_feature(self, feature):
# feature: (1, 256)
new_feature = self.model(feature) # (1, 128)
return F.normalize(new_feature)
def main():
print(torch.version.cuda)
opts = get_argparser().parse_args()
if opts.dataset.lower() == 'voc':
opts.num_classes = 21
elif opts.dataset.lower() == 'cityscapes':
opts.num_classes = 19
os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("Device: %s" % device)
# Setup random seed
torch.manual_seed(opts.random_seed)
np.random.seed(opts.random_seed)
random.seed(opts.random_seed)
# Setup dataloader
if opts.dataset=='voc' and not opts.crop_val:
opts.val_batch_size = 1
train_dst, val_dst = get_dataset(opts)
train_loader = data.DataLoader(
train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=8)
val_loader = data.DataLoader(
val_dst, batch_size=opts.val_batch_size, shuffle=False, num_workers=8)
print("Dataset: %s, Train set: %d, Val set: %d" %
(opts.dataset, len(train_dst), len(val_dst)))
unknown_num = len(train_dst.unknown_target)
remain_class = opts.num_classes - unknown_num
opts.num_classes = remain_class
# Set up model
model_map = {
'deeplabv3_resnet50': network.deeplabv3_resnet50,
'deeplabv3plus_resnet50': network.deeplabv3plus_resnet50,
'deeplabv3_resnet101': network.deeplabv3_resnet101,
'deeplabv3plus_resnet101': network.deeplabv3plus_resnet101,
'deeplabv3_mobilenet': network.deeplabv3_mobilenet,
'deeplabv3plus_mobilenet': network.deeplabv3plus_mobilenet,
'deeplabv3plus_metirc_resnet101': network.deeplabv3plus_metirc_resnet101
}
model = model_map[opts.model](num_classes=opts.num_classes, output_stride=opts.output_stride, metric_dim=opts.metric_dim)
if opts.separable_conv and 'plus' in opts.model:
network.convert_to_separable_conv(model.classifier)
utils.set_bn_momentum(model.backbone, momentum=0.01)
# # Set up metrics
# metrics = StreamSegMetrics(opts.num_classes)
#criterion = MyDiceLoss(ignore_index=255).to(device)
criterion = CircleLoss(m=0.25, gamma=8.0).to(device)
utils.mkdir(opts.output_dir)
# Restore
if opts.ckpt is not None and os.path.isfile(opts.ckpt):
# https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402, @PytaichukBohdan
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
res = model.load_state_dict(checkpoint["model_state"])
print(res)
model = nn.DataParallel(model)
model.to(device)
# if opts.continue_training:
# optimizer.load_state_dict(checkpoint["optimizer_state"])
# scheduler.load_state_dict(checkpoint["scheduler_state"])
# print("Training state restored from %s" % opts.ckpt)
print("Model restored from %s" % opts.ckpt)
del checkpoint # free memory
else:
print("[!] Retrain")
model = nn.DataParallel(model)
model.to(device)
for _, param in model.named_parameters():
param.requires_grad = False
metric_model = MetricModel(remain_class).to(device)
optimizer = torch.optim.SGD(metric_model.parameters(), lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
if opts.lr_policy=='poly':
scheduler = utils.PolyLR(optimizer, opts.total_itrs, power=0.9)
elif opts.lr_policy=='step':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)
if (opts.test_only):
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
metric_model.load_state_dict(checkpoint["metric_model"])
val(opts, model, metric_model, train_loader, val_loader, device)
return
#res = model.load_state_dict(checkpoint["model_state"])
print(res)
#model = nn.DataParallel(model)
#model.to(device)
train(opts, model, metric_model, train_loader, val_loader, criterion, optimizer, scheduler, device, printer=print)
if __name__ == '__main__':
main()
| 40,855 | 43.408696 | 152 | py |
RAML | RAML-master/incremental/test_metric.py | from datasets.cityscapes_novel import Cityscapes_Novel
from tqdm import tqdm
import network
import utils
import os
import random
import argparse
import numpy as np
import torch.nn.functional as F
from torch.utils import data
from datasets import VOCSegmentation, Cityscapes, Cityscapes_Novel
from utils import ext_transforms as et
from metrics import StreamSegMetrics
from collections import namedtuple
from utils import colorEncode
import torch
import torch.nn as nn
from utils.visualizer import Visualizer
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
import sklearn.metrics as Metrics
from sklearn.mixture import GaussianMixture
from statsmodels.distributions.empirical_distribution import ECDF
import joblib
import json
from sklearn import manifold
import queue
import cv2
from copy import deepcopy
CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id',
'has_instances', 'ignore_in_eval', 'color'])
classes = [
CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
CityscapesClass('license plate', -1, 255, 'vehicle', 7, False, True, (0, 0, 142)),
]
train_id_to_color = [c.color for c in classes if (c.train_id != -1 and c.train_id != 255)]
train_id_to_color.append([255, 255, 255])
colors = np.array(train_id_to_color)
colors = np.uint8(colors)
from dropblock import DropBlock2D
class MetricModel(nn.Module):
def __init__(self):
super().__init__()
self.model = nn.Sequential(
nn.Linear(256, 128),
nn.ReLU(inplace=True),
nn.Linear(128, 128))
self.classifier = nn.Linear(128, 10, bias=False)
self.dropblock = DropBlock2D(block_size=3, drop_prob=0.3)
def forward(self, feature, label_lst):
# feature: (B, 256, H, W)
# label_lst: (B, 17, H, W)
label_lst = label_lst[:, :10]
new_feature, new_label = [], []
for _ in range(10):
tmp_label_lst = self.dropblock(label_lst) # (B, 16, H, W)
for c in range(tmp_label_lst.shape[1]):
tmp_feature = (feature * tmp_label_lst[:, c:c+1, :, :]).view(feature.shape[0], feature.shape[1], -1) # (B, 256, H*W)
tmp_feature = tmp_feature.sum(dim=-1) # (B, 256)
tmp_num = tmp_label_lst[:, c:c+1, :, :].view(tmp_label_lst.shape[0], -1) # (B, H*W)
tmp_num = tmp_num.sum(dim=-1) # (B,)
keep_ind = tmp_num != 0
if keep_ind.shape[0]:
tmp_feature = tmp_feature[keep_ind]
tmp_num = tmp_num[keep_ind]
tmp_feature = tmp_feature / tmp_num.unsqueeze(dim=1) # (B, 256)
new_feature.append(tmp_feature)
new_label.append(torch.ones(tmp_feature.shape[0])*c)
new_feature = torch.cat(new_feature, dim=0) # (N, 256)
new_feature = self.model(new_feature) # (N, 128)
new_label = torch.cat(new_label, dim=0).to(feature.device) # (N,)
logit = self.classifier(new_feature) # (N, 16)
return F.normalize(new_feature), new_label.long(), logit
def forward_feature(self, feature):
# feature: (1, 256)
new_feature = self.model(feature) # (1, 128)
return F.normalize(new_feature)
def get_argparser():
parser = argparse.ArgumentParser()
    # Dataset Options
parser.add_argument("--data_root", type=str, default='./datasets/data',
help="path to Dataset")
parser.add_argument("--dataset", type=str, default='voc',
choices=['voc', 'cityscapes'], help='Name of dataset')
parser.add_argument("--num_classes", type=int, default=None,
help="num classes (default: None)")
# Deeplab Options
parser.add_argument("--model", type=str, default='deeplabv3plus_metirc_resnet101',
choices=['deeplabv3_resnet50', 'deeplabv3plus_resnet50',
'deeplabv3_resnet101', 'deeplabv3plus_resnet101',
'deeplabv3_mobilenet', 'deeplabv3plus_mobilenet',
'deeplabv3plus_embedding_resnet101','deeplabv3plus_metirc_resnet101'], help='model name')
parser.add_argument("--separable_conv", action='store_true', default=False,
help="apply separable conv to decoder and aspp")
parser.add_argument("--output_stride", type=int, default=16, choices=[8, 16])
parser.add_argument("--metric_dim", type=int, default=None,
help="num classes (default: None)")
# Train Options
parser.add_argument("--test_only", action='store_true', default=False)
parser.add_argument("--save_val_results", action='store_true', default=False,
help="save segmentation results to \"./results\"")
parser.add_argument("--total_itrs", type=int, default=30e3,
help="epoch number (default: 30k)")
parser.add_argument("--lr", type=float, default=0.01,
help="learning rate (default: 0.01)")
parser.add_argument("--lr_policy", type=str, default='poly', choices=['poly', 'step'],
help="learning rate scheduler policy")
parser.add_argument("--step_size", type=int, default=10000)
parser.add_argument("--crop_val", action='store_true', default=False,
help='crop validation (default: False)')
parser.add_argument("--batch_size", type=int, default=16,
help='batch size (default: 16)')
parser.add_argument("--val_batch_size", type=int, default=1,
help='batch size for validation (default: 4)')
parser.add_argument("--crop_size", type=int, default=513)
parser.add_argument("--center", action='store_true', default=False,
help="use center checkpoint")
parser.add_argument("--center_checkpoint", type=str, default='./center.npy',
help="use center checkpoint")
parser.add_argument("--ckpt", default=None, type=str,
help="restore from checkpoint")
parser.add_argument("--continue_training", action='store_true', default=False)
parser.add_argument("--loss_type", type=str, default='cross_entropy',
choices=['cross_entropy', 'focal_loss'], help="loss type (default: False)")
parser.add_argument("--gpu_id", type=str, default='0',
help="GPU ID")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help='weight decay (default: 1e-4)')
parser.add_argument("--random_seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--print_interval", type=int, default=10,
help="print interval of loss (default: 10)")
parser.add_argument("--val_interval", type=int, default=100,
help="epoch interval for eval (default: 100)")
parser.add_argument("--download", action='store_true', default=False,
help="download datasets")
# PASCAL VOC Options
parser.add_argument("--year", type=str, default='2012',
choices=['2012_aug', '2012', '2011', '2009', '2008', '2007'], help='year of VOC')
# Visdom options
parser.add_argument("--enable_vis", action='store_true', default=False,
help="use visdom for visualization")
parser.add_argument("--vis_port", type=str, default='13570',
help='port for visdom')
parser.add_argument("--vis_env", type=str, default='main',
help='env for visdom')
parser.add_argument("--vis_num_samples", type=int, default=8,
help='number of samples for visualization (default: 8)')
return parser
def Normalization(x):
min_value = np.min(x)
max_value = np.max(x)
return (x - min_value) / (max_value - min_value)
def Certainty(x, ecdf, thre1, thre2, mean, cov):
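    # Map raw scores through the empirical CDF and a steep sigmoid centred at
    # ecdf(thre1); thre2, mean and cov are accepted but currently unused.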
x = ecdf(x)
# res = x
# res[res>0.2] = 1
threshold = ecdf(thre1)
coefficient = 50
res = 1 / (1 + np.exp(-coefficient * (x - threshold)))
return res
def get_dataset(opts):
""" Dataset And Augmentation
"""
if opts.dataset == 'voc':
train_transform = et.ExtCompose([
#et.ExtResize(size=opts.crop_size),
et.ExtRandomScale((0.5, 2.0)),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size), pad_if_needed=True),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
if opts.crop_val:
val_transform = et.ExtCompose([
et.ExtResize(opts.crop_size),
et.ExtCenterCrop(opts.crop_size),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
else:
val_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='train', download=opts.download, transform=train_transform)
val_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
image_set='val', download=False, transform=val_transform)
if opts.dataset == 'cityscapes':
train_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),
et.ExtColorJitter( brightness=0.5, contrast=0.5, saturation=0.5 ),
et.ExtRandomHorizontalFlip(),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
val_transform = et.ExtCompose([
#et.ExtResize( 512 ),
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
train_dst = Cityscapes(root=opts.data_root,
split='train', transform=train_transform)
val_dst = Cityscapes(root=opts.data_root,
split='val', transform=val_transform)
novel_dst = Cityscapes(root=opts.data_root,
split='train', transform=val_transform)
return train_dst, val_dst, novel_dst
def Coefficient_map(x, thre):
lamda = 20
return 1 / (1 + np.exp(lamda * (x - thre)))
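# Minimal cosine-similarity helper assumed by val() below: this file neither
# defines nor imports one, so the standard definition is provided here.
def cosine_similarity(x, y):
    num = x.dot(y.T)
    denom = np.linalg.norm(x) * np.linalg.norm(y)
    return num / denom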
def val(opts, model, metric_model, train_loader, val_loader, device):
metrics16 = StreamSegMetrics(19)
metrics19 = StreamSegMetrics(19)
model.eval()
metric_model.eval()
# val_save_dir = os.path.join(opts.output_dir, 'val')
# os.makedirs(val_save_dir, exist_ok=True)
# denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
center_embedding = generate_novel('novel', Cityscapes.unknown_target, model, metric_model, device) # {13: (128,), 14: (128,), 15: (128,)}
#center_embedding = align_embedding(opts, model, metric_model, train_loader, device, center_embedding)
for _, (images, labels, labels_true, _, _) in tqdm(enumerate(val_loader)):
assert images.shape[0] == 1
with torch.no_grad():
images = images.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.long)
labels_true = labels_true.to(device, dtype=torch.long)
outputs, logits, features, _ = model(images) # outputs: (1, 16, H, W), logits: (1, 20, H, W), features: (1, 256, H/4, W/4)
logits = F.interpolate(logits, size=features.shape[-2:], mode='bilinear', align_corners=False) # (1, 20, H/4, W/4)
features = features[0].detach().cpu().numpy() # (256, H/4, W/4)
outputs = torch.argmax(outputs, dim=1)[0].detach().cpu().numpy() # (H, W)
metrics16.update(labels[0].detach().cpu().numpy(), outputs)
outputs19 = deepcopy(outputs)
#outputs19[outputs19 == 13] = 16
#outputs19[outputs19 == 14] = 17
#outputs19[outputs19 == 15] = 18
logits = logits[0].detach().cpu().numpy() # (20, H/4, W/4)
            logits = logits[-9:] # (9, H/4, W/4)
logits = (logits >= 0.5).astype(np.uint8) # (3, H/4, W/4)
for c in range(logits.shape[0]):
logit = logits[c] # (H/4, W/4)
#Hl, Wl = logit.shape
#logit = cv2.resize(logit, (Wl//4, Hl//4), interpolation=cv2.INTER_NEAREST)
num_object, connect = cv2.connectedComponents(logit)
#connect = cv2.resize(connect, (Wl, Hl), interpolation=cv2.INTER_NEAREST)
for k in range(1, num_object+1):
mask = (connect == k)[np.newaxis, ...].astype(np.uint8) # (1, H/4, W/4)
if np.sum(mask) < 100: continue
embedding = (features * mask).reshape(features.shape[0], -1).sum(axis=-1) # (256,)
embedding = embedding / np.sum(mask)
embedding = torch.Tensor(embedding).unsqueeze(dim=0).to(device, dtype=torch.float32) # (1, 256)
embedding = metric_model.forward_feature(embedding)[0].cpu().detach().numpy() # (128,)
tmp_key, tmp_cos = None, None
for key, value in center_embedding.items():
cos = cosine_similarity(embedding, value)
if cos >= 0.75:
if tmp_cos is None or cos > tmp_cos:
tmp_key = key
tmp_cos = cos
if tmp_key is not None:
mask = cv2.resize(mask[0], outputs19.shape[::-1], interpolation=cv2.INTER_NEAREST)
outputs19 = mask * tmp_key + outputs19 * (1 - mask)
metrics19.update(labels_true[0].detach().cpu().numpy(), outputs19)
score16 = metrics16.get_results()
score19 = metrics19.get_results()
print('16 classes')
print(metrics16.to_str(score16))
print()
print('19 classes')
print(metrics19.to_str(score19))
def select_novel_each_target(novel_loader, unknown_target, device, save_path, shot_num=5):
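    # Keep the `shot_num` images that contain the most pixels of `unknown_target`
    # (a bounded priority queue evicts the smallest count) and append their
    # image/label paths to novel.txt under save_path/<target>.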
print('select novel '+str(unknown_target))
now_path=os.path.join(save_path,str(unknown_target))
if (os.path.exists(now_path)==False):
os.makedirs(now_path)
file_path=os.path.join(now_path,'novel.txt')
f = open(file_path,'a',encoding = "utf-8")
q = queue.PriorityQueue()
for (images, labels, labels_true, image_name, target_name) in novel_loader:
labels_true=labels_true.to(device, dtype=torch.long)
now_sum=torch.sum(labels_true==unknown_target).data.cpu()
q.put([now_sum,(image_name,target_name)])
if (q.qsize()>shot_num): q.get()
assert q.qsize()==shot_num
while q.empty()==False:
now_sum,now_name=q.get()
image_name="".join(now_name[0])
target_name="".join(now_name[1])
f.write(image_name+'\t'+target_name+'\n')
f.close()
def select_novel(novel_loader, unknown_list, device, save_path='./novel', shot_num=5):
if (os.path.exists(save_path)==False):
os.makedirs(save_path)
for x in unknown_list:
select_novel_each_target(novel_loader,x,device,save_path, shot_num)
def generate_novel(novel_all, novel_path_name, unknown_list, model, device, shot_num=5):
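    # For each unknown class, average the backbone features over every few-shot
    # pixel of that class, save the resulting prototype to <class>/novel.pth and
    # store it in novel_all before returning it.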
model.eval()
novel_transform = et.ExtCompose([
et.ExtToTensor(),
et.ExtNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
with torch.no_grad():
for x in unknown_list:
print('generate novel: '+str(x))
log_path=os.path.join(novel_path_name,str(x))
center=None
novel_dst = Cityscapes_Novel(novel_path=novel_path_name,novel_no=x, transform=novel_transform)
novel_loader = data.DataLoader(novel_dst, batch_size=1, shuffle=False, num_workers=4)
novel_sum=0
for (image,target) in novel_loader:
print(image.max(), image.min(), '--------------')
image=image.to(device)
target=target.to(device,dtype=torch.long)
print(image.shape)
output,feature=model(image)
if target.shape[-1] != feature.shape[-1]:
target = torch.nn.functional.interpolate(target.unsqueeze(dim=1).float(), size=feature.shape[-2:], mode="nearest").squeeze(dim=1)
feature=feature.permute(0, 2, 3, 1)
b,h,w,c=feature.shape
feature=feature.view(h*w,c)
target=target.flatten()
print(target.shape)
print(feature.shape)
# for c in range(19):
# if c in target:
# temp=feature[target==c]
# print(c, np.round(np.mean(temp.detach().cpu().numpy(), axis=0), 2))
feature=feature[target==x]
feature=torch.sum(feature,dim=0)
                if center is None: center = torch.zeros(c,).to(device)
center+=feature
novel_sum+=torch.sum(target==x)
center=center/novel_sum
center_path=os.path.join(log_path,'novel.pth')
print(center.shape)
torch.save(center,center_path)
novel_all[x]=center.clone()
return novel_all
# def get_novel(center, num_classes, unknown_list):
# novel = torch.empty((num_classes,center.shape[1]))
# n=0
# x=0
# while (n<num_classes):
# if n in unknown_list:
# n+=1
# continue
# novel[n]=center[x].clone()
# x+=1
# n+=1
# return novel
def main():
opts = get_argparser().parse_args()
if opts.dataset.lower() == 'voc':
opts.num_classes = 21
elif opts.dataset.lower() == 'cityscapes':
opts.num_classes = 19
# Setup visualization
vis = Visualizer(port=opts.vis_port,
env=opts.vis_env) if opts.enable_vis else None
if vis is not None: # display options
vis.vis_table("Options", vars(opts))
os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("Device: %s" % device)
# Setup random seed
torch.manual_seed(opts.random_seed)
np.random.seed(opts.random_seed)
random.seed(opts.random_seed)
# Setup dataloader
if opts.dataset=='voc' and not opts.crop_val:
opts.val_batch_size = 1
train_dst, val_dst, novel_dst = get_dataset(opts)
train_loader = data.DataLoader(
train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=16)
val_loader = data.DataLoader(
val_dst, batch_size=opts.val_batch_size, shuffle=False, num_workers=16)
novel_loader = data.DataLoader(
novel_dst, batch_size=opts.val_batch_size, shuffle=False, num_workers=16)
print("Dataset: %s, Train set: %d, Val set: %d" %
(opts.dataset, len(train_dst), len(val_dst)))
# Set up model
model_map = {
'deeplabv3_resnet50': network.deeplabv3_resnet50,
'deeplabv3plus_resnet50': network.deeplabv3plus_resnet50,
'deeplabv3_resnet101': network.deeplabv3_resnet101,
'deeplabv3plus_resnet101': network.deeplabv3plus_resnet101,
'deeplabv3plus_embedding_resnet101': network.deeplabv3plus_embedding_resnet101,
'deeplabv3_mobilenet': network.deeplabv3_mobilenet,
'deeplabv3plus_mobilenet': network.deeplabv3plus_mobilenet,
'deeplabv3plus_metirc_resnet101': network.deeplabv3plus_metirc_resnet101
}
model = model_map[opts.model](num_classes=opts.num_classes, output_stride=opts.output_stride)
if opts.separable_conv and 'plus' in opts.model:
network.convert_to_separable_conv(model.classifier)
utils.set_bn_momentum(model.backbone, momentum=0.01)
# Set up metrics
metrics = StreamSegMetrics(opts.num_classes)
# Set up optimizer
optimizer = torch.optim.SGD(params=[
{'params': model.backbone.parameters(), 'lr': 0.1*opts.lr},
{'params': model.classifier.parameters(), 'lr': opts.lr},
], lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
#optimizer = torch.optim.SGD(params=model.parameters(), lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
#torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.lr_decay_step, gamma=opts.lr_decay_factor)
if opts.lr_policy=='poly':
scheduler = utils.PolyLR(optimizer, opts.total_itrs, power=0.9)
elif opts.lr_policy=='step':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)
# Set up criterion
#criterion = utils.get_loss(opts.loss_type)
if opts.loss_type == 'focal_loss':
criterion = utils.FocalLoss(ignore_index=255, size_average=True)
elif opts.loss_type == 'cross_entropy':
criterion = utils.CrossEntropyLoss(ignore_index=255, alpha=0.01, beta=0.01/80, gamma=0)
# def save_ckpt(path):
# """ save current model
# """
# torch.save({
# "cur_itrs": cur_itrs,
# "model_state": model.module.state_dict(),
# "optimizer_state": optimizer.state_dict(),
# "scheduler_state": scheduler.state_dict(),
# "best_score": best_score,
# }, path)
# print("Model saved as %s" % path)
utils.mkdir('checkpoints_131415_embedding')
# Restore
# best_score = 0.0
# cur_itrs = 0
# cur_epochs = 0
if opts.ckpt is not None and os.path.isfile(opts.ckpt):
# https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402, @PytaichukBohdan
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
res = model.load_state_dict(checkpoint["model_state"])
print(res)
#model = nn.DataParallel(model)
model.to(device)
if opts.continue_training:
optimizer.load_state_dict(checkpoint["optimizer_state"])
scheduler.load_state_dict(checkpoint["scheduler_state"])
cur_itrs = checkpoint["cur_itrs"]
best_score = checkpoint['best_score']
print("Training state restored from %s" % opts.ckpt)
print("Model restored from %s" % opts.ckpt)
del checkpoint # free memory
else:
print("[!] Retrain")
opts.gpu_id = [1]
# model = nn.DataParallel(model,device_ids=opts.gpu_id)
#model = nn.DataParallel(model)
model = model.cuda()
#========== Train Loop ==========#
vis_sample_id = np.random.randint(0, len(val_loader), opts.vis_num_samples,
np.int32) if opts.enable_vis else None # sample idxs for visualization
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # denormalization for ori images
#print(model)
# if (opts.center):
# center=torch.load(opts.center_checkpoint)
# print(center.shape, opts.num_classes, train_dst.unknown_target, '++++++++++')
#novel=get_novel(center,opts.num_classes,train_dst.unknown_target)
novel=np.load(opts.center_checkpoint)
novel=torch.from_numpy(novel)
# novel=torch.load('center.pth')
# novel=torch.cat([novel[:13], torch.zeros((3, novel.shape[1])).float().to(novel.device), novel[13:]], dim=0)
novel=novel.to(device)
print(novel.shape)
#select_novel(novel_loader,train_dst.unknown_target,device)
novel=generate_novel(novel,'./novel',Cityscapes.unknown_target,model,device,shot_num=5)
novel=torch.relu(novel)
for i in range(novel.shape[0]):
print(i, novel[i].detach().cpu().numpy())
novel=novel.to(device)
print(novel.shape)
# for i in range(novel.shape[0]):
# print(i, np.round(novel[i].detach().cpu().numpy(), 2))
# return
print('eval mode')
model.eval()
val_score, ret_samples = validate(
opts=opts, model=model, loader=val_loader, device=device, metrics=metrics, novel=novel, ret_samples_ids=vis_sample_id)
print(metrics.to_str(val_score))
return
# if opts.test_only:
# model.eval()
# val_score, ret_samples = validate(
# opts=opts, model=model, loader=val_loader, device=device, metrics=metrics, ret_samples_ids=vis_sample_id)
# print(metrics.to_str(val_score))
# return
# interval_loss = 0
# while True: #cur_itrs < opts.total_itrs:
# # ===== Train =====
# model.train()
# cur_epochs += 1
# for (images, labels, labels_true) in train_loader:
# cur_itrs += 1
# images = images.to(device, dtype=torch.float32)
# labels = labels.to(device, dtype=torch.long)
# optimizer.zero_grad()
# outputs, centers, features = model(images)
# loss = criterion(outputs, labels, features)
# loss.backward()
# optimizer.step()
# np_loss = loss.detach().cpu().numpy()
# interval_loss += np_loss
# if vis is not None:
# vis.vis_scalar('Loss', cur_itrs, np_loss)
# if (cur_itrs) % 10 == 0:
# interval_loss = interval_loss/10
# print("Epoch %d, Itrs %d/%d, Loss=%f" %
# (cur_epochs, cur_itrs, opts.total_itrs, interval_loss))
# interval_loss = 0.0
# if (cur_itrs) % opts.val_interval == 0:
# save_ckpt('checkpoints_131415_embedding/latest_%s_%s_os%d.pth' %
# (opts.model, opts.dataset, opts.output_stride))
# print("validation...")
# model.eval()
# val_score, ret_samples = validate(
# opts=opts, model=model, loader=val_loader, device=device, metrics=metrics, ret_samples_ids=vis_sample_id)
# print(metrics.to_str(val_score))
# if val_score['Mean IoU'] > best_score: # save best model
# best_score = val_score['Mean IoU']
# save_ckpt('checkpoints_131415_embedding/best_%s_%s_os%d.pth' %
# (opts.model, opts.dataset,opts.output_stride))
# if vis is not None: # visualize validation score and samples
# vis.vis_scalar("[Val] Overall Acc", cur_itrs, val_score['Overall Acc'])
# vis.vis_scalar("[Val] Mean IoU", cur_itrs, val_score['Mean IoU'])
# vis.vis_table("[Val] Class IoU", val_score['Class IoU'])
# for k, (img, target, lbl) in enumerate(ret_samples):
# img = (denorm(img) * 255).astype(np.uint8)
# target = train_dst.decode_target(target).transpose(2, 0, 1).astype(np.uint8)
# lbl = train_dst.decode_target(lbl).transpose(2, 0, 1).astype(np.uint8)
# concat_img = np.concatenate((img, target, lbl), axis=2) # concat along width
# vis.vis_image('Sample %d' % k, concat_img)
# model.train()
# scheduler.step()
# if cur_itrs >= opts.total_itrs:
# return
if __name__ == '__main__':
main()
| 31,049 | 46.40458 | 153 | py |
RAML | RAML-master/incremental/metrics/stream_metrics.py | import numpy as np
from sklearn.metrics import confusion_matrix
class _StreamMetrics(object):
def __init__(self):
""" Overridden by subclasses """
raise NotImplementedError()
def update(self, gt, pred):
""" Overridden by subclasses """
raise NotImplementedError()
def get_results(self):
""" Overridden by subclasses """
raise NotImplementedError()
def to_str(self, metrics):
""" Overridden by subclasses """
raise NotImplementedError()
def reset(self):
""" Overridden by subclasses """
raise NotImplementedError()
class StreamSegMetrics(_StreamMetrics):
"""
Stream Metrics for Semantic Segmentation Task
"""
def __init__(self, n_classes, known_class=None):
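        # When known_class is given, per-class IoU is additionally split into
        # mean IoU over the known classes and over the unknown (novel) classes.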
        self.n_classes = n_classes
self.confusion_matrix = np.zeros((n_classes, n_classes))
self.known_class = known_class
def update(self, label_trues, label_preds):
for lt, lp in zip(label_trues, label_preds):
self.confusion_matrix += self._fast_hist( lt.flatten(), lp.flatten() )
@staticmethod
def to_str(results):
string = "\n"
for k, v in results.items():
if k!="Class IoU":
string += "%s: %f\n"%(k, v)
#string+='Class IoU:\n'
#for k, v in results['Class IoU'].items():
# string += "\tclass %d: %f\n"%(k, v)
return string
def _fast_hist(self, label_true, label_pred):
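        # Accumulate an n_classes x n_classes confusion matrix from flattened
        # label arrays, ignoring pixels whose ground truth lies outside
        # [0, n_classes).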
mask = (label_true >= 0) & (label_true < self.n_classes)
hist = np.bincount(
self.n_classes * label_true[mask].astype(int) + label_pred[mask],
minlength=self.n_classes ** 2,
).reshape(self.n_classes, self.n_classes)
return hist
def get_results(self):
"""Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc
"""
hist = self.confusion_matrix
acc = np.diag(hist).sum() / hist.sum()
acc_cls = np.diag(hist) / hist.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
mean_iu = np.nanmean(iu)
print(iu)
freq = hist.sum(axis=1) / hist.sum()
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
cls_iu = dict(zip(range(self.n_classes), iu))
        if self.known_class is None:
return {
"Overall Acc": acc,
"Mean Acc": acc_cls,
"FreqW Acc": fwavacc,
"Mean IoU": mean_iu,
"Class IoU": cls_iu,
}
else:
known_iu = iu[0:self.known_class]
unknown_iu = iu[self.known_class:]
known_mean_iu = np.nanmean(known_iu)
unknown_mean_iu =np.nanmean(unknown_iu)
return {
"Overall Acc": acc,
"Mean Acc": acc_cls,
"FreqW Acc": fwavacc,
"Mean IoU": mean_iu,
"Class IoU": cls_iu,
"Known IoU": known_mean_iu,
"Unknown IoU": unknown_mean_iu,
}
def reset(self):
self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
class AverageMeter(object):
"""Computes average values"""
def __init__(self):
self.book = dict()
def reset_all(self):
self.book.clear()
def reset(self, id):
item = self.book.get(id, None)
if item is not None:
item[0] = 0
item[1] = 0
def update(self, id, val):
record = self.book.get(id, None)
if record is None:
self.book[id] = [val, 1]
else:
record[0]+=val
record[1]+=1
def get_results(self, id):
record = self.book.get(id, None)
assert record is not None
return record[0] / record[1]
| 3,982 | 30.611111 | 82 | py |
RAML | RAML-master/incremental/metrics/__init__.py | from .stream_metrics import StreamSegMetrics, AverageMeter
| 60 | 19.333333 | 58 | py |
RAML | RAML-master/incremental/datasets/voc.py | import os
import sys
import tarfile
import collections
import torch.utils.data as data
import shutil
import numpy as np
from PIL import Image
from torchvision.datasets.utils import download_url, check_integrity
DATASET_YEAR_DICT = {
'2012': {
'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
'filename': 'VOCtrainval_11-May-2012.tar',
'md5': '6cd6e144f989b92b3379bac3b3de84fd',
'base_dir': 'VOCdevkit/VOC2012'
},
'2011': {
'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar',
'filename': 'VOCtrainval_25-May-2011.tar',
'md5': '6c3384ef61512963050cb5d687e5bf1e',
'base_dir': 'TrainVal/VOCdevkit/VOC2011'
},
'2010': {
'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar',
'filename': 'VOCtrainval_03-May-2010.tar',
'md5': 'da459979d0c395079b5c75ee67908abb',
'base_dir': 'VOCdevkit/VOC2010'
},
'2009': {
'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar',
'filename': 'VOCtrainval_11-May-2009.tar',
'md5': '59065e4b188729180974ef6572f6a212',
'base_dir': 'VOCdevkit/VOC2009'
},
'2008': {
'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar',
'filename': 'VOCtrainval_11-May-2012.tar',
'md5': '2629fa636546599198acfcfbfcf1904a',
'base_dir': 'VOCdevkit/VOC2008'
},
'2007': {
'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
'filename': 'VOCtrainval_06-Nov-2007.tar',
'md5': 'c52e279531787c972589f7e41ab4ae64',
'base_dir': 'VOCdevkit/VOC2007'
}
}
def voc_cmap(N=256, normalized=False):
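    # Standard PASCAL VOC colour map: the class index is consumed three bits at
    # a time, and at step j those bits set bit (7 - j) of the R, G and B channels.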
def bitget(byteval, idx):
return ((byteval & (1 << idx)) != 0)
dtype = 'float32' if normalized else 'uint8'
cmap = np.zeros((N, 3), dtype=dtype)
for i in range(N):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7-j)
g = g | (bitget(c, 1) << 7-j)
b = b | (bitget(c, 2) << 7-j)
c = c >> 3
cmap[i] = np.array([r, g, b])
cmap = cmap/255 if normalized else cmap
return cmap
class VOCSegmentation(data.Dataset):
"""`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Segmentation Dataset.
Args:
root (string): Root directory of the VOC Dataset.
year (string, optional): The dataset year, supports years 2007 to 2012.
image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val``
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
"""
cmap = voc_cmap()
def __init__(self,
root,
year='2012',
image_set='train',
download=False,
transform=None):
is_aug=False
if year=='2012_aug':
is_aug = True
year = '2012'
self.root = os.path.expanduser(root)
self.year = year
self.url = DATASET_YEAR_DICT[year]['url']
self.filename = DATASET_YEAR_DICT[year]['filename']
self.md5 = DATASET_YEAR_DICT[year]['md5']
self.transform = transform
self.image_set = image_set
base_dir = DATASET_YEAR_DICT[year]['base_dir']
voc_root = os.path.join(self.root, base_dir)
image_dir = os.path.join(voc_root, 'JPEGImages')
if download:
download_extract(self.url, self.root, self.filename, self.md5)
if not os.path.isdir(voc_root):
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
if is_aug and image_set=='train':
mask_dir = os.path.join(voc_root, 'SegmentationClassAug')
assert os.path.exists(mask_dir), "SegmentationClassAug not found, please refer to README.md and prepare it manually"
split_f = os.path.join( self.root, 'train_aug.txt')#'./datasets/data/train_aug.txt'
else:
mask_dir = os.path.join(voc_root, 'SegmentationClass')
splits_dir = os.path.join(voc_root, 'ImageSets/Segmentation')
split_f = os.path.join(splits_dir, image_set.rstrip('\n') + '.txt')
if not os.path.exists(split_f):
raise ValueError(
'Wrong image_set entered! Please use image_set="train" '
'or image_set="trainval" or image_set="val"')
with open(os.path.join(split_f), "r") as f:
file_names = [x.strip() for x in f.readlines()]
self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names]
self.masks = [os.path.join(mask_dir, x + ".png") for x in file_names]
assert (len(self.images) == len(self.masks))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is the image segmentation.
"""
img = Image.open(self.images[index]).convert('RGB')
target = Image.open(self.masks[index])
if self.transform is not None:
img, target = self.transform(img, target)
return img, target
def __len__(self):
return len(self.images)
@classmethod
def decode_target(cls, mask):
"""decode semantic mask to RGB image"""
return cls.cmap[mask]
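# Minimal usage sketch (not part of the original module; the root path is a
# placeholder). Note that ``transform`` is called with both the image and the
# mask, so a joint transform should be supplied in practice.
def _voc_usage_example(root='./datasets/data'):
    dataset = VOCSegmentation(root=root, year='2012', image_set='train',
                              download=False, transform=None)
    img, target = dataset[0]
    rgb_mask = VOCSegmentation.decode_target(np.array(target))
    return img, rgb_mask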
def download_extract(url, root, filename, md5):
download_url(url, root, filename, md5)
with tarfile.open(os.path.join(root, filename), "r") as tar:
tar.extractall(path=root) | 6,061 | 36.190184 | 128 | py |
RAML | RAML-master/incremental/datasets/cityscapes.py | import json
import os
from collections import namedtuple
from matplotlib import set_loglevel
import torch
import torch.utils.data as data
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from torchvision import transforms
import cv2
class Cityscapes(data.Dataset):
"""Cityscapes <http://www.cityscapes-dataset.com/> Dataset.
    **Parameters:**
        - **root** (string): Root directory of dataset where directory 'leftImg8bit' and 'gtFine' are located.
        - **split** (string, optional): The image split to use, e.g. 'train', 'val' or one of the custom test/visualisation splits accepted by ``__init__``.
        - **mode** (string, optional): Kept for API compatibility; the quality mode is fixed to 'gtFine'.
        - **target_type** (string, optional): The target to load: 'instance', 'semantic', 'color', 'polygon' or 'depth'.
        - **transform** (callable, optional): A function/transform that takes in a PIL image and its target and returns transformed versions of both.
    """
# Based on https://github.com/mcordts/cityscapesScripts
CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id',
'has_instances', 'ignore_in_eval', 'color'])
classes = [
CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
CityscapesClass('license plate', -1, 255, 'vehicle', 7, False, True, (0, 0, 142)),
]
train_id_to_color = [c.color for c in classes if (c.train_id != -1 and c.train_id != 255)]
train_id_to_color.append([0, 0, 0])
train_id_to_color = np.array(train_id_to_color)
id_to_train_id = np.array([c.train_id for c in classes])
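    # For reference (illustrative): ``id_to_train_id`` maps raw label ids to train
    # ids (e.g. raw id 7 'road' -> 0, raw id 26 'car' -> 13, ignored classes -> 255),
    # while ``train_id_to_color`` holds one colour per train id plus a trailing
    # (0, 0, 0) entry used for ignored/unknown pixels (index 19 after
    # ``decode_target`` maps 255 -> 19).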
unknown_target = None
# unknown_target = [1, 3, 4, 5, 6, 7, 8, 9, 12, 14, 15, 16, 18]
# 12+7
unknown_target = [10,13,14,15,16,17,18]
# 14+5
# unknown_target = [10,13,14,15,16]
# 18+1
#unknown_target = [13]
# 16+3 / 16+1
#unknown_target = [13,14,15]
# unknown_target = [i for i in range(19)]
# unknown_target.pop(13)
print('unknown_target is : ', unknown_target)
# unknown_target = [18]
#train_id_to_color = [(0, 0, 0), (128, 64, 128), (70, 70, 70), (153, 153, 153), (107, 142, 35),
# (70, 130, 180), (220, 20, 60), (0, 0, 142)]
#train_id_to_color = np.array(train_id_to_color)
#id_to_train_id = np.array([c.category_id for c in classes], dtype='uint8') - 1
def __init__(self, root, split='train', mode='fine', target_type='semantic', transform=None):
self.root = os.path.expanduser(root)
self.mode = 'gtFine'
self.target_type = target_type
self.images_dir = os.path.join(self.root, 'leftImg8bit', split)
self.targets_dir = os.path.join(self.root, self.mode, split)
# self.targets_dir = self.images_dir
self.transform = transform
self.split = split
self.images = []
self.targets = []
if split not in ['train', 'test_car', 'val','test_truck', 'test_bus', 'test_car_1_shot',
'test_truck_1_shot', 'test_bus_1_shot', 'car_vis', 'bus_vis','demo_video',
'car_100','car_1000']:
            raise ValueError('Invalid split! Please use one of: "train", "val", "test_car", '
                             '"test_truck", "test_bus", "test_car_1_shot", "test_truck_1_shot", '
                             '"test_bus_1_shot", "car_vis", "bus_vis", "demo_video", "car_100", "car_1000"')
if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir):
raise RuntimeError('Dataset not found or incomplete. Please make sure all required folders for the'
' specified "split" and "mode" are inside the "root" directory')
for city in os.listdir(self.images_dir):
img_dir = os.path.join(self.images_dir, city)
target_dir = os.path.join(self.targets_dir, city)
files_name = os.listdir(img_dir)
files_name = sorted(files_name)
for file_name in files_name:
self.images.append(os.path.join(img_dir, file_name))
target_name = '{}_{}'.format(file_name.split('_leftImg8bit')[0],
self._get_target_suffix(self.mode, self.target_type))
self.targets.append(os.path.join(target_dir, target_name))
@classmethod
def encode_target(cls, target):
target = cls.id_to_train_id[np.array(target)]
target_true = target.copy()
# instance, counts = np.unique(target, False, False, True)
# print('target', instance, counts)
        if cls.unknown_target is not None:
cont = 0
for h_c in cls.unknown_target:
target[target == h_c - cont] = 100
for c in range(h_c - cont + 1, 19):
target[target == c] = c - 1
# target_true[target_true == c] = c - 1
cont = cont + 1
# target_true[target == 100] = 19 - len(cls.unknown_target)
target[target == 100] = 255
return target, target_true
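    # Worked example (illustrative): with unknown_target = [10, 13, 14, 15, 16, 17, 18],
    # ``encode_target`` first applies ``id_to_train_id`` (raw ids -> 19 train ids), then
    # maps every unknown train id to 255 (via the temporary value 100) and shifts the
    # remaining ids down so the 12 known classes are contiguous in [0, 11]
    # (e.g. original train id 11 -> 10, 12 -> 11). ``target_true`` keeps the
    # unshifted 19-class encoding.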
@classmethod
def decode_target(cls, target):
target[target == 255] = 19
#target = target.astype('uint8') + 1
return cls.train_id_to_color[target]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a tuple of all target types if target_type is a list with more
than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation.
"""
image = Image.open(self.images[index]).convert('RGB')
# image = Image.open(self.images[index])
target = Image.open(self.targets[index])
if self.transform:
image, target = self.transform(image, target)
target, target_true = self.encode_target(target)
target_lst, class_lst = self.encode_target_czifan(target)
return image, target, target_true, target_lst, class_lst
def __len__(self):
return len(self.images)
def _load_json(self, path):
with open(path, 'r') as file:
data = json.load(file)
return data
def _get_target_suffix(self, mode, target_type):
if target_type == 'instance':
return '{}_instanceIds.png'.format(mode)
elif target_type == 'semantic':
return '{}_labelIds.png'.format(mode)
elif target_type == 'color':
return '{}_color.png'.format(mode)
elif target_type == 'polygon':
return '{}_polygons.json'.format(mode)
elif target_type == 'depth':
return '{}_disparity.png'.format(mode)
def encode_target_czifan(self, target, output_size=16):
known_class = 19 - len(Cityscapes.unknown_target)
target_lst = np.zeros((known_class + 1, *target.shape))
class_lst = np.ones(known_class + 1) * 255
for c in range(known_class):
target_lst[c] = (target == c)
class_lst[c] = c
return target_lst.astype(np.uint8), class_lst.astype(np.uint8)
# target_lst = np.zeros((output_size**2, *target.shape))
# class_lst = np.ones(output_size**2) * 255
# for t in np.unique(target):
# tmp = np.where(target == t)
# gy, gx = int(np.mean(tmp[0])/32), int(np.mean(tmp[1])/32)
# target_lst[gy*output_size+gx,...] = (target == t)
# class_lst[gy*output_size+gx] = t
# return target_lst.astype(np.uint8), class_lst.astype(np.uint8)
# temp = cv2.resize(target.astype(np.uint8), (output_size, output_size), interpolation=cv2.INTER_LINEAR).reshape(-1)
# #temp = torch.nn.functional.interpolate(target.clone().unsqueeze(dim=1).float(), size=[output_size, output_size], mode="nearest").view(-1)
# target_lst, class_lst = [], []
# for t in temp:
# if t == 255:
# target_lst.append(np.zeros_like(target))
# else:
# target_lst.append(target == t)
# class_lst.append(t.item())
# target_lst = np.stack(target_lst, axis=0).astype(np.uint8) # (256, 512, 512)
# class_lst = np.asarray(class_lst).astype(np.uint8) # (256,)
# return target_lst, class_lst
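# Minimal usage sketch (not part of the original module; the root path is a
# placeholder and a joint image/mask transform is normally supplied):
def _cityscapes_usage_example(root='./datasets/data/cityscapes'):
    dataset = Cityscapes(root=root, split='val', transform=None)
    image, target, target_true, target_lst, class_lst = dataset[0]
    # ``target`` holds the re-indexed known classes (255 = unknown/ignore),
    # ``target_lst``/``class_lst`` are the per-class binary masks and ids from
    # ``encode_target_czifan``, and ``decode_target`` maps train ids back to colours.
    return Cityscapes.decode_target(target_true.copy())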
| 11,663 | 51.540541 | 168 | py |
RAML | RAML-master/incremental/datasets/utils.py | import os
import os.path
import hashlib
import errno
from tqdm import tqdm
def gen_bar_updater(pbar):
def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)
return bar_update
def check_integrity(fpath, md5=None):
if md5 is None:
return True
if not os.path.isfile(fpath):
return False
md5o = hashlib.md5()
with open(fpath, 'rb') as f:
# read in 1MB chunks
for chunk in iter(lambda: f.read(1024 * 1024), b''):
md5o.update(chunk)
md5c = md5o.hexdigest()
if md5c != md5:
return False
return True
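# Illustrative helper (not in the original module): check a previously downloaded
# archive against its published checksum before re-using it. The directory is a
# placeholder; the checksum is the VOC2007 trainval md5 listed in the VOC dataset module.
def _verify_download_example(root='./datasets/data'):
    fpath = os.path.join(root, 'VOCtrainval_06-Nov-2007.tar')
    return check_integrity(fpath, md5='c52e279531787c972589f7e41ab4ae64')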
def makedir_exist_ok(dirpath):
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
def download_url(url, root, filename=None, md5=None):
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str): Name to save the file under. If None, use the basename of the URL
md5 (str): MD5 checksum of the download. If None, do not check
"""
from six.moves import urllib
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
# downloads file
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater(tqdm(unit='B', unit_scale=True))
)
except OSError:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater(tqdm(unit='B', unit_scale=True))
)
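# Usage sketch (not in the original module): fetch the VOC2007 trainval archive into a
# local data directory, re-using the URL and checksum listed in the VOC dataset module.
def _download_url_example(root='./datasets/data'):
    download_url(
        'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
        root,
        filename='VOCtrainval_06-Nov-2007.tar',
        md5='c52e279531787c972589f7e41ab4ae64')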
def list_dir(root, prefix=False):
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = list(
filter(
lambda p: os.path.isdir(os.path.join(root, p)),
os.listdir(root)
)
)
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
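# Usage sketch (not in the original module; the path is a placeholder): list the city
# sub-folders of a Cityscapes split and the PNG files of the first one. ``list_files``
# is defined just below and is resolved when this helper is called.
def _list_helpers_example(root='./datasets/data/cityscapes/leftImg8bit/train'):
    cities = list_dir(root)
    first_city_pngs = list_files(os.path.join(root, cities[0]), '.png', prefix=True)
    return cities, first_city_pngs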
def list_files(root, suffix, prefix=False):
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = list(
filter(
lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
os.listdir(root)
)
)
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files | 3,804 | 29.198413 | 93 | py |
RAML | RAML-master/incremental/datasets/cityscapes_novel.py | import json
import os
from collections import namedtuple
from matplotlib import set_loglevel
import torch
import torch.utils.data as data
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from torchvision import transforms
class Cityscapes_Novel(data.Dataset):
"""Cityscapes <http://www.cityscapes-dataset.com/> Dataset.
**Parameters:**
- **root** (string): Root directory of dataset where directory 'leftImg8bit' and 'gtFine' or 'gtCoarse' are located.
- **split** (string, optional): The image split to use, 'train', 'test' or 'val' if mode="gtFine" otherwise 'train', 'train_extra' or 'val'
- **mode** (string, optional): The quality mode to use, 'gtFine' or 'gtCoarse' or 'color'. Can also be a list to output a tuple with all specified target types.
- **transform** (callable, optional): A function/transform that takes in a PIL image and returns a transformed version. E.g, ``transforms.RandomCrop``
- **target_transform** (callable, optional): A function/transform that takes in the target and transforms it.
"""
# Based on https://github.com/mcordts/cityscapesScripts
CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id',
'has_instances', 'ignore_in_eval', 'color'])
classes = [
CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
CityscapesClass('license plate', -1, 255, 'vehicle', 7, False, True, (0, 0, 142)),
]
train_id_to_color = [c.color for c in classes if (c.train_id != -1 and c.train_id != 255)]
train_id_to_color.append([0, 0, 0])
train_id_to_color = np.array(train_id_to_color)
id_to_train_id = np.array([c.train_id for c in classes])
unknown_target = None
# unknown_target = [1, 3, 4, 5, 6, 7, 8, 9, 12, 14, 15, 16, 18]
unknown_target = [13,14,15]
# unknown_target = [i for i in range(19)]
# unknown_target.pop(13)
print('unknown_target is : ', unknown_target)
# unknown_target = [18]
#train_id_to_color = [(0, 0, 0), (128, 64, 128), (70, 70, 70), (153, 153, 153), (107, 142, 35),
# (70, 130, 180), (220, 20, 60), (0, 0, 142)]
#train_id_to_color = np.array(train_id_to_color)
#id_to_train_id = np.array([c.category_id for c in classes], dtype='uint8') - 1
def __init__(self, novel_path, novel_no, novel_name='novel.txt', transform=None):
self.root=os.path.join(novel_path,str(novel_no))
self.root=os.path.join(self.root,novel_name)
self.transform=transform
        self.images = []
        self.targets = []
        with open(self.root, 'r') as f:
            for line in f.readlines():
                line = line.strip('\n')
                if not line:
                    continue
                image_path, target_path = line.split('\t')
                self.images.append(image_path)
                self.targets.append(target_path)
# self.targets = self.images
# print(self.images)
# print(self.images[10])
# print(self.images[102])
# print(self.images[107])
# print(self.images[197])
# print(self.images[200])
# print(self.images[207])
# print(self.images[474])
# print(self.images[486])
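    # Expected list-file format (illustrative; paths are placeholders): one sample per
    # line, with the image path and the label path separated by a tab, e.g.
    #   .../leftImg8bit/train/<city>/<name>_leftImg8bit.png<TAB>.../gtFine/train/<city>/<name>_gtFine_labelIds.png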
@classmethod
def encode_target(cls, target):
target = cls.id_to_train_id[np.array(target)]
return target
@classmethod
def decode_target(cls, target):
target[target == 255] = 19
#target = target.astype('uint8') + 1
return cls.train_id_to_color[target]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a tuple of all target types if target_type is a list with more
than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation.
"""
image = Image.open(self.images[index]).convert('RGB')
# image = Image.open(self.images[index])
target = Image.open(self.targets[index])
if self.transform:
image, target = self.transform(image, target)
target = self.encode_target(target)
# unloader = transforms.ToPILImage()
#
# plt.figure()
# plt.imshow(unloader(image.cpu().clone()))
# plt.show()
#
# plt.figure()
# plt.imshow(target)
# plt.show()
#
# plt.figure()
# plt.imshow(target_true)
# plt.show()
#
# instance, counts = np.unique(target, False, False, True)
# print('target', instance, counts)
# instance, counts = np.unique(target_true, False, False, True)
# print('true', instance, counts)
# return image
return image, target
def __len__(self):
return len(self.images)
def _load_json(self, path):
with open(path, 'r') as file:
data = json.load(file)
return data
def _get_target_suffix(self, mode, target_type):
if target_type == 'instance':
return '{}_instanceIds.png'.format(mode)
elif target_type == 'semantic':
return '{}_labelIds.png'.format(mode)
elif target_type == 'color':
return '{}_color.png'.format(mode)
elif target_type == 'polygon':
return '{}_polygons.json'.format(mode)
elif target_type == 'depth':
return '{}_disparity.png'.format(mode) | 8,742 | 48.39548 | 168 | py |
RAML | RAML-master/incremental/datasets/__init__.py | from .voc import VOCSegmentation
from .cityscapes import Cityscapes
from .cityscapes_novel import Cityscapes_Novel | 114 | 37.333333 | 46 | py |