repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (1 class: py) |
---|---|---|---|---|---|---|
ReBATE | ReBATE-master/rebate/setup_multisurf.py | """
Copyright (c) 2016 Peter R. Schmitt and Ryan J. Urbanowicz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
exts = [Extension("multisurf", ["rebate/MultiSURF.pyx"],)]
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = exts,
)
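# Build note (a usage sketch, not taken from the repository's documentation):
# this script is normally run as `python setup_multisurf.py build_ext --inplace`
# so that the compiled multisurf module is placed alongside the Python sources;
# it assumes Cython and a working C compiler are installed.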
| 1,332 | 42 | 74 | py |
ReBATE | ReBATE-master/tests/tests_continuous_endpoint.py |
"""
ReBATE was primarily developed at the University of Pennsylvania by:
- Pete Schmitt ([email protected])
- Ryan J. Urbanowicz ([email protected])
- Weixuan Fu ([email protected])
- and many more generous open source contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#Initialize hardcoded argument version of rebate.py
import rebate.IO as io
import rebate.Common as cmn
import rebate.relieff as R
import rebate.surf as S
import rebate.multisurf as MS
import time as tm
import sys
import os
###############################################################################
#Setup Options ################################################
options = dict()
options['filename'] = 'data/GAMETES_Epistasis_2-Way_continuous_endpoint_a_20s_1600her_0.4__maf_0.2_EDM-2_01.txt'
options['basename'] = 'GAMETES_Epistasis_2-Way_continuous_endpoint_a_20s_1600her_0.4__maf_0.2_EDM-2_01.txt'
options['dir_path'] = 'data'
options['testdata'] = None
options['phenotypename'] = "Class"
options['discretelimit'] = 10
options['neighbors'] = 10
options['missingdata'] = 'NA'
options['algorithm'] = 'relieff'
options['turfpct'] = '0'
options['verbose'] = False
options['debug'] = True
options['topattr'] = 0
options['outputdir'] = '.'
#########################################
#Below is just a copy of the required code from rebate.py#########################
V = options['verbose']
turfpct = int(options['turfpct'])
algorithm = options['algorithm']
if(algorithm != 'relieff' and algorithm != 'surf' and algorithm != 'surfstar' and algorithm != 'multisurfstar' and algorithm != 'multisurf'):
print("algorithm " + algorithm + " is not available")
print("Use relieff, surf, surfstar, multisurfstar, or multisurf")
sys.exit(1)
if(V):
print("-------------- Python Version --------------")
print(sys.version)
print("--------------------------------------------")
#-----------------------------------------------------------------------------#
input_file = options['filename']
if(os.path.exists(input_file)):
header, data = io.np_read_data_tst(input_file,options)
else:
print("File " + input_file + " does NOT exist!")
sys.exit(1)
#-----------------------------------------------------------------------------#
x, y = io.getxy(header, data, options)
#-----------------------------------------------------------------------------#
# if there is test data, test it for compatibility
if(options['testdata'] != None):
testdata = options['testdata']
if(os.path.exists(testdata)):
theader, tdata = io.test_testdata(header, testdata, options)
else:
print("File " + testdata + " does NOT exist!")
sys.exit(2)
#-----------------------------------------------------------------------------#
var = cmn.getVariables(header, x, y, options)
attr = cmn.getAttributeInfo(header, x, var, options)
cheader = []
for i in header:
if attr[i][0] == 'continuous':
cheader.append(i)
if(V):
print("--------------- Parameters ---------------")
print("datafile: " + options['basename'])
print("datatype: " + var['dataType'])
print("attributes: " + str(var['NumAttributes']))
if(var['dataType'] == 'mixed'):
print(" continuous: " + str(var['cpct'][1]))
print(" discrete: " + str(var['dpct'][1]))
print("instances: " + str(var['datalen']))
print("missing: " + str(var['mdcnt']))
print("classtype: " + var['classType'])
if(var['classType'] == 'multiclass'):
yset = var['phenoTypeList']
print(" classes: " + str(len(yset)))
print("classname: " + var['phenoTypeName'])
print("algorithm: " + options['algorithm'])
print("--------------------------------------------")
sys.stdout.flush()
#-----------------------------------------------------------------------------#
# create distance array and remove intermediate data
# if missing and/or mixed data use the mixedDistance function
#
begin = tm.time()
diffs, cidx, didx = cmn.dtypeArray(header, attr, var)
if(var['mdcnt'] > 0):
    import rebate.mmDistance as md
distArray = md.getDistances(x[:,cidx], x[:,didx], var, diffs[cidx])
disttype = "missing"
else:
distArray = cmn.getDistances(x, attr, var, cidx, didx, cheader)
disttype = "discrete/continuous/mixed"
if(V):
ctime = "[" + tm.strftime("%H:%M:%S") + "]"
print(ctime + " " + disttype + " distance array time(sec) = "
+ str(tm.time()-begin))
sys.stdout.flush()
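# Illustrative sketch (an exposition-only assumption, not ReBATE's actual
# cmn.getDistances implementation): for purely discrete attributes the pairwise
# instance distance reduces to counting mismatching attribute values, which can
# be written with NumPy broadcasting as below. Nothing in the tests uses this helper.
import numpy as np
def _toy_discrete_distance_array(X):
    """Return an (n, n) matrix of attribute-mismatch counts for a discrete 2-D array X."""
    X = np.asarray(X)
    return (X[:, None, :] != X[None, :, :]).sum(axis=2)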
#############################################################################
def test_relieff_GWAS_Sim():
""" Test ReliefF on GWAS_Sim Continuous Endpoint"""
Scores = R.runReliefF(header,x,y,attr,var,distArray,options)
print("ReliefF + GWAS_Sim CE")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
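# Note on the pop-and-recheck pattern used in these tests (this follows from list
# semantics, not from ReBATE internals): once the higher-scoring of the two
# predictive features (index 18 or 19) is popped, the remaining one sits at index
# 18 in either case, because feature 18 keeps its position when 19 is removed and
# feature 19 shifts down to 18 when 18 is removed, so a single `== 18` check
# covers both orderings.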
def test_surf_GWAS_Sim():
""" Test SURF on GWAS_Sim Continuous Endpoint"""
#New parameters
options['algorithm'] = 'surf'
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
print("SURF + GWAS_Sim CE ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_surfstar_GWAS_Sim():
""" Test SURF* on GWAS_Sim Continuous Endpoint"""
#New parameters
options['algorithm'] = 'surfstar'
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
print("SURF* + GWAS_Sim CE ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_multisurfstar_GWAS_Sim():
""" Test MultiSURF* on GWAS_Sim Continuous Endpoint"""
#New parameters
options['algorithm'] = 'multisurfstar'
Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
print("MultiSURF* + GWAS_Sim CE ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore ==18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_multisurf_GWAS_Sim():
""" Test MultiSURF on GWAS_Sim Continuous Endpoint"""
#New parameters
options['algorithm'] = 'multisurf'
Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
print("MultiSURF + GWAS_Sim CE ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore ==18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
| 9,736 | 41.334783 | 141 | py |
ReBATE | ReBATE-master/tests/tests_missing_data.py |
"""
ReBATE was primarily developed at the University of Pennsylvania by:
- Pete Schmitt ([email protected])
- Ryan J. Urbanowicz ([email protected])
- Weixuan Fu ([email protected])
- and many more generous open source contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#Initialize hardcoded argument version of rebate.py
import rebate.IO as io
import rebate.Common as cmn
import rebate.relieff as R
import rebate.surf as S
import rebate.multisurf as MS
import time as tm
import sys
import os
###############################################################################
#Setup Options ################################################
options = dict()
options['filename'] = 'data/GAMETES_Epistasis_2-Way_missing_values_0.1_a_20s_1600her_0.4__maf_0.2_EDM-2_01.txt'
options['basename'] = 'GAMETES_Epistasis_2-Way_missing_values_0.1_a_20s_1600her_0.4__maf_0.2_EDM-2_01.txt'
options['dir_path'] = 'data'
options['testdata'] = None
options['phenotypename'] = "Class"
options['discretelimit'] = 10
options['neighbors'] = 10
options['missingdata'] = 'NA'
options['algorithm'] = 'relieff'
options['turfpct'] = '0'
options['verbose'] = False
options['debug'] = True
options['topattr'] = 0
options['outputdir'] = '.'
#########################################
#Below is just a copy of the required code from rebate.py#########################
V = options['verbose']
turfpct = int(options['turfpct'])
algorithm = options['algorithm']
if(algorithm != 'relieff' and algorithm != 'surf' and algorithm != 'surfstar' and algorithm != 'multisurfstar' and algorithm != 'multisurf'):
print("algorithm " + algorithm + " is not available")
print("Use relieff, surf, surfstar, multisurfstar, or multisurf")
sys.exit(1)
if(V):
print("-------------- Python Version --------------")
print(sys.version)
print("--------------------------------------------")
#-----------------------------------------------------------------------------#
input_file = options['filename']
if(os.path.exists(input_file)):
header, data = io.np_read_data_tst(input_file,options)
else:
print("File " + input_file + " does NOT exist!")
sys.exit(1)
#-----------------------------------------------------------------------------#
x, y = io.getxy(header, data, options)
#-----------------------------------------------------------------------------#
# if there is test data, test it for compatibility
if(options['testdata'] != None):
testdata = options['testdata']
if(os.path.exists(testdata)):
theader, tdata = io.test_testdata(header, testdata, options)
else:
print("File " + testdata + " does NOT exist!")
sys.exit(2)
#-----------------------------------------------------------------------------#
var = cmn.getVariables(header, x, y, options)
attr = cmn.getAttributeInfo(header, x, var, options)
cheader = []
for i in header:
if attr[i][0] == 'continuous':
cheader.append(i)
if(V):
print("--------------- Parameters ---------------")
print("datafile: " + options['basename'])
print("datatype: " + var['dataType'])
print("attributes: " + str(var['NumAttributes']))
if(var['dataType'] == 'mixed'):
print(" continuous: " + str(var['cpct'][1]))
print(" discrete: " + str(var['dpct'][1]))
print("instances: " + str(var['datalen']))
print("missing: " + str(var['mdcnt']))
print("classtype: " + var['classType'])
if(var['classType'] == 'multiclass'):
yset = var['phenoTypeList']
print(" classes: " + str(len(yset)))
print("classname: " + var['phenoTypeName'])
print("algorithm: " + options['algorithm'])
print("--------------------------------------------")
sys.stdout.flush()
#-----------------------------------------------------------------------------#
# create distance array and remove intermediate data
# if missing and/or mixed data use the mixedDistance function
#
begin = tm.time()
diffs, cidx, didx = cmn.dtypeArray(header, attr, var)
if(var['mdcnt'] > 0):
import rebate.mmDistance as md
distArray = md.getDistances(x[:,cidx], x[:,didx], var, diffs[cidx])
disttype = "missing"
else:
distArray = cmn.getDistances(x, attr, var, cidx, didx, cheader)
disttype = "discrete/continuous/mixed"
if(V):
ctime = "[" + tm.strftime("%H:%M:%S") + "]"
print(ctime + " " + disttype + " distance array time(sec) = "
+ str(tm.time()-begin))
sys.stdout.flush()
#############################################################################
def test_relieff_GWAS_Sim():
""" Test ReliefF on GWAS_Sim Missing Data """
Scores = R.runReliefF(header,x,y,attr,var,distArray,options)
print("ReliefF + GWAS_Sim MD ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_surf_GWAS_Sim():
""" Test SURF on GWAS_Sim Missing Data"""
#New parameters
options['algorithm'] = 'surf'
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
print("SURF + GWAS_Sim MD ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_surfstar_GWAS_Sim():
""" Test SURF* on GWAS_Sim Missing Data"""
#New parameters
options['algorithm'] = 'surfstar'
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
print("SURF* + GWAS_Sim MD ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_multisurfstar_GWAS_Sim():
""" Test MultiSURF* on GWAS_Sim Missing Data"""
#New parameters
options['algorithm'] = 'multisurfstar'
Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
print("MultiSURF* + GWAS_Sim MD ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore ==18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_multisurf_GWAS_Sim():
""" Test MultiSURF on GWAS_Sim Missing Data"""
#New parameters
options['algorithm'] = 'multisurf'
Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
print("MultiSURF + GWAS_Sim MD ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore ==18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
| 9,708 | 41.213043 | 141 | py |
ReBATE | ReBATE-master/tests/tests_mixed_features.py |
"""
ReBATE was primarily developed at the University of Pennsylvania by:
- Pete Schmitt ([email protected])
- Ryan J. Urbanowicz ([email protected])
- Weixuan Fu ([email protected])
- and many more generous open source contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#Initialize hardcoded argument version of rebate.py
import rebate.IO as io
import rebate.Common as cmn
import rebate.relieff as R
import rebate.surf as S
import rebate.multisurf as MS
import time as tm
import sys
import os
###############################################################################
#Setup Options ################################################
options = dict()
options['filename'] = 'data/GAMETES_Epistasis_2-Way_mixed_attribute_a_20s_1600her_0.4__maf_0.2_EDM-2_01.txt'
options['basename'] = 'GAMETES_Epistasis_2-Way_mixed_attribute_a_20s_1600her_0.4__maf_0.2_EDM-2_01.txt'
options['dir_path'] = 'data'
options['testdata'] = None
options['phenotypename'] = "Class"
options['discretelimit'] = 10
options['neighbors'] = 10
options['missingdata'] = 'NA'
options['algorithm'] = 'relieff'
options['turfpct'] = '0'
options['verbose'] = False
options['debug'] = True
options['topattr'] = 0
options['outputdir'] = '.'
#########################################
#Below is just a copy of the required code from rebate.py#########################
V = options['verbose']
turfpct = int(options['turfpct'])
algorithm = options['algorithm']
if(algorithm != 'relieff' and algorithm != 'surf' and algorithm != 'surfstar' and algorithm != 'multisurfstar' and algorithm != 'multisurf'):
print("algorithm " + algorithm + " is not available")
print("Use relieff, surf, surfstar, multisurfstar, or multisurf")
sys.exit(1)
if(V):
print("-------------- Python Version --------------")
print(sys.version)
print("--------------------------------------------")
#-----------------------------------------------------------------------------#
input_file = options['filename']
if(os.path.exists(input_file)):
header, data = io.np_read_data_tst(input_file,options)
else:
print("File " + input_file + " does NOT exist!")
sys.exit(1)
#-----------------------------------------------------------------------------#
x, y = io.getxy(header, data, options)
#-----------------------------------------------------------------------------#
# if there is test data, test it for compatibility
if(options['testdata'] != None):
testdata = options['testdata']
if(os.path.exists(testdata)):
theader, tdata = io.test_testdata(header, testdata, options)
else:
print("File " + testdata + " does NOT exist!")
sys.exit(2)
#-----------------------------------------------------------------------------#
var = cmn.getVariables(header, x, y, options)
attr = cmn.getAttributeInfo(header, x, var, options)
cheader = []
for i in header:
if attr[i][0] == 'continuous':
cheader.append(i)
if(V):
print("--------------- Parameters ---------------")
print("datafile: " + options['basename'])
print("datatype: " + var['dataType'])
print("attributes: " + str(var['NumAttributes']))
if(var['dataType'] == 'mixed'):
print(" continuous: " + str(var['cpct'][1]))
print(" discrete: " + str(var['dpct'][1]))
print("instances: " + str(var['datalen']))
print("missing: " + str(var['mdcnt']))
print("classtype: " + var['classType'])
if(var['classType'] == 'multiclass'):
yset = var['phenoTypeList']
print(" classes: " + str(len(yset)))
print("classname: " + var['phenoTypeName'])
print("algorithm: " + options['algorithm'])
print("--------------------------------------------")
sys.stdout.flush()
#-----------------------------------------------------------------------------#
# create distance array and remove intermediate data
# if missing and/or mixed data use the mixedDistance function
#
begin = tm.time()
diffs, cidx, didx = cmn.dtypeArray(header, attr, var)
if(var['mdcnt'] > 0):
    import rebate.mmDistance as md
distArray = md.getDistances(x[:,cidx], x[:,didx], var, diffs[cidx])
disttype = "missing"
else:
distArray = cmn.getDistances(x, attr, var, cidx, didx, cheader)
disttype = "discrete/continuous/mixed"
if(V):
ctime = "[" + tm.strftime("%H:%M:%S") + "]"
print(ctime + " " + disttype + " distance array time(sec) = "
+ str(tm.time()-begin))
sys.stdout.flush()
#############################################################################
def test_relieff_GWAS_Sim():
""" Test ReliefF on GWAS_Sim Mixed Features (Discrete and Continuous)"""
Scores = R.runReliefF(header,x,y,attr,var,distArray,options)
print("ReliefF + GWAS_Sim MF")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_surf_GWAS_Sim():
""" Test SURF on GWAS_Sim Mixed Features (Discrete and Continuous)"""
#New parameters
options['algorithm'] = 'surf'
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
print("SURF + GWAS_Sim MF")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_surfstar_GWAS_Sim():
""" Test SURF* on GWAS_Sim Mixed Features (Discrete and Continuous)"""
#New parameters
options['algorithm'] = 'surfstar'
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
print("SURF* + GWAS_Sim MF")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_multisurfstar_GWAS_Sim():
""" Test MultiSURF* on GWAS_Sim Mixed Features (Discrete and Continuous)"""
#New parameters
options['algorithm'] = 'multisurfstar'
Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
print("MultiSURF* + GWAS_Sim MF")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore ==18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_multisurf_GWAS_Sim():
""" Test MultiSURF on GWAS_Sim Mixed Features (Discrete and Continuous)"""
#New parameters
options['algorithm'] = 'multisurf'
Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
print("MultiSURF + GWAS_Sim MF")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore ==18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
| 9,829 | 41.73913 | 141 | py |
ReBATE | ReBATE-master/tests/tests_gwas_sim.py |
"""
ReBATE was primarily developed at the University of Pennsylvania by:
- Pete Schmitt ([email protected])
- Ryan J. Urbanowicz ([email protected])
- Weixuan Fu ([email protected])
- and many more generous open source contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#Initialize hardcoded argument version of rebate.py
import rebate.IO as io
import rebate.Common as cmn
import rebate.relieff as R
import rebate.surf as S
import rebate.multisurf as MS
import rebate.Turf as T
import time as tm
import sys
import os
###############################################################################
#Setup Options ################################################
options = dict()
options['filename'] = 'data/GAMETES_Epistasis_2-Way_20atts_0.4H_EDM-1_1.txt'
options['basename'] = 'GAMETES_Epistasis_2-Way_20atts_0.4H_EDM-1_1.txt'
options['dir_path'] = 'data'
options['testdata'] = None
options['phenotypename'] = "Class"
options['discretelimit'] = 10
options['neighbors'] = 10
options['missingdata'] = 'NA'
options['algorithm'] = 'relieff'
options['turfpct'] = '0'
options['verbose'] = False
options['debug'] = True
options['topattr'] = 0
options['outputdir'] = '.'
#########################################
#Below is just a copy of the required code from rebate.py#########################
V = options['verbose']
turfpct = int(options['turfpct'])
algorithm = options['algorithm']
if(algorithm != 'relieff' and algorithm != 'surf' and algorithm != 'surfstar' and algorithm != 'multisurfstar' and algorithm != 'multisurf'):
print("algorithm " + algorithm + " is not available")
print("Use relieff, surf, surfstar, multisurfstar, or multisurf")
sys.exit(1)
if(V):
print("-------------- Python Version --------------")
print(sys.version)
print("--------------------------------------------")
#-----------------------------------------------------------------------------#
input_file = options['filename']
if(os.path.exists(input_file)):
header, data = io.np_read_data_tst(input_file,options)
else:
print("File " + input_file + " does NOT exist!")
sys.exit(1)
#-----------------------------------------------------------------------------#
x, y = io.getxy(header, data, options)
#-----------------------------------------------------------------------------#
# if there is test data, test it for compatibility
if(options['testdata'] != None):
testdata = options['testdata']
if(os.path.exists(testdata)):
theader, tdata = io.test_testdata(header, testdata, options)
else:
print("File " + testdata + " does NOT exist!")
sys.exit(2)
#-----------------------------------------------------------------------------#
var = cmn.getVariables(header, x, y, options)
attr = cmn.getAttributeInfo(header, x, var, options)
cheader = []
for i in header:
if attr[i][0] == 'continuous':
cheader.append(i)
if(V):
print("--------------- Parameters ---------------")
print("datafile: " + options['basename'])
print("datatype: " + var['dataType'])
print("attributes: " + str(var['NumAttributes']))
if(var['dataType'] == 'mixed'):
print(" continuous: " + str(var['cpct'][1]))
print(" discrete: " + str(var['dpct'][1]))
print("instances: " + str(var['datalen']))
print("missing: " + str(var['mdcnt']))
print("classtype: " + var['classType'])
if(var['classType'] == 'multiclass'):
yset = var['phenoTypeList']
print(" classes: " + str(len(yset)))
print("classname: " + var['phenoTypeName'])
print("algorithm: " + options['algorithm'])
print("--------------------------------------------")
sys.stdout.flush()
#-----------------------------------------------------------------------------#
# create distance array and remove intermediate data
# if missing and/or mixed data use the mixedDistance function
#
begin = tm.time()
diffs, cidx, didx = cmn.dtypeArray(header, attr, var)
if(var['mdcnt'] > 0):
    import rebate.mmDistance as md
distArray = md.getDistances(x[:,cidx], x[:,didx], var, diffs[cidx])
disttype = "missing"
else:
distArray = cmn.getDistances(x, attr, var, cidx, didx, cheader)
disttype = "discrete/continuous/mixed"
if(V):
ctime = "[" + tm.strftime("%H:%M:%S") + "]"
print(ctime + " " + disttype + " distance array time(sec) = "
+ str(tm.time()-begin))
sys.stdout.flush()
#############################################################################
def test_relieff_GWAS_Sim():
""" Test ReliefF on GWAS_Sim """
Scores = R.runReliefF(header,x,y,attr,var,distArray,options)
print("ReliefF + GWAS_Sim ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_surf_GWAS_Sim():
""" Test SURF on GWAS_Sim """
#New parameters
options['algorithm'] = 'surf'
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
print("SURF + GWAS_Sim ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_surfstar_GWAS_Sim():
""" Test SURF* on GWAS_Sim """
#New parameters
options['algorithm'] = 'surfstar'
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
print("SURF* + GWAS_Sim ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_multisurfstar_GWAS_Sim():
""" Test MultiSURF* on GWAS_Sim """
#New parameters
options['algorithm'] = 'multisurfstar'
Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
print("MultiSURF* + GWAS_Sim ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore ==18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_multisurf_GWAS_Sim():
""" Test MultiSURF on GWAS_Sim """
#New parameters
options['algorithm'] = 'multisurf'
Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
print("MultiSURF + GWAS_Sim ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore ==18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_multisurf_turf_GWAS_Sim():
""" Test MultiSURF with TuRF on GWAS_Sim """
#New parameters
options['algorithm'] = 'multisurf'
options['turfpct'] = '50'
turfpct = int(options['turfpct'])
pct = float(turfpct)/100.0
iterations = int(1/float(pct))
fun = MS.runMultiSURF
tempx = None
tempVar = None
tempfullscores = None
templost = None
temptable = None
Scores,tempx,tempVar,templost,temptable = T.runTurf(header,x,y,attr,var,distArray,pct,iterations,fun,options,cmn)
options['algorithm'] = algorithm + "-turf"
print("MultiSURF with TuRF + 6-bit Multiplexer ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 10 #6-bit Multiplexer problem
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
#Check that the address bits (indexed as features 0 and 1) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 8 or indexTopScore == 9
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 8
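# Worked example of the TuRF bookkeeping above: turfpct = 50 gives pct = 0.5 and
# iterations = int(1 / 0.5) = 2, and the assertions expect 10 of the 20 simulated
# GWAS features to survive the filtering, with the predictive pair re-indexed
# from 18/19 down to 8/9 in the returned score list.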
################################# | 10,841 | 40.224335 | 141 | py |
ReBATE | ReBATE-master/tests/tests_6_bit_multiplexer.py |
"""
ReBATE was primarily developed at the University of Pennsylvania by:
- Pete Schmitt ([email protected])
- Ryan J. Urbanowicz ([email protected])
- Weixuan Fu ([email protected])
- and many more generous open source contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#Initialize hardcoded argument version of rebate.py
import rebate.IO as io
import rebate.Common as cmn
import rebate.relieff as R
import rebate.surf as S
import rebate.multisurf as MS
import time as tm
import sys
import os
#import warnings
#warnings.filterwarnings('ignore')
###############################################################################
#Setup Options ################################################
options = dict()
options['filename'] = 'data/6Multiplexer_Data_500_0.txt'
options['basename'] = '6Multiplexer_Data_500_0.txt'
options['dir_path'] = 'data'
options['testdata'] = None
options['phenotypename'] = "Class"
options['discretelimit'] = 10
options['neighbors'] = 10
options['missingdata'] = 'NA'
options['algorithm'] = 'relieff'
options['turfpct'] = '0'
options['verbose'] = False
options['debug'] = True
options['topattr'] = 0
options['outputdir'] = '.'
#########################################
#Below is just a copy of the required code from rebate.py#########################
V = options['verbose']
turfpct = int(options['turfpct'])
algorithm = options['algorithm']
if(algorithm != 'relieff' and algorithm != 'surf' and algorithm != 'surfstar' and algorithm != 'multisurfstar' and algorithm != 'multisurf'):
print("algorithm " + algorithm + " is not available")
print("Use relieff, surf, surfstar, multisurfstar, or multisurf")
sys.exit(1)
if(V):
print("-------------- Python Version --------------")
print(sys.version)
print("--------------------------------------------")
#-----------------------------------------------------------------------------#
input_file = options['filename']
if(os.path.exists(input_file)):
header, data = io.np_read_data(input_file,options)
else:
print("File " + input_file + " does NOT exist!")
sys.exit(1)
#-----------------------------------------------------------------------------#
x, y = io.getxy(header, data, options)
#-----------------------------------------------------------------------------#
# if there is test data, test it for compatibility
if(options['testdata'] != None):
testdata = options['testdata']
if(os.path.exists(testdata)):
theader, tdata = io.test_testdata(header, testdata, options)
else:
print("File " + testdata + " does NOT exist!")
sys.exit(2)
#-----------------------------------------------------------------------------#
var = cmn.getVariables(header, x, y, options)
attr = cmn.getAttributeInfo(header, x, var, options)
cheader = []
for i in header:
if attr[i][0] == 'continuous':
cheader.append(i)
if(V):
print("--------------- Parameters ---------------")
print("datafile: " + options['basename'])
print("datatype: " + var['dataType'])
print("attributes: " + str(var['NumAttributes']))
if(var['dataType'] == 'mixed'):
print(" continuous: " + str(var['cpct'][1]))
print(" discrete: " + str(var['dpct'][1]))
print("instances: " + str(var['datalen']))
print("missing: " + str(var['mdcnt']))
print("classtype: " + var['classType'])
if(var['classType'] == 'multiclass'):
yset = var['phenoTypeList']
print(" classes: " + str(len(yset)))
print("classname: " + var['phenoTypeName'])
print("algorithm: " + options['algorithm'])
print("--------------------------------------------")
sys.stdout.flush()
#-----------------------------------------------------------------------------#
# create distance array and remove intermediate data
# if missing and/or mixed data use the mixedDistance function
#
begin = tm.time()
diffs, cidx, didx = cmn.dtypeArray(header, attr, var)
if(var['mdcnt'] > 0):
    import rebate.mmDistance as md
distArray = md.getDistances(x[:,cidx], x[:,didx], var, diffs[cidx])
disttype = "missing"
else:
distArray = cmn.getDistances(x, attr, var, cidx, didx, cheader)
disttype = "discrete/continuous/mixed"
if(V):
ctime = "[" + tm.strftime("%H:%M:%S") + "]"
print(ctime + " " + disttype + " distance array time(sec) = "
+ str(tm.time()-begin))
sys.stdout.flush()
#############################################################################
###################################################################################################################################################
def test_relieff_Multiplexer():
""" Test ReliefF on 6-bit Multiplexer"""
Scores = R.runReliefF(header,x,y,attr,var,distArray,options)
print("ReliefF + 6-bit Multiplexer ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 6 #6-bit Multiplexer problem
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
#Check that the address bits (indexed as features 0 and 1) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 0 or indexTopScore == 1
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 0
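# Reference sketch of the 6-bit multiplexer target function (an illustration
# assumed from the standard statement of the problem, not taken from the
# data-generation code): the first two bits act as an address that selects one of
# the remaining four register bits as the class label, which is why the address
# bits (features 0 and 1) are expected to earn the top Relief-based scores.
def _six_bit_multiplexer(bits):
    """Return the class label for a length-6 sequence of 0/1 bits."""
    address = 2 * bits[0] + bits[1]
    return bits[2 + address]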
def test_surf_Multiplexer():
""" Test SURF on 6-bit Multiplexer"""
#New parameters
options['algorithm'] = 'surf'
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
print("SURF + 6-bit Multiplexer ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 6 #6-bit Multiplexer problem
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
#Check that the address bits (indexed as features 0 and 1) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 0 or indexTopScore == 1
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 0
def test_surfstar_Multiplexer():
""" Test SURF* on 6-bit Multiplexer """
#New parameters
options['algorithm'] = 'surfstar'
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
print("SURF* + 6-bit Multiplexer ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 6 #6-bit Multiplexer problem
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
#Check that the address bits (indexed as features 0 and 1) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 0 or indexTopScore == 1
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 0
def test_multisurfstar_Multiplexer():
""" Test MultiSURF* on 6-bit Multiplexer """
#New parameters
options['algorithm'] = 'multisurfstar'
Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
print("MultiSURF* + 6-bit Multiplexer ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 6 #6-bit Multiplexer problem
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
#Check that the address bits (indexed as features 0 and 1) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 0 or indexTopScore == 1
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 0
def test_multisurf_Multiplexer():
""" Test MultiSURF on 6-bit Multiplexer """
#New parameters
options['algorithm'] = 'multisurf'
Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
print("MultiSURF + 6-bit Multiplexer ")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 6 #6-bit Multiplexer problem
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
#Check that the address bits (indexed as features 0 and 1) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 0 or indexTopScore == 1
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 0
##################################################################################################################
| 9,930 | 40.902954 | 147 | py |
ReBATE | ReBATE-master/tests/tests_multiclass.py |
"""
ReBATE was primarily developed at the University of Pennsylvania by:
- Pete Schmitt ([email protected])
- Ryan J. Urbanowicz ([email protected])
- Weixuan Fu ([email protected])
- and many more generous open source contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#Initialize hardcoded argument version of rebate.py
import rebate.IO as io
import rebate.Common as cmn
import rebate.relieff as R
import rebate.surf as S
import rebate.multisurf as MS
import time as tm
import sys
import os
###############################################################################
#Setup Options ################################################
options = dict()
options['filename'] = 'data/3Class_Datasets_Loc_2_01.txt'
options['basename'] = '3Class_Datasets_Loc_2_01.txt'
options['dir_path'] = 'data'
options['testdata'] = None
options['phenotypename'] = "Class"
options['discretelimit'] = 10
options['neighbors'] = 10
options['missingdata'] = 'NA'
options['algorithm'] = 'relieff'
options['turfpct'] = '0'
options['verbose'] = False
options['debug'] = True
options['topattr'] = 0
options['outputdir'] = '.'
#########################################
#Below is just a copy of the required code from rebate.py#########################
V = options['verbose']
turfpct = int(options['turfpct'])
algorithm = options['algorithm']
if(algorithm != 'relieff' and algorithm != 'surf' and algorithm != 'surfstar' and algorithm != 'multisurfstar' and algorithm != 'multisurf'):
print("algorithm " + algorithm + " is not available")
print("Use relieff, surf, surfstar, multisurfstar, or multisurf")
sys.exit(1)
if(V):
print("-------------- Python Version --------------")
print(sys.version)
print("--------------------------------------------")
#-----------------------------------------------------------------------------#
input_file = options['filename']
if(os.path.exists(input_file)):
header, data = io.np_read_data_tst(input_file,options)
else:
print("File " + input_file + " does NOT exist!")
sys.exit(1)
#-----------------------------------------------------------------------------#
x, y = io.getxy(header, data, options)
#-----------------------------------------------------------------------------#
# if there is test data, test it for compatibility
if(options['testdata'] != None):
testdata = options['testdata']
if(os.path.exists(testdata)):
theader, tdata = io.test_testdata(header, testdata, options)
else:
print("File " + testdata + " does NOT exist!")
sys.exit(2)
#-----------------------------------------------------------------------------#
var = cmn.getVariables(header, x, y, options)
attr = cmn.getAttributeInfo(header, x, var, options)
cheader = []
for i in header:
if attr[i][0] == 'continuous':
cheader.append(i)
if(V):
print("--------------- Parameters ---------------")
print("datafile: " + options['basename'])
print("datatype: " + var['dataType'])
print("attributes: " + str(var['NumAttributes']))
if(var['dataType'] == 'mixed'):
print(" continuous: " + str(var['cpct'][1]))
print(" discrete: " + str(var['dpct'][1]))
print("instances: " + str(var['datalen']))
print("missing: " + str(var['mdcnt']))
print("classtype: " + var['classType'])
if(var['classType'] == 'multiclass'):
yset = var['phenoTypeList']
print(" classes: " + str(len(yset)))
print("classname: " + var['phenoTypeName'])
print("algorithm: " + options['algorithm'])
print("--------------------------------------------")
sys.stdout.flush()
#-----------------------------------------------------------------------------#
# create distance array and remove intermediate data
# if missing and/or mixed data use the mixedDistance function
#
begin = tm.time()
diffs, cidx, didx = cmn.dtypeArray(header, attr, var)
if(var['mdcnt'] > 0):
    import rebate.mmDistance as md
distArray = md.getDistances(x[:,cidx], x[:,didx], var, diffs[cidx])
disttype = "missing"
else:
distArray = cmn.getDistances(x, attr, var, cidx, didx, cheader)
disttype = "discrete/continuous/mixed"
if(V):
ctime = "[" + tm.strftime("%H:%M:%S") + "]"
print(ctime + " " + disttype + " distance array time(sec) = "
+ str(tm.time()-begin))
sys.stdout.flush()
#############################################################################
def test_relieff_GWAS_Sim():
""" Test ReliefF on GWAS_Sim Multiclass"""
Scores = R.runReliefF(header,x,y,attr,var,distArray,options)
print("ReliefF + GWAS_Sim MC")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_surf_GWAS_Sim():
""" Test SURF on GWAS_Sim Multiclass"""
#New parameters
options['algorithm'] = 'surf'
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
print("SURF + GWAS_Sim MC")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_surfstar_GWAS_Sim():
""" Test SURF* on GWAS_Sim Multiclass"""
#New parameters
options['algorithm'] = 'surfstar'
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
print("SURF* + GWAS_Sim MC")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_multisurfstar_GWAS_Sim():
""" Test MultiSURF* on GWAS_Sim Multiclass"""
#New parameters
options['algorithm'] = 'multisurfstar'
Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
print("MultiSURF* + GWAS_Sim MC")
print(str(Scores))
#Check that score list is not empty
assert Scores != None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (indexed as features 18 and 19) have the top scores as expected.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore ==18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_multisurf_GWAS_Sim():
""" Test MultiSURF on GWAS_Sim Multiclass"""
#New parameters
options['algorithm'] = 'multisurf'
Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
print("MultiSURF + GWAS_Sim MC")
print(str(Scores))
#Check that score list is not empty
    assert Scores is not None
    #Check that a score for all features is output
    assert len(Scores) == 20 #GWAS simulated dataset
    #Check that all scores fall between -1 and 1
    assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the two predictive features (indexed as 18 and 19) have the top scores as expected.
    indexTopScore = Scores.index(max(Scores))
    assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 | 9,570 | 40.79476 | 141 | py |
iterative_cleaner | iterative_cleaner-master/iterative_cleaner.py | #!/usr/bin/env python
# Tool to remove RFI from pulsar archives.
# Originally written by Patrick Lazarus. Modified by Lars Kuenkel.
from __future__ import print_function
import numpy as np
import datetime
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import scipy.optimize
import argparse
import psrchive
def parse_arguments():
parser = argparse.ArgumentParser(description='Commands for the cleaner')
parser.add_argument('archive', nargs='+', help='The chosen archives')
parser.add_argument('-c', '--chanthresh', type=float, default=5, metavar=('channel_threshold'), help='The threshold (in number of sigmas) a '
'profile needs to stand out compared to '
'others in the same channel for it to '
'be removed.')
parser.add_argument('-s', '--subintthresh', type=float, default=5, metavar=('subint_threshold'), help='The threshold (in number of sigmas) a '
'profile needs to stand out compared to '
'others in the same subint for it to '
'be removed.')
parser.add_argument('-m', '--max_iter', type=int, default=5, metavar=('maximum_iterations'), help='Maximum number of iterations.')
parser.add_argument('-z', '--print_zap', action='store_true', help='Creates a plot that shows which profiles get zapped.')
parser.add_argument('-u', '--unload_res', action='store_true', help='Creates an archive that contains the pulse free residual.')
parser.add_argument('-p', '--pscrunch', action='store_true', help='Pscrunches the output archive.')
parser.add_argument('-q', '--quiet', action='store_true', help='Do not print cleaning information.')
parser.add_argument('-l', '--no_log', action='store_true', help='Do not create cleaning log.')
parser.add_argument('-r', '--pulse_region', nargs=3, type=float, default=[0,0,1],
metavar=('pulse_start', 'pulse_end', 'scaling_factor'), help="Defines the range of the pulse and a suppression factor.")
parser.add_argument('-o', '--output', type=str, default='', metavar=('output_filename'),
help="Name of the output file. If set to 'std' the pattern NAME.FREQ.MJD.ar will be used.")
parser.add_argument('--memory', action='store_true', help='Do not pscrunch the archive while it is in memory.\
Costs RAM but prevents having to reload the archive.')
parser.add_argument('--bad_chan', type=float, default=1, help='Fraction of subints that needs to be removed in order to remove the whole channel.')
parser.add_argument('--bad_subint', type=float, default=1, help='Fraction of channels that needs to be removed in order to remove the whole subint.')
args = parser.parse_args()
return args
def main(args):
for arch in args.archive:
ar = psrchive.Archive_load(arch)
if args.output == '':
orig_name = str(ar).split(':', 1)[1].strip()
o_name = orig_name + '_cleaned.ar'
else:
if args.output == 'std':
mjd = (float(ar.start_time().strtempo()) + float(ar.end_time().strtempo())) / 2.0
name = ar.get_source()
cent_freq = ar.get_centre_frequency()
o_name = "%s.%.3f.%f.ar" % (name, cent_freq, mjd)
else:
o_name = args.output
ar = clean(ar, args, arch)
ar.unload(o_name)
if not args.quiet:
print("Cleaned archive: %s" % o_name)
def clean(ar, args, arch):
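    """Iteratively clean the archive: build an f/t-scrunched template profile,
    subtract a fitted copy of it from every subint/channel profile, flag
    outlying residual profiles via channel and subint statistics, zero their
    weights, and repeat until the weights converge or args.max_iter loops
    have been run.
    """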
orig_weights = ar.get_weights()
if args.memory and not args.pscrunch:
pass
else:
ar.pscrunch()
patient = ar.clone()
ar_name = ar.get_filename().split()[-1]
x = 0
max_iterations = args.max_iter
pulse_region = args.pulse_region
# Create list that is used to end the iteration
test_weights = []
test_weights.append(patient.get_weights())
profile_number = orig_weights.size
if not args.quiet:
print(("Total number of profiles: %s" % profile_number))
while x < max_iterations:
x += 1
if not args.quiet:
print(("Loop: %s" % x))
# Prepare the data for template creation
patient.pscrunch() # pscrunching again is not necessary if already pscrunched but prevents a bug
patient.remove_baseline()
patient.dedisperse()
patient.fscrunch()
patient.tscrunch()
template = patient.get_Profile(0, 0, 0).get_amps() * 10000
# Reset patient
patient = ar.clone()
patient.pscrunch()
patient.remove_baseline()
patient.dedisperse()
remove_profile_inplace(patient, template, pulse_region)
# re-set DM to 0
patient.dededisperse()
if args.unload_res:
residual = patient.clone()
# Get data (select first polarization - recall we already P-scrunched)
data = patient.get_data()[:, 0, :, :]
data = apply_weights(data, orig_weights)
# Mask profiles where weight is 0
mask_2d = np.bitwise_not(np.expand_dims(orig_weights, 2).astype(bool))
mask_3d = mask_2d.repeat(ar.get_nbin(), axis=2)
data = np.ma.masked_array(data, mask=mask_3d)
# RFI-ectomy must be recommended by average of tests
avg_test_results = comprehensive_stats(data, args, axis=2)
# Reset patient and set weights in patient
del patient
patient = ar.clone()
set_weights_archive(patient, avg_test_results)
        # Test whether these weights were already used in a previous iteration
new_weights = patient.get_weights()
        diff_weights = np.sum(new_weights != test_weights[-1])
rfi_frac = (new_weights.size - np.count_nonzero(new_weights)) / float(new_weights.size)
# Print the changes to the previous loop to help in choosing a suitable max_iter
if not args.quiet:
print(("Differences to previous weights: %s RFI fraction: %s" %(diff_weigths, rfi_frac)))
for old_weights in test_weights:
if np.all(new_weights == old_weights):
if not args.quiet:
print(("RFI removal stops after %s loops." % x))
loops = x
x = 1000000
test_weights.append(new_weights)
if x == max_iterations:
if not args.quiet:
print(("Cleaning was interrupted after the maximum amount of loops (%s)" % max_iterations))
loops = max_iterations
# Reload archive if it is not supposed to be pscrunched.
if not args.pscrunch and not args.memory:
ar = psrchive.Archive_load(arch)
# Set weights in archive.
set_weights_archive(ar, avg_test_results)
# Test if whole channel or subints should be removed
if args.bad_chan != 1 or args.bad_subint != 1:
ar = find_bad_parts(ar, args)
# Unload residual if needed
if args.unload_res:
residual.unload("%s_residual_%s.ar" % (ar_name, loops))
    # Create plot that shows zapped (red) and unzapped (blue) profiles if needed
if args.print_zap:
plt.imshow(avg_test_results.T, vmin=0.999, vmax=1.001, aspect='auto',
interpolation='nearest', cmap=cm.coolwarm)
plt.gca().invert_yaxis()
plt.title("%s cthresh=%s sthresh=%s" % (ar_name, args.chanthresh, args.subintthresh))
plt.savefig("%s_%s_%s.png" % (ar_name, args.chanthresh,
args.subintthresh), bbox_inches='tight')
# Create log that contains the used parameters
if not args.no_log:
with open("clean.log", "a") as myfile:
myfile.write("\n %s: Cleaned %s with %s, required loops=%s"
% (datetime.datetime.now(), ar_name, args, loops))
return ar
def comprehensive_stats(data, args, axis):
"""The comprehensive scaled stats that are used for
the "Surgical Scrub" cleaning strategy.
Inputs:
data: A 3-D numpy array.
axis: The axis that should be used for computing stats.
            args: argparse namespace object that needs to contain the
following two parameters:
chanthresh: The threshold (in number of sigmas) a
profile needs to stand out compared to others in the
same channel for it to be removed.
(Default: use value defined in config files)
subintthresh: The threshold (in number of sigmas) a profile
needs to stand out compared to others in the same
sub-int for it to be removed.
(Default: use value defined in config files)
Output:
stats: A 2-D numpy array of stats.
"""
chanthresh = args.chanthresh
subintthresh = args.subintthresh
nsubs, nchans, nbins = data.shape
diagnostic_functions = [
np.ma.std,
np.ma.mean,
np.ma.ptp,
lambda data, axis: np.max(np.abs(np.fft.rfft(
data - np.expand_dims(data.mean(axis=axis), axis=axis),
axis=axis)), axis=axis)
]
# Compute diagnostics
diagnostics = []
for func in diagnostic_functions:
diagnostics.append(func(data, axis=2))
# Now step through data and identify bad profiles
scaled_diagnostics = []
for diag in diagnostics:
chan_scaled = np.abs(channel_scaler(diag)) / chanthresh
subint_scaled = np.abs(subint_scaler(diag)) / subintthresh
scaled_diagnostics.append(np.max((chan_scaled, subint_scaled), axis=0))
test_results = np.median(scaled_diagnostics, axis=0)
return test_results
def channel_scaler(array2d):
"""For each channel scale it.
"""
scaled = np.empty_like(array2d)
nchans = array2d.shape[1]
for ichan in np.arange(nchans):
with np.errstate(invalid='ignore', divide='ignore'):
channel = array2d[:, ichan]
median = np.ma.median(channel)
channel_rescaled = channel - median
mad = np.ma.median(np.abs(channel_rescaled))
scaled[:, ichan] = (channel_rescaled) / mad
return scaled
def subint_scaler(array2d):
"""For each sub-int scale it.
"""
scaled = np.empty_like(array2d)
nsubs = array2d.shape[0]
for isub in np.arange(nsubs):
with np.errstate(invalid='ignore', divide='ignore'):
subint = array2d[isub, :]
median = np.ma.median(subint)
subint_rescaled = subint - median
mad = np.ma.median(np.abs(subint_rescaled))
scaled[isub, :] = (subint_rescaled) / mad
return scaled
def remove_profile_inplace(ar, template, pulse_region):
"""Remove the temnplate pulse from the individual profiles.
"""
data = ar.get_data()[:, 0, :, :] # Select first polarization channel
# archive is P-scrunched, so this is
# total intensity, the only polarization
# channel
for isub, ichan in np.ndindex(ar.get_nsubint(), ar.get_nchan()):
amps = remove_profile1d(data[isub, ichan], isub, ichan, template, pulse_region)[1]
prof = ar.get_Profile(isub, 0, ichan)
if amps is None:
prof.set_weight(0)
else:
prof.get_amps()[:] = amps
def remove_profile1d(prof, isub, ichan, template, pulse_region):
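    # Fit a single amplitude that scales the template to this profile via
    # linear least squares and return the residual; the residual inside the
    # user-defined pulse region is scaled by the given factor.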
err = lambda amp: amp * template - prof
params, status = scipy.optimize.leastsq(err, [1.0])
err2 = np.asarray(err(params))
if pulse_region != [0, 0, 1]:
p_start = int(pulse_region[1])
p_end = int(pulse_region[2])
err2[p_start:p_end] = err2[p_start:p_end] * pulse_region[0]
if status not in (1, 2, 3, 4):
print("Bad status for least squares fit when removing profile.")
return (isub, ichan), np.zeros_like(prof)
else:
return (isub, ichan), err2
def apply_weights(data, weights):
"""Apply the weigths to an array.
"""
nsubs, nchans, nbins = data.shape
for isub in range(nsubs):
data[isub] = data[isub] * weights[isub, ..., np.newaxis]
return data
def set_weights_archive(archive, test_results):
"""Apply the weigths to an archive according to the test results.
"""
for (isub, ichan) in np.argwhere(test_results >= 1):
integ = archive.get_Integration(int(isub))
integ.set_weight(int(ichan), 0.0)
def find_bad_parts(archive, args):
"""Checks whether whole channels or subints should be removed
"""
weights = archive.get_weights()
n_subints = archive.get_nsubint()
n_channels = archive.get_nchan()
n_bad_channels = 0
n_bad_subints = 0
for i in range(n_subints):
bad_frac = 1 - np.count_nonzero(weights[i, :]) / float(n_channels)
if bad_frac > args.bad_subint:
for j in range(n_channels):
integ = archive.get_Integration(int(i))
integ.set_weight(int(j), 0.0)
n_bad_subints += 1
for j in range(n_channels):
bad_frac = 1 - np.count_nonzero(weights[:, j]) / float(n_subints)
if bad_frac > args.bad_chan:
for i in range(n_subints):
integ = archive.get_Integration(int(i))
integ.set_weight(int(j), 0.0)
n_bad_channels += 1
if not args.quiet and n_bad_channels + n_bad_subints != 0:
print(("Removed %s bad subintegrations and %s bad channels." % (n_bad_subints, n_bad_channels)))
return archive
if __name__ == "__main__":
args = parse_arguments()
main(args)
| 13,854 | 39.630499 | 153 | py |
URLNet | URLNet-master/test.py | from utils import *
import pickle
import time
from tqdm import tqdm
import argparse
import numpy as np
import pickle
import tensorflow as tf
from tensorflow.contrib import learn
from tflearn.data_utils import to_categorical, pad_sequences
parser = argparse.ArgumentParser(description="Test URLNet model")
# data args
default_max_len_words = 200
parser.add_argument('--data.max_len_words', type=int, default=default_max_len_words, metavar="MLW",
help="maximum length of url in words (default: {})".format(default_max_len_words))
default_max_len_chars = 200
parser.add_argument('--data.max_len_chars', type=int, default=default_max_len_chars, metavar="MLC",
help="maximum length of url in characters (default: {})".format(default_max_len_chars))
default_max_len_subwords = 20
parser.add_argument('--data.max_len_subwords', type=int, default=default_max_len_subwords, metavar="MLSW",
help="maxium length of word in subwords/ characters (default: {})".format(default_max_len_subwords))
parser.add_argument('--data.data_dir', type=str, default='train_10000.txt', metavar="DATADIR",
help="location of data file")
default_delimit_mode = 1
parser.add_argument("--data.delimit_mode", type=int, default=default_delimit_mode, metavar="DLMODE",
help="0: delimit by special chars, 1: delimit by special chars + each char as a word (default: {})".format(default_delimit_mode))
parser.add_argument('--data.subword_dict_dir', type=str, default="runs/10000/subwords_dict.p", metavar="SUBWORD_DICT",
help="directory of the subword dictionary")
parser.add_argument('--data.word_dict_dir', type=str, default="runs/10000/words_dict.p", metavar="WORD_DICT",
help="directory of the word dictionary")
parser.add_argument('--data.char_dict_dir', type=str, default="runs/10000/chars_dict.p", metavar="CHAR_DICT",
help="directory of the character dictionary")
# model args
default_emb_dim = 32
parser.add_argument('--model.emb_dim', type=int, default=default_emb_dim, metavar="EMBDIM",
help="embedding dimension size (default: {})".format(default_emb_dim))
default_emb_mode = 1
parser.add_argument('--model.emb_mode', type=int, default=default_emb_mode, metavar="EMBMODE",
help="1: charCNN, 2: wordCNN, 3: char + wordCNN, 4: char-level wordCNN, 5: char + char-level wordCNN (default: {})".format(default_emb_mode))
# test args
default_batch_size = 128
parser.add_argument('--test.batch_size', type=int, default=default_batch_size, metavar="BATCHSIZE",
help="Size of each test batch (default: {})".format(default_batch_size))
# log args
parser.add_argument('--log.output_dir', type=str, default="runs/10000/", metavar="OUTPUTDIR",
help="directory to save the test results")
parser.add_argument('--log.checkpoint_dir', type=str, default="runs/10000/checkpoints/", metavar="CHECKPOINTDIR",
help="directory of the learned model")
FLAGS = vars(parser.parse_args())
for key, val in FLAGS.items():
print("{}={}".format(key, val))
urls, labels = read_data(FLAGS["data.data_dir"])
x, word_reverse_dict = get_word_vocab(urls, FLAGS["data.max_len_words"])
word_x = get_words(x, word_reverse_dict, FLAGS["data.delimit_mode"], urls)
ngram_dict = pickle.load(open(FLAGS["data.subword_dict_dir"], "rb"))
print("Size of subword vocabulary (train): {}".format(len(ngram_dict)))
word_dict = pickle.load(open(FLAGS["data.word_dict_dir"], "rb"))
print("size of word vocabulary (train): {}".format(len(word_dict)))
ngramed_id_x, worded_id_x = ngram_id_x_from_dict(word_x, FLAGS["data.max_len_subwords"], ngram_dict, word_dict)
chars_dict = pickle.load(open(FLAGS["data.char_dict_dir"], "rb"))
chared_id_x = char_id_x(urls, chars_dict, FLAGS["data.max_len_chars"])
print("Number of testing urls: {}".format(len(labels)))
######################## EVALUATION ###########################
def test_step(x, emb_mode):
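    # Build the feed_dict expected by the restored graph for the given
    # embedding mode and run a forward pass, returning the predicted classes
    # and the raw scores for this batch.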
p = 1.0
if emb_mode == 1:
feed_dict = {
input_x_char_seq: x[0],
dropout_keep_prob: p}
elif emb_mode == 2:
feed_dict = {
input_x_word: x[0],
dropout_keep_prob: p}
elif emb_mode == 3:
feed_dict = {
input_x_char_seq: x[0],
input_x_word: x[1],
dropout_keep_prob: p}
elif emb_mode == 4:
feed_dict = {
input_x_word: x[0],
input_x_char: x[1],
input_x_char_pad_idx: x[2],
dropout_keep_prob: p}
elif emb_mode == 5:
feed_dict = {
input_x_char_seq: x[0],
input_x_word: x[1],
input_x_char: x[2],
input_x_char_pad_idx: x[3],
dropout_keep_prob: p}
preds, s = sess.run([predictions, scores], feed_dict)
return preds, s
checkpoint_file = tf.train.latest_checkpoint(FLAGS["log.checkpoint_dir"])
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
session_conf.gpu_options.allow_growth=True
sess = tf.Session(config=session_conf)
with sess.as_default():
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
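        # Recover the input placeholders and output tensors by name from the
        # restored graph; which placeholders exist depends on the embedding
        # mode the model was trained with.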
if FLAGS["model.emb_mode"] in [1, 3, 5]:
input_x_char_seq = graph.get_operation_by_name("input_x_char_seq").outputs[0]
if FLAGS["model.emb_mode"] in [2, 3, 4, 5]:
input_x_word = graph.get_operation_by_name("input_x_word").outputs[0]
if FLAGS["model.emb_mode"] in [4, 5]:
input_x_char = graph.get_operation_by_name("input_x_char").outputs[0]
input_x_char_pad_idx = graph.get_operation_by_name("input_x_char_pad_idx").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
scores = graph.get_operation_by_name("output/scores").outputs[0]
if FLAGS["model.emb_mode"] == 1:
batches = batch_iter(list(chared_id_x), FLAGS["test.batch_size"], 1, shuffle=False)
elif FLAGS["model.emb_mode"] == 2:
batches = batch_iter(list(worded_id_x), FLAGS["test.batch_size"], 1, shuffle=False)
elif FLAGS["model.emb_mode"] == 3:
batches = batch_iter(list(zip(chared_id_x, worded_id_x)), FLAGS["test.batch_size"], 1, shuffle=False)
elif FLAGS["model.emb_mode"] == 4:
batches = batch_iter(list(zip(ngramed_id_x, worded_id_x)), FLAGS["test.batch_size"], 1, shuffle=False)
elif FLAGS["model.emb_mode"] == 5:
batches = batch_iter(list(zip(ngramed_id_x, worded_id_x, chared_id_x)), FLAGS["test.batch_size"], 1, shuffle=False)
all_predictions = []
all_scores = []
nb_batches = int(len(labels) / FLAGS["test.batch_size"])
if len(labels) % FLAGS["test.batch_size"] != 0:
nb_batches += 1
print("Number of batches in total: {}".format(nb_batches))
it = tqdm(range(nb_batches), desc="emb_mode {} delimit_mode {} test_size {}".format(FLAGS["model.emb_mode"], FLAGS["data.delimit_mode"], len(labels)), ncols=0)
for idx in it:
#for batch in batches:
batch = next(batches)
if FLAGS["model.emb_mode"] == 1:
x_char_seq = batch
elif FLAGS["model.emb_mode"] == 2:
x_word = batch
elif FLAGS["model.emb_mode"] == 3:
x_char_seq, x_word = zip(*batch)
elif FLAGS["model.emb_mode"] == 4:
x_char, x_word = zip(*batch)
elif FLAGS["model.emb_mode"] == 5:
x_char, x_word, x_char_seq = zip(*batch)
x_batch = []
if FLAGS["model.emb_mode"] in[1, 3, 5]:
x_char_seq = pad_seq_in_word(x_char_seq, FLAGS["data.max_len_chars"])
x_batch.append(x_char_seq)
if FLAGS["model.emb_mode"] in [2, 3, 4, 5]:
x_word = pad_seq_in_word(x_word, FLAGS["data.max_len_words"])
x_batch.append(x_word)
if FLAGS["model.emb_mode"] in [4, 5]:
x_char, x_char_pad_idx = pad_seq(x_char, FLAGS["data.max_len_words"], FLAGS["data.max_len_subwords"], FLAGS["model.emb_dim"])
x_batch.extend([x_char, x_char_pad_idx])
batch_predictions, batch_scores = test_step(x_batch, FLAGS["model.emb_mode"])
all_predictions = np.concatenate([all_predictions, batch_predictions])
all_scores.extend(batch_scores)
it.set_postfix()
if labels is not None:
correct_preds = float(sum(all_predictions == labels))
print("Accuracy: {}".format(correct_preds/float(len(labels))))
save_test_result(labels, all_predictions, all_scores, FLAGS["log.output_dir"])
| 8,848 | 47.092391 | 167 | py |
URLNet | URLNet-master/auc.py | import numpy as np
import argparse
import pdb
parser = argparse.ArgumentParser(description='Nill')
parser.add_argument('--input_path', default="results/svm_bow_lexical/baseline1/", type=str)
parser.add_argument('--input_file', type=str)
parser.add_argument('--threshold', default=0, type=float)
args = parser.parse_args()
print(args)
threshold = args.threshold
frequence = 0.05
with open(args.input_path + args.input_file) as f:
data = f.readlines()
out_file = args.input_file.split(".")[0] + '.auc'
f = open(args.input_path + out_file, 'w')
total = 0.
negative = 0.
positive = 0.
predictions = []
for line in data[1:]:
arr = line[:-1].split('\t')
idx = int(arr[0])
if idx == 1:
positive += 1
predictions.append([1, float(arr[-1])])
else:
negative += 1
predictions.append([-1, float(arr[-1])])
total = negative + positive
f.write('Total number of instances: ' + str(total) + '\n')
f.write('P: ' + str(positive) + '\n')
f.write('N: ' + str(negative) + '\n')
f.write('-'*30 + '\n')
f.write('Figure\n')
f.write('-'*30 + '\n')
f.write('decision_boundary\tTP\tFP\tTPR\tFPR\n')
TP = 0
FP = 0
TP_0 = 0
FP_0 = 0
AUC = 0.
target_TP = 1
target_FPR = 1e-5
table=[]
table_header = []
predictions.sort(key= lambda x: x[-1])
predictions = predictions[::-1]
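# Sweep the decision threshold from the highest score downwards: every
# positive adds the number of negatives ranked above it to AUC, and TPR/FPR
# points are written out roughly every (frequence * P) true positives.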
for i, pred in enumerate(predictions):
if pred[0] > 0:
TP+= 1
if pred[1] > threshold:
TP_0+= 1
AUC+= FP
else:
FP+= 1
if pred[1] > threshold:
FP_0+= 1
if ((TP > target_TP) or (i == total - 1)):
target_TP = target_TP + frequence*positive
f.write(str(pred[1]) + '\t' + str(TP) + '\t' + str(FP) + '\t')
f.write(str(float(TP)/positive) + '\t' + str(float(FP)/negative) + '\n')
if ((FP > target_FPR*negative) or (i==total-1)):
table_header.append(target_FPR)
table.append(float(TP)/positive)
target_FPR = target_FPR * 10
f.write('-'*30 + '\n')
f.write('Table\n')
f.write('-'*30 + '\n')
f.write('FPR\tTPR\n')
for i,j in zip(table,table_header):
f.write(str(j) + '\t' + str(i) + '\n')
f.write('-'*30 + '\n')
f.write('AUC:\t' + str(1. - (float(AUC)/positive)/negative) + '\n')
f.write('When the decision boundary is set to be 0\n')
f.write('TP:\t' + str(TP_0) + '\n')
f.write('FN:\t' + str(positive - TP_0) + '\n')
f.write('FP:\t' + str(FP_0) + '\n')
f.write('TN:\t' + str(negative - FP_0) + '\n')
f.close()
| 2,436 | 26.382022 | 91 | py |
URLNet | URLNet-master/utils.py | import time
import os
import numpy as np
from collections import defaultdict
from bisect import bisect_left
import tensorflow as tf
from tflearn.data_utils import to_categorical
from tensorflow.contrib import learn
def read_data(file_dir):
with open(file_dir) as file:
urls = []
labels = []
for line in file.readlines():
items = line.split('\t')
label = int(items[0])
if label == 1:
labels.append(1)
else:
labels.append(0)
url = items[1][:-1]
urls.append(url)
return urls, labels
def split_url(line, part):
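    """Strip any protocol/"www." prefix and split a URL into its components:
    primary domain, path, query argument, sub-directory, file name and file
    extension. Return the single component selected by `part` ('others'
    returns the path plus query), or the full tuple for any other value.
    """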
if line.startswith("http://"):
line=line[7:]
if line.startswith("https://"):
line=line[8:]
if line.startswith("ftp://"):
line=line[6:]
if line.startswith("www."):
line = line[4:]
slash_pos = line.find('/')
if slash_pos > 0 and slash_pos < len(line)-1: # line = "fsdfsdf/sdfsdfsd"
primarydomain = line[:slash_pos]
path_argument = line[slash_pos+1:]
path_argument_tokens = path_argument.split('/')
pathtoken = "/".join(path_argument_tokens[:-1])
last_pathtoken = path_argument_tokens[-1]
if len(path_argument_tokens) > 2 and last_pathtoken == '':
pathtoken = "/".join(path_argument_tokens[:-2])
last_pathtoken = path_argument_tokens[-2]
question_pos = last_pathtoken.find('?')
if question_pos != -1:
argument = last_pathtoken[question_pos+1:]
pathtoken = pathtoken + "/" + last_pathtoken[:question_pos]
else:
argument = ""
pathtoken = pathtoken + "/" + last_pathtoken
last_slash_pos = pathtoken.rfind('/')
sub_dir = pathtoken[:last_slash_pos]
filename = pathtoken[last_slash_pos+1:]
file_last_dot_pos = filename.rfind('.')
if file_last_dot_pos != -1:
file_extension = filename[file_last_dot_pos+1:]
filename = filename[:file_last_dot_pos]
else:
file_extension = ""
elif slash_pos == 0: # line = "/fsdfsdfsdfsdfsd"
primarydomain = line[1:]
pathtoken = ""
argument = ""
sub_dir = ""
filename = ""
file_extension = ""
elif slash_pos == len(line)-1: # line = "fsdfsdfsdfsdfsd/"
primarydomain = line[:-1]
pathtoken = ""
argument = ""
sub_dir = ""
filename = ""
file_extension = ""
else: # line = "fsdfsdfsdfsdfsd"
primarydomain = line
pathtoken = ""
argument = ""
sub_dir = ""
filename = ""
file_extension = ""
if part == 'pd':
return primarydomain
elif part == 'path':
return pathtoken
elif part == 'argument':
return argument
elif part == 'sub_dir':
return sub_dir
elif part == 'filename':
return filename
elif part == 'fe':
return file_extension
elif part == 'others':
if len(argument) > 0:
return pathtoken + '?' + argument
else:
return pathtoken
else:
return primarydomain, pathtoken, argument, sub_dir, filename, file_extension
def get_word_vocab(urls, max_length_words, min_word_freq=0):
vocab_processor = learn.preprocessing.VocabularyProcessor(max_length_words, min_frequency=min_word_freq)
start = time.time()
x = np.array(list(vocab_processor.fit_transform(urls)))
print("Finished build vocabulary and mapping to x in {}".format(time.time() - start))
vocab_dict = vocab_processor.vocabulary_._mapping
reverse_dict = dict(zip(vocab_dict.values(), vocab_dict.keys()))
print("Size of word vocabulary: {}".format(len(reverse_dict)))
return x, reverse_dict
def get_words(x, reverse_dict, delimit_mode, urls=None):
processed_x = []
if delimit_mode == 0:
for url in x:
words = []
for word_id in url:
if word_id != 0:
words.append(reverse_dict[word_id])
else:
break
processed_x.append(words)
elif delimit_mode == 1:
for i in range(x.shape[0]):
word_url = x[i]
raw_url = urls[i]
words = []
for w in range(len(word_url)):
word_id = word_url[w]
if word_id == 0:
words.extend(list(raw_url))
break
else:
word = reverse_dict[word_id]
idx = raw_url.index(word)
special_chars = list(raw_url[0:idx])
words.extend(special_chars)
words.append(word)
raw_url = raw_url[idx+len(word):]
if w == len(word_url) - 1:
words.extend(list(raw_url))
processed_x.append(words)
return processed_x
def get_char_ngrams(ngram_len, word):
word = "<" + word + ">"
chars = list(word)
begin_idx = 0
ngrams = []
while (begin_idx + ngram_len) <= len(chars):
end_idx = begin_idx + ngram_len
ngrams.append("".join(chars[begin_idx:end_idx]))
begin_idx += 1
return ngrams
def char_id_x(urls, char_dict, max_len_chars):
chared_id_x = []
for url in urls:
url = list(url)
url_in_char_id = []
l = min(len(url), max_len_chars)
for i in range(l):
c = url[i]
try:
c_id = char_dict[c]
except KeyError:
c_id = 0
url_in_char_id.append(c_id)
chared_id_x.append(url_in_char_id)
return chared_id_x
def ngram_id_x(word_x, max_len_subwords, high_freq_words=None):
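    """Build the character and word vocabularies from the tokenised URLs and
    encode every URL both as per-word lists of character ids and as a list of
    word ids. Words that are longer than max_len_subwords, or missing from
    the high-frequency word list, are mapped to <UNKNOWN>; id 0 is reserved
    for padding in both dictionaries.
    """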
char_ngram_len = 1
all_ngrams = set()
ngramed_x = []
all_words = set()
worded_x = []
counter = 0
for url in word_x:
if counter % 100000 == 0:
print("Processing #url {}".format(counter))
counter += 1
url_in_ngrams = []
url_in_words = []
words = url
for word in words:
ngrams = get_char_ngrams(char_ngram_len, word)
if (len(ngrams) > max_len_subwords) or \
(high_freq_words is not None and len(word)>1 and not is_in(high_freq_words, word)):
all_ngrams.update(ngrams[:max_len_subwords])
url_in_ngrams.append(ngrams[:max_len_subwords])
all_words.add("<UNKNOWN>")
url_in_words.append("<UNKNOWN>")
else:
all_ngrams.update(ngrams)
url_in_ngrams.append(ngrams)
all_words.add(word)
url_in_words.append(word)
ngramed_x.append(url_in_ngrams)
worded_x.append(url_in_words)
all_ngrams = list(all_ngrams)
ngrams_dict = dict()
for i in range(len(all_ngrams)):
ngrams_dict[all_ngrams[i]] = i+1 # ngram id=0 is for padding ngram
print("Size of ngram vocabulary: {}".format(len(ngrams_dict)))
all_words = list(all_words)
words_dict = dict()
for i in range(len(all_words)):
words_dict[all_words[i]] = i+1 #word id=0 for padding word
print("Size of word vocabulary: {}".format(len(words_dict)))
print("Index of <UNKNOWN> word: {}".format(words_dict["<UNKNOWN>"]))
ngramed_id_x = []
for ngramed_url in ngramed_x:
url_in_ngrams = []
for ngramed_word in ngramed_url:
ngram_ids = [ngrams_dict[x] for x in ngramed_word]
url_in_ngrams.append(ngram_ids)
ngramed_id_x.append(url_in_ngrams)
worded_id_x = []
for worded_url in worded_x:
word_ids = [words_dict[x] for x in worded_url]
worded_id_x.append(word_ids)
return ngramed_id_x, ngrams_dict, worded_id_x, words_dict
def ngram_id_x_from_dict(word_x, max_len_subwords, ngram_dict, word_dict = None):
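    # Same encoding as ngram_id_x, but reusing the vocabularies built at
    # training time: unseen characters map to id 0 and unseen or overly long
    # words map to the <UNKNOWN> word id.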
char_ngram_len = 1
print("Index of <UNKNOWN> word: {}".format(word_dict["<UNKNOWN>"]))
ngramed_id_x = []
worded_id_x = []
counter = 0
if word_dict:
word_vocab = sorted(list(word_dict.keys()))
for url in word_x:
if counter % 100000 == 0:
print("Processing url #{}".format(counter))
counter += 1
url_in_ngrams = []
url_in_words = []
words = url
for word in words:
ngrams = get_char_ngrams(char_ngram_len, word)
if len(ngrams) > max_len_subwords:
word = "<UNKNOWN>"
ngrams_id = []
for ngram in ngrams:
if ngram in ngram_dict:
ngrams_id.append(ngram_dict[ngram])
else:
ngrams_id.append(0)
url_in_ngrams.append(ngrams_id)
if is_in(word_vocab, word):
word_id = word_dict[word]
else:
word_id = word_dict["<UNKNOWN>"]
url_in_words.append(word_id)
ngramed_id_x.append(url_in_ngrams)
worded_id_x.append(url_in_words)
return ngramed_id_x, worded_id_x
def bisect_search(a,x):
i = bisect_left(a,x)
if i != len(a) and a[i] == x:
return i
raise ValueError
def is_in(a,x):
i = bisect_left(a,x)
if i != len(a) and a[i] == x:
return True
else:
return False
def prep_train_test(pos_x, neg_x, dev_pct):
np.random.seed(10)
shuffle_indices=np.random.permutation(np.arange(len(pos_x)))
pos_x_shuffled = pos_x[shuffle_indices]
dev_idx = -1 * int(dev_pct * float(len(pos_x)))
pos_train = pos_x_shuffled[:dev_idx]
pos_test = pos_x_shuffled[dev_idx:]
np.random.seed(10)
shuffle_indices=np.random.permutation(np.arange(len(neg_x)))
neg_x_shuffled = neg_x[shuffle_indices]
dev_idx = -1 * int(dev_pct * float(len(neg_x)))
neg_train = neg_x_shuffled[:dev_idx]
neg_test = neg_x_shuffled[dev_idx:]
x_train = np.array(list(pos_train) + list(neg_train))
y_train = len(pos_train)*[1] + len(neg_train)*[0]
x_test = np.array(list(pos_test) + list(neg_test))
y_test = len(pos_test)*[1] + len(neg_test)*[0]
y_train = to_categorical(y_train, nb_classes=2)
y_test = to_categorical(y_test, nb_classes=2)
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(x_train)))
x_train = x_train[shuffle_indices]
y_train = y_train[shuffle_indices]
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(x_test)))
x_test = x_test[shuffle_indices]
y_test = y_test[shuffle_indices]
print("Train Mal/Ben split: {}/{}".format(len(pos_train), len(neg_train)))
print("Test Mal/Ben split: {}/{}".format(len(pos_test), len(neg_test)))
print("Train/Test split: {}/{}".format(len(y_train), len(y_test)))
print("Train/Test split: {}/{}".format(len(x_train), len(x_test)))
return x_train, y_train, x_test, y_test
def get_ngramed_id_x(x_idxs, ngramed_id_x):
output_ngramed_id_x = []
for idx in x_idxs:
output_ngramed_id_x.append(ngramed_id_x[idx])
return output_ngramed_id_x
def pad_seq(urls, max_d1=0, max_d2=0, embedding_size=128):
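    # Pad the nested char-id representation to a fixed (n_urls, max_words,
    # max_chars) array and build a matching 0/1 mask broadcast over the
    # embedding dimension that marks the non-padded positions.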
if max_d1 == 0 and max_d2 == 0:
for url in urls:
if len(url) > max_d1:
max_d1 = len(url)
for word in url:
if len(word) > max_d2:
max_d2 = len(word)
pad_idx = np.zeros((len(urls), max_d1, max_d2, embedding_size))
pad_urls = np.zeros((len(urls), max_d1, max_d2))
pad_vec = [1 for i in range(embedding_size)]
for d0 in range(len(urls)):
url = urls[d0]
for d1 in range(len(url)):
if d1 < max_d1:
word = url[d1]
for d2 in range(len(word)):
if d2 < max_d2:
pad_urls[d0,d1,d2] = word[d2]
pad_idx[d0,d1,d2] = pad_vec
return pad_urls, pad_idx
def pad_seq_in_word(urls, max_d1=0, embedding_size=128):
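    # Pad each id sequence with zeros up to max_d1 (or up to the longest
    # sequence in the batch when max_d1 is 0).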
if max_d1 == 0:
url_lens = [len(url) for url in urls]
max_d1 = max(url_lens)
pad_urls = np.zeros((len(urls), max_d1))
#pad_idx = np.zeros((len(urls), max_d1, embedding_size))
#pad_vec = [1 for i in range(embedding_size)]
for d0 in range(len(urls)):
url = urls[d0]
for d1 in range(len(url)):
if d1 < max_d1:
pad_urls[d0,d1] = url[d1]
#pad_idx[d0,d1] = pad_vec
return pad_urls
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
def batch_iter(data, batch_size, num_epochs, shuffle=True):
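    # Generator yielding mini-batches for num_epochs passes over the data,
    # reshuffling at the start of every epoch when shuffle=True.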
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int((len(data)-1)/batch_size) + 1
for epoch in range(num_epochs):
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_idx = batch_num * batch_size
end_idx = min((batch_num+1) * batch_size, data_size)
yield shuffled_data[start_idx:end_idx]
def save_test_result(labels, all_predictions, all_scores, output_dir):
output_labels = []
for i in labels:
if i == 1:
output_labels.append(i)
else:
output_labels.append(-1)
output_preds = []
for i in all_predictions:
if i == 1:
output_preds.append(i)
else:
output_preds.append(-1)
softmax_scores = [softmax(i) for i in all_scores]
with open(output_dir, "w") as file:
output = "label\tpredict\tscore\n"
file.write(output)
for i in range(len(output_labels)):
output = str(int(output_labels[i])) + '\t' + str(int(output_preds[i])) + '\t' + str(softmax_scores[i][1]) + '\n'
file.write(output)
| 14,170 | 34.605528 | 125 | py |
URLNet | URLNet-master/TextCNN.py | import tensorflow as tf
class TextCNN(object):
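    """URLNet text CNN. The `mode` argument selects which branches are built:
    1: character-level CNN only, 2: word-level CNN only, 3: character- and
    word-level CNNs, 4: character-level word CNN (word embeddings summed with
    the embeddings of their characters), 5: mode 4 plus the character-level
    CNN. For modes 3 and 5 the two branch outputs are projected and
    concatenated; the result is passed through fully connected layers to
    produce two-class scores.
    """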
def __init__(self, char_ngram_vocab_size, word_ngram_vocab_size, char_vocab_size, \
word_seq_len, char_seq_len, embedding_size, l2_reg_lambda=0, \
filter_sizes=[3,4,5,6], mode=0):
if mode == 4 or mode == 5:
self.input_x_char = tf.placeholder(tf.int32, [None, None, None], name="input_x_char")
self.input_x_char_pad_idx = tf.placeholder(tf.float32, [None, None, None, embedding_size], name="input_x_char_pad_idx")
if mode == 4 or mode == 5 or mode == 2 or mode == 3:
self.input_x_word = tf.placeholder(tf.int32, [None, None], name="input_x_word")
if mode == 1 or mode == 3 or mode == 5:
self.input_x_char_seq = tf.placeholder(tf.int32, [None, None], name="input_x_char_seq")
self.input_y = tf.placeholder(tf.float32, [None, 2], name="input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
l2_loss = tf.constant(0.0)
with tf.name_scope("embedding"):
if mode == 4 or mode == 5:
self.char_w = tf.Variable(tf.random_uniform([char_ngram_vocab_size, embedding_size], -1.0, 1.0), name="char_emb_w")
if mode == 2 or mode == 3 or mode == 4 or mode == 5:
self.word_w = tf.Variable(tf.random_uniform([word_ngram_vocab_size, embedding_size], -1.0, 1.0), name="word_emb_w")
if mode == 1 or mode == 3 or mode == 5:
self.char_seq_w = tf.Variable(tf.random_uniform([char_vocab_size, embedding_size], -1.0, 1.0), name="char_seq_emb_w")
if mode == 4 or mode == 5:
self.embedded_x_char = tf.nn.embedding_lookup(self.char_w, self.input_x_char)
self.embedded_x_char = tf.multiply(self.embedded_x_char, self.input_x_char_pad_idx)
if mode == 2 or mode == 3 or mode == 4 or mode == 5:
self.embedded_x_word = tf.nn.embedding_lookup(self.word_w, self.input_x_word)
if mode == 1 or mode == 3 or mode == 5:
self.embedded_x_char_seq = tf.nn.embedding_lookup(self.char_seq_w, self.input_x_char_seq)
if mode == 4 or mode == 5:
self.sum_ngram_x_char = tf.reduce_sum(self.embedded_x_char, 2)
self.sum_ngram_x = tf.add(self.sum_ngram_x_char, self.embedded_x_word)
if mode == 4 or mode == 5:
self.sum_ngram_x_expanded = tf.expand_dims(self.sum_ngram_x, -1)
if mode == 2 or mode == 3:
self.sum_ngram_x_expanded = tf.expand_dims(self.embedded_x_word, -1)
if mode == 1 or mode == 3 or mode == 5:
self.char_x_expanded = tf.expand_dims(self.embedded_x_char_seq, -1)
########################### WORD CONVOLUTION LAYER ################################
if mode == 2 or mode == 3 or mode == 4 or mode == 5:
pooled_x = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv_maxpool_%s" % filter_size):
filter_shape = [filter_size, embedding_size, 1, 256]
b = tf.Variable(tf.constant(0.1, shape=[256]), name="b")
w = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="w")
conv = tf.nn.conv2d(
self.sum_ngram_x_expanded,
w,
strides = [1,1,1,1],
padding = "VALID",
name="conv")
h = tf.nn.relu(tf.nn.bias_add(conv,b), name="relu")
pooled = tf.nn.max_pool(
h,
ksize=[1, word_seq_len - filter_size + 1, 1, 1],
strides=[1,1,1,1],
padding="VALID",
name="pool")
pooled_x.append(pooled)
num_filters_total = 256 * len(filter_sizes)
self.h_pool = tf.concat(pooled_x, 3)
self.x_flat = tf.reshape(self.h_pool, [-1, num_filters_total], name="pooled_x")
self.h_drop = tf.nn.dropout(self.x_flat, self.dropout_keep_prob, name="dropout_x")
########################### CHAR CONVOLUTION LAYER ###########################
if mode == 1 or mode == 3 or mode == 5:
pooled_char_x = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("char_conv_maxpool_%s" % filter_size):
filter_shape = [filter_size, embedding_size, 1, 256]
b = tf.Variable(tf.constant(0.1, shape=[256]), name="b")
w = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="w")
conv = tf.nn.conv2d(
self.char_x_expanded,
w,
strides=[1,1,1,1],
padding="VALID",
name="conv")
h = tf.nn.relu(tf.nn.bias_add(conv,b), name="relu")
pooled = tf.nn.max_pool(
h,
ksize=[1, char_seq_len - filter_size + 1, 1, 1],
strides=[1,1,1,1],
padding="VALID",
name="pool")
pooled_char_x.append(pooled)
num_filters_total = 256*len(filter_sizes)
self.h_char_pool = tf.concat(pooled_char_x, 3)
self.char_x_flat = tf.reshape(self.h_char_pool, [-1, num_filters_total], name="pooled_char_x")
self.char_h_drop = tf.nn.dropout(self.char_x_flat, self.dropout_keep_prob, name="dropout_char_x")
############################### CONCAT WORD AND CHAR BRANCH ############################
if mode == 3 or mode == 5:
with tf.name_scope("word_char_concat"):
ww = tf.get_variable("ww", shape=(num_filters_total, 512), initializer=tf.contrib.layers.xavier_initializer())
bw = tf.Variable(tf.constant(0.1, shape=[512]), name="bw")
l2_loss += tf.nn.l2_loss(ww)
l2_loss += tf.nn.l2_loss(bw)
word_output = tf.nn.xw_plus_b(self.h_drop, ww, bw)
wc = tf.get_variable("wc", shape=(num_filters_total, 512), initializer=tf.contrib.layers.xavier_initializer())
bc = tf.Variable(tf.constant(0.1, shape=[512]), name="bc")
l2_loss += tf.nn.l2_loss(wc)
l2_loss += tf.nn.l2_loss(bc)
char_output = tf.nn.xw_plus_b(self.char_h_drop, wc, bc)
self.conv_output = tf.concat([word_output, char_output], 1)
elif mode == 2 or mode == 4:
self.conv_output = self.h_drop
elif mode == 1:
self.conv_output = self.char_h_drop
################################ RELU AND FC ###################################
with tf.name_scope("output"):
w0 = tf.get_variable("w0", shape=[1024, 512], initializer=tf.contrib.layers.xavier_initializer())
b0 = tf.Variable(tf.constant(0.1, shape=[512]), name="b0")
l2_loss += tf.nn.l2_loss(w0)
l2_loss += tf.nn.l2_loss(b0)
output0 = tf.nn.relu(tf.matmul(self.conv_output, w0) + b0)
w1 = tf.get_variable("w1", shape=[512, 256], initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.constant(0.1, shape=[256]), name="b1")
l2_loss += tf.nn.l2_loss(w1)
l2_loss += tf.nn.l2_loss(b1)
output1 = tf.nn.relu(tf.matmul(output0, w1) + b1)
w2 = tf.get_variable("w2", shape=[256,128], initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.constant(0.1, shape=[128]), name="b2")
l2_loss += tf.nn.l2_loss(w2)
l2_loss += tf.nn.l2_loss(b2)
output2 = tf.nn.relu(tf.matmul(output1, w2) + b2)
w = tf.get_variable("w", shape=(128, 2), initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[2]), name="b")
l2_loss += tf.nn.l2_loss(w)
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(output2, w, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
with tf.name_scope("accuracy"):
correct_preds = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_preds, "float"), name="accuracy")
| 8,998 | 55.955696 | 141 | py |
URLNet | URLNet-master/train.py | import re
import time
import datetime
import os
import pdb
import pickle
import argparse
import numpy as np
from tqdm import tqdm
from bisect import bisect_left
import tensorflow as tf
from tensorflow.contrib import learn
from tflearn.data_utils import to_categorical, pad_sequences
from TextCNN import *
from utils import *
parser = argparse.ArgumentParser(description="Train URLNet model")
# data args
default_max_len_words = 200
parser.add_argument('--data.max_len_words', type=int, default=default_max_len_words, metavar="MLW",
help="maximum length of url in words (default: {})".format(default_max_len_words))
default_max_len_chars = 200
parser.add_argument('--data.max_len_chars', type=int, default=default_max_len_chars, metavar="MLC",
help="maximum length of url in characters (default: {})".format(default_max_len_chars))
default_max_len_subwords = 20
parser.add_argument('--data.max_len_subwords', type=int, default=default_max_len_subwords, metavar="MLSW",
help="maxium length of word in subwords/ characters (default: {})".format(default_max_len_subwords))
default_min_word_freq = 1
parser.add_argument('--data.min_word_freq', type=int, default=default_min_word_freq, metavar="MWF",
help="minimum frequency of word in training population to build vocabulary (default: {})".format(default_min_word_freq))
default_dev_pct = 0.001
parser.add_argument('--data.dev_pct', type=float, default=default_dev_pct, metavar="DEVPCT",
help="percentage of training set used for dev (default: {})".format(default_dev_pct))
parser.add_argument('--data.data_dir', type=str, default='train_10000.txt', metavar="DATADIR",
help="location of data file")
default_delimit_mode = 1
parser.add_argument("--data.delimit_mode", type=int, default=default_delimit_mode, metavar="DLMODE",
help="0: delimit by special chars, 1: delimit by special chars + each char as a word (default: {})".format(default_delimit_mode))
# model args
default_emb_dim = 32
parser.add_argument('--model.emb_dim', type=int, default=default_emb_dim, metavar="EMBDIM",
help="embedding dimension size (default: {})".format(default_emb_dim))
default_filter_sizes = "3,4,5,6"
parser.add_argument('--model.filter_sizes', type=str, default=default_filter_sizes, metavar="FILTERSIZES",
help="filter sizes of the convolution layer (default: {})".format(default_filter_sizes))
default_emb_mode = 1
parser.add_argument('--model.emb_mode', type=int, default=default_emb_mode, metavar="EMBMODE",
help="1: charCNN, 2: wordCNN, 3: char + wordCNN, 4: char-level wordCNN, 5: char + char-level wordCNN (default: {})".format(default_emb_mode))
# train args
default_nb_epochs = 5
parser.add_argument('--train.nb_epochs', type=int, default=default_nb_epochs, metavar="NEPOCHS",
help="number of training epochs (default: {})".format(default_nb_epochs))
default_batch_size = 128
parser.add_argument('--train.batch_size', type=int, default=default_batch_size, metavar="BATCHSIZE",
help="Size of each training batch (default: {})".format(default_batch_size))
parser.add_argument('--train.l2_reg_lambda', type=float, default=0.0, metavar="L2LREGLAMBDA",
help="l2 lambda for regularization (default: 0.0)")
default_lr = 0.001
parser.add_argument('--train.lr', type=float, default=default_lr, metavar="LR",
help="learning rate for optimizer (default: {})".format(default_lr))
# log args
parser.add_argument('--log.output_dir', type=str, default="runs/10000/", metavar="OUTPUTDIR",
help="directory of the output model")
parser.add_argument('--log.print_every', type=int, default=50, metavar="PRINTEVERY",
help="print training result every this number of steps (default: 50)")
parser.add_argument('--log.eval_every', type=int, default=500, metavar="EVALEVERY",
help="evaluate the model every this number of steps (default: 500)")
parser.add_argument('--log.checkpoint_every', type=int, default=500, metavar="CHECKPOINTEVERY",
help="save a model every this number of steps (default: 500)")
FLAGS = vars(parser.parse_args())
for key, val in FLAGS.items():
print("{}={}".format(key, val))
urls, labels = read_data(FLAGS["data.data_dir"])
high_freq_words = None
if FLAGS["data.min_word_freq"] > 0:
x1, word_reverse_dict = get_word_vocab(urls, FLAGS["data.max_len_words"], FLAGS["data.min_word_freq"])
high_freq_words = sorted(list(word_reverse_dict.values()))
print("Number of words with freq >={}: {}".format(FLAGS["data.min_word_freq"], len(high_freq_words)))
x, word_reverse_dict = get_word_vocab(urls, FLAGS["data.max_len_words"])
word_x = get_words(x, word_reverse_dict, FLAGS["data.delimit_mode"], urls)
ngramed_id_x, ngrams_dict, worded_id_x, words_dict = ngram_id_x(word_x, FLAGS["data.max_len_subwords"], high_freq_words)
chars_dict = ngrams_dict
chared_id_x = char_id_x(urls, chars_dict, FLAGS["data.max_len_chars"])
pos_x = []
neg_x = []
for i in range(len(labels)):
label = labels[i]
if label == 1:
pos_x.append(i)
else:
neg_x.append(i)
print("Overall Mal/Ben split: {}/{}".format(len(pos_x), len(neg_x)))
pos_x = np.array(pos_x)
neg_x = np.array(neg_x)
x_train, y_train, x_test, y_test = prep_train_test(pos_x, neg_x, FLAGS["data.dev_pct"])
x_train_char = get_ngramed_id_x(x_train, ngramed_id_x)
x_test_char = get_ngramed_id_x(x_test, ngramed_id_x)
x_train_word = get_ngramed_id_x(x_train, worded_id_x)
x_test_word = get_ngramed_id_x(x_test, worded_id_x)
x_train_char_seq = get_ngramed_id_x(x_train, chared_id_x)
x_test_char_seq = get_ngramed_id_x(x_test, chared_id_x)
###################################### Training #########################################################
def train_dev_step(x, y, emb_mode, is_train=True):
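    # Run one optimisation step (is_train=True, dropout keep prob 0.5) or a
    # pure evaluation step (is_train=False, keep prob 1.0) on a prepared
    # batch, feeding the placeholders required by the chosen embedding mode.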
if is_train:
p = 0.5
else:
p = 1.0
if emb_mode == 1:
feed_dict = {
cnn.input_x_char_seq: x[0],
cnn.input_y: y,
cnn.dropout_keep_prob: p}
elif emb_mode == 2:
feed_dict = {
cnn.input_x_word: x[0],
cnn.input_y: y,
cnn.dropout_keep_prob: p}
elif emb_mode == 3:
feed_dict = {
cnn.input_x_char_seq: x[0],
cnn.input_x_word: x[1],
cnn.input_y: y,
cnn.dropout_keep_prob: p}
elif emb_mode == 4:
feed_dict = {
cnn.input_x_word: x[0],
cnn.input_x_char: x[1],
cnn.input_x_char_pad_idx: x[2],
cnn.input_y: y,
cnn.dropout_keep_prob: p}
elif emb_mode == 5:
feed_dict = {
cnn.input_x_char_seq: x[0],
cnn.input_x_word: x[1],
cnn.input_x_char: x[2],
cnn.input_x_char_pad_idx: x[3],
cnn.input_y: y,
cnn.dropout_keep_prob: p}
if is_train:
_, step, loss, acc = sess.run([train_op, global_step, cnn.loss, cnn.accuracy], feed_dict)
else:
step, loss, acc = sess.run([global_step, cnn.loss, cnn.accuracy], feed_dict)
return step, loss, acc
def make_batches(x_train_char_seq, x_train_word, x_train_char, y_train, batch_size, nb_epochs, shuffle=False):
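    # Zip the input representations required by the embedding mode together
    # with the labels and wrap them in a batch iterator; when nb_epochs > 1
    # also return the number of batches per epoch and in total.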
if FLAGS["model.emb_mode"] == 1:
batch_data = list(zip(x_train_char_seq, y_train))
elif FLAGS["model.emb_mode"] == 2:
batch_data = list(zip(x_train_word, y_train))
elif FLAGS["model.emb_mode"] == 3:
batch_data = list(zip(x_train_char_seq, x_train_word, y_train))
elif FLAGS["model.emb_mode"] == 4:
batch_data = list(zip(x_train_char, x_train_word, y_train))
elif FLAGS["model.emb_mode"] == 5:
batch_data = list(zip(x_train_char, x_train_word, x_train_char_seq, y_train))
batches = batch_iter(batch_data, batch_size, nb_epochs, shuffle)
if nb_epochs > 1:
nb_batches_per_epoch = int(len(batch_data)/batch_size)
if len(batch_data)%batch_size != 0:
nb_batches_per_epoch += 1
nb_batches = int(nb_batches_per_epoch * nb_epochs)
return batches, nb_batches_per_epoch, nb_batches
else:
return batches
def prep_batches(batch):
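    # Unzip a batch and pad each representation (character sequence, word
    # ids, char-level word ids plus pad mask) to the configured maximum
    # lengths before it is fed to the network.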
if FLAGS["model.emb_mode"] == 1:
x_char_seq, y_batch = zip(*batch)
elif FLAGS["model.emb_mode"] == 2:
x_word, y_batch = zip(*batch)
elif FLAGS["model.emb_mode"] == 3:
x_char_seq, x_word, y_batch = zip(*batch)
elif FLAGS["model.emb_mode"] == 4:
x_char, x_word, y_batch = zip(*batch)
elif FLAGS["model.emb_mode"] == 5:
x_char, x_word, x_char_seq, y_batch = zip(*batch)
x_batch = []
if FLAGS["model.emb_mode"] in [1, 3, 5]:
x_char_seq = pad_seq_in_word(x_char_seq, FLAGS["data.max_len_chars"])
x_batch.append(x_char_seq)
if FLAGS["model.emb_mode"] in [2, 3, 4, 5]:
x_word = pad_seq_in_word(x_word, FLAGS["data.max_len_words"])
x_batch.append(x_word)
if FLAGS["model.emb_mode"] in [4, 5]:
x_char, x_char_pad_idx = pad_seq(x_char, FLAGS["data.max_len_words"], FLAGS["data.max_len_subwords"], FLAGS["model.emb_dim"])
x_batch.extend([x_char, x_char_pad_idx])
return x_batch, y_batch
with tf.Graph().as_default():
session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
session_conf.gpu_options.allow_growth=True
sess = tf.Session(config=session_conf)
with sess.as_default():
cnn = TextCNN(
char_ngram_vocab_size = len(ngrams_dict)+1,
word_ngram_vocab_size = len(words_dict)+1,
char_vocab_size = len(chars_dict)+1,
embedding_size=FLAGS["model.emb_dim"],
word_seq_len=FLAGS["data.max_len_words"],
char_seq_len=FLAGS["data.max_len_chars"],
l2_reg_lambda=FLAGS["train.l2_reg_lambda"],
mode=FLAGS["model.emb_mode"],
filter_sizes=list(map(int, FLAGS["model.filter_sizes"].split(","))))
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(FLAGS["train.lr"])
grads_and_vars = optimizer.compute_gradients(cnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step = global_step)
print("Writing to {}\n".format(FLAGS["log.output_dir"]))
if not os.path.exists(FLAGS["log.output_dir"]):
os.makedirs(FLAGS["log.output_dir"])
# Save dictionary files
ngrams_dict_dir = FLAGS["log.output_dir"] + "subwords_dict.p"
pickle.dump(ngrams_dict, open(ngrams_dict_dir,"wb"))
words_dict_dir = FLAGS["log.output_dir"] + "words_dict.p"
pickle.dump(words_dict, open(words_dict_dir, "wb"))
chars_dict_dir = FLAGS["log.output_dir"] + "chars_dict.p"
pickle.dump(chars_dict, open(chars_dict_dir, "wb"))
# Save training and validation logs
train_log_dir = FLAGS["log.output_dir"] + "train_logs.csv"
with open(train_log_dir, "w") as f:
f.write("step,time,loss,acc\n")
val_log_dir = FLAGS["log.output_dir"] + "val_logs.csv"
with open(val_log_dir, "w") as f:
f.write("step,time,loss,acc\n")
# Save model checkpoints
checkpoint_dir = FLAGS["log.output_dir"] + "checkpoints/"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_prefix = checkpoint_dir + "model"
saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
sess.run(tf.global_variables_initializer())
train_batches, nb_batches_per_epoch, nb_batches = make_batches(x_train_char_seq, x_train_word, x_train_char, y_train, FLAGS["train.batch_size"], FLAGS['train.nb_epochs'], True)
min_dev_loss = float('Inf')
dev_loss = float('Inf')
dev_acc = 0.0
print("Number of baches in total: {}".format(nb_batches))
print("Number of batches per epoch: {}".format(nb_batches_per_epoch))
it = tqdm(range(nb_batches), desc="emb_mode {} delimit_mode {} train_size {}".format(FLAGS["model.emb_mode"], FLAGS["data.delimit_mode"], x_train.shape[0]), ncols=0)
for idx in it:
batch = next(train_batches)
x_batch, y_batch = prep_batches(batch)
step, loss, acc = train_dev_step(x_batch, y_batch, emb_mode=FLAGS["model.emb_mode"], is_train=True)
if step % FLAGS["log.print_every"] == 0:
with open(train_log_dir, "a") as f:
f.write("{:d},{:s},{:e},{:e}\n".format(step, datetime.datetime.now().isoformat(), loss, acc))
it.set_postfix(
trn_loss='{:.3e}'.format(loss),
trn_acc='{:.3e}'.format(acc),
dev_loss='{:.3e}'.format(dev_loss),
dev_acc='{:.3e}'.format(dev_acc),
min_dev_loss='{:.3e}'.format(min_dev_loss))
if step % FLAGS["log.eval_every"] == 0 or idx == (nb_batches-1):
total_loss = 0
nb_corrects = 0
nb_instances = 0
test_batches = make_batches(x_test_char_seq, x_test_word, x_test_char, y_test, FLAGS['train.batch_size'], 1, False)
for test_batch in test_batches:
x_test_batch, y_test_batch = prep_batches(test_batch)
step, batch_dev_loss, batch_dev_acc = train_dev_step(x_test_batch, y_test_batch, emb_mode=FLAGS["model.emb_mode"], is_train=False)
nb_instances += x_test_batch[0].shape[0]
total_loss += batch_dev_loss * x_test_batch[0].shape[0]
nb_corrects += batch_dev_acc * x_test_batch[0].shape[0]
dev_loss = total_loss / nb_instances
dev_acc = nb_corrects / nb_instances
with open(val_log_dir, "a") as f:
f.write("{:d},{:s},{:e},{:e}\n".format(step, datetime.datetime.now().isoformat(), dev_loss, dev_acc))
if step % FLAGS["log.checkpoint_every"] == 0 or idx == (nb_batches-1):
if dev_loss < min_dev_loss:
path = saver.save(sess, checkpoint_prefix, global_step = step)
min_dev_loss = dev_loss
| 14,237 | 46.145695 | 184 | py |
NeuralKG | NeuralKG-main/main.py | # -*- coding: utf-8 -*-
# from torch._C import T
# from train import Trainer
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
from IPython import embed
import wandb
from neuralkg.utils import setup_parser
from neuralkg.utils.tools import *
from neuralkg.data.Sampler import *
from neuralkg.data.Grounding import GroundAllRules
def main():
    parser = setup_parser() # set up the argument parser
args = parser.parse_args()
if args.load_config:
args = load_config(args, args.config_path)
seed_everything(args.seed)
"""set up sampler to datapreprocess""" #设置数据处理的采样过程
train_sampler_class = import_class(f"neuralkg.data.{args.train_sampler_class}")
    train_sampler = train_sampler_class(args) # this sampler is selectable
#print(train_sampler)
test_sampler_class = import_class(f"neuralkg.data.{args.test_sampler_class}")
    test_sampler = test_sampler_class(train_sampler) # the test_sampler is always required
    """set up datamodule""" # set up the data module
    data_class = import_class(f"neuralkg.data.{args.data_class}") # define the data class DataClass
kgdata = data_class(args, train_sampler, test_sampler)
"""set up model"""
model_class = import_class(f"neuralkg.model.{args.model_name}")
if args.model_name == "RugE":
ground = GroundAllRules(args)
ground.PropositionalizeRule()
if args.model_name == "ComplEx_NNE_AER":
model = model_class(args, train_sampler.rel2id)
elif args.model_name == "IterE":
print(f"data.{args.train_sampler_class}")
model = model_class(args, train_sampler, test_sampler)
else:
model = model_class(args)
if args.model_name == 'SEGNN':
src_list = train_sampler.get_train_1.src_list
dst_list = train_sampler.get_train_1.dst_list
rel_list = train_sampler.get_train_1.rel_list
"""set up lit_model"""
litmodel_class = import_class(f"neuralkg.lit_model.{args.litmodel_name}")
if args.model_name =='SEGNN':
lit_model = litmodel_class(model, args, src_list, dst_list, rel_list)
else:
lit_model = litmodel_class(model, args)
"""set up logger"""
logger = pl.loggers.TensorBoardLogger("training/logs")
if args.use_wandb:
log_name = "_".join([args.model_name, args.dataset_name, str(args.lr)])
logger = pl.loggers.WandbLogger(name=log_name, project="NeuralKG")
logger.log_hyperparams(vars(args))
"""early stopping"""
early_callback = pl.callbacks.EarlyStopping(
monitor="Eval|mrr",
mode="max",
patience=args.early_stop_patience,
# verbose=True,
check_on_train_epoch_end=False,
)
"""set up model save method"""
    # currently saves the model that achieves the best MRR on the validation set
    # path where the model checkpoints are saved
dirpath = "/".join(["output", args.eval_task, args.dataset_name, args.model_name])
model_checkpoint = pl.callbacks.ModelCheckpoint(
monitor="Eval|mrr",
mode="max",
filename="{epoch}-{Eval|mrr:.3f}",
dirpath=dirpath,
save_weights_only=True,
save_top_k=1,
)
callbacks = [early_callback, model_checkpoint]
# initialize trainer
if args.model_name == "IterE":
trainer = pl.Trainer.from_argparse_args(
args,
callbacks=callbacks,
logger=logger,
default_root_dir="training/logs",
gpus="0,",
check_val_every_n_epoch=args.check_per_epoch,
reload_dataloaders_every_n_epochs=1 # IterE
)
else:
trainer = pl.Trainer.from_argparse_args(
args,
callbacks=callbacks,
logger=logger,
default_root_dir="training/logs",
gpus="0,",
check_val_every_n_epoch=args.check_per_epoch,
)
    '''save the arguments to a config file'''
if args.save_config:
save_config(args)
if args.use_wandb:
logger.watch(lit_model)
if not args.test_only:
# train&valid
trainer.fit(lit_model, datamodule=kgdata)
        # load the model that performed best on dev in this run and run the test
path = model_checkpoint.best_model_path
else:
path = args.checkpoint_dir
lit_model.load_state_dict(torch.load(path)["state_dict"])
lit_model.eval()
trainer.test(lit_model, datamodule=kgdata)
if __name__ == "__main__":
main()
| 4,261 | 33.934426 | 86 | py |
NeuralKG | NeuralKG-main/setup.py | #!/usr/bin/env python
# coding: utf-8
import setuptools
import os
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='neuralkg',
version='1.0.21',
author='ZJUKG',
author_email='[email protected]',
url='https://github.com/zjukg/NeuralKG',
description='An Open Source Library for Diverse Representation Learning of Knowledge Graphs',
package_dir={"": "src"},
packages=setuptools.find_packages("src"),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=[
'pytorch_lightning==1.5.10',
'PyYAML>=6.0',
'wandb>=0.12.7',
'IPython>=5.0.0'
],
python_requires=">=3.6"
)
| 907 | 24.942857 | 97 | py |
NeuralKG | NeuralKG-main/demo.py | # -*- coding: utf-8 -*-
# from torch._C import T
# from train import Trainer
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
from IPython import embed
import wandb
from neuralkg.utils import setup_parser
from neuralkg.utils.tools import *
from neuralkg.data.Sampler import *
from neuralkg.data.Grounding import GroundAllRules
def main(arg_path):
print('This demo is powered by \033[1;32mNeuralKG \033[0m')
    args = setup_parser()  # set up the arguments
args = load_config(args, arg_path)
seed_everything(args.seed)
"""set up sampler to datapreprocess""" #设置数据处理的采样过程
train_sampler_class = import_class(f"neuralkg.data.{args.train_sampler_class}")
    train_sampler = train_sampler_class(args)  # the training sampler is configurable
#print(train_sampler)
test_sampler_class = import_class(f"neuralkg.data.{args.test_sampler_class}")
    test_sampler = test_sampler_class(train_sampler)  # the test sampler is always required
    """set up datamodule"""
    data_class = import_class(f"neuralkg.data.{args.data_class}")  # define the data class
kgdata = data_class(args, train_sampler, test_sampler)
"""set up model"""
model_class = import_class(f"neuralkg.model.{args.model_name}")
if args.model_name == "RugE":
ground = GroundAllRules(args)
ground.PropositionalizeRule()
if args.model_name == "ComplEx_NNE_AER":
model = model_class(args, train_sampler.rel2id)
elif args.model_name == "IterE":
print(f"data.{args.train_sampler_class}")
model = model_class(args, train_sampler, test_sampler)
else:
model = model_class(args)
"""set up lit_model"""
litmodel_class = import_class(f"neuralkg.lit_model.{args.litmodel_name}")
lit_model = litmodel_class(model, args)
"""set up logger"""
logger = pl.loggers.TensorBoardLogger("training/logs")
if args.use_wandb:
log_name = "_".join([args.model_name, args.dataset_name, str(args.lr)])
logger = pl.loggers.WandbLogger(name=log_name, project="NeuralKG")
logger.log_hyperparams(vars(args))
"""early stopping"""
early_callback = pl.callbacks.EarlyStopping(
monitor="Eval|mrr",
mode="max",
patience=args.early_stop_patience,
# verbose=True,
check_on_train_epoch_end=False,
)
"""set up model save method"""
    # currently we keep the checkpoint with the best MRR on the validation set
    # directory where the model checkpoints are saved
dirpath = "/".join(["output", args.eval_task, args.dataset_name, args.model_name])
model_checkpoint = pl.callbacks.ModelCheckpoint(
monitor="Eval|mrr",
mode="max",
filename="{epoch}-{Eval|mrr:.3f}",
dirpath=dirpath,
save_weights_only=True,
save_top_k=1,
)
callbacks = [early_callback, model_checkpoint]
# initialize trainer
if args.model_name == "IterE":
trainer = pl.Trainer.from_argparse_args(
args,
callbacks=callbacks,
logger=logger,
default_root_dir="training/logs",
gpus="0,",
check_val_every_n_epoch=args.check_per_epoch,
reload_dataloaders_every_n_epochs=1 # IterE
)
else:
trainer = pl.Trainer.from_argparse_args(
args,
callbacks=callbacks,
logger=logger,
default_root_dir="training/logs",
gpus="0,",
check_val_every_n_epoch=args.check_per_epoch,
)
    '''save the parameters to a config file'''
if args.save_config:
save_config(args)
if not args.test_only:
# train&valid
trainer.fit(lit_model, datamodule=kgdata)
        # load the checkpoint with the best dev performance from this run, then test
path = model_checkpoint.best_model_path
else:
# path = args.checkpoint_dir
path = "./output/link_prediction/FB15K237/TransE/epoch=24-Eval|mrr=0.300.ckpt"
lit_model.load_state_dict(torch.load(path)["state_dict"])
lit_model.eval()
score = lit_model.model(torch.tensor(train_sampler.test_triples), mode='tail-batch')
value, index = score.topk(10, dim=1)
index = index.squeeze(0).tolist()
top10_ent = [train_sampler.id2ent[i] for i in index]
rank = index.index(train_sampler.ent2id['杭州市']) + 1
print('\033[1;32mInteresting display! \033[0m')
print('Use the trained KGE to predict entity')
print("\033[1;32mQuery: \033[0m (浙江大学, 位于市, ?)")
print(f"\033[1;32mTop 10 Prediction:\033[0m{top10_ent}")
print(f"\033[1;32mGroud Truth: \033[0m杭州市 \033[1;32mRank: \033[0m{rank}")
print('This demo is powered by \033[1;32mNeuralKG \033[0m')
if __name__ == "__main__":
main(arg_path = 'config/TransE_demo_kg.yaml')
| 4,592 | 36.647541 | 88 | py |
NeuralKG | NeuralKG-main/dataset/demo_kg/data-preprocess.py | train = './train.txt'
entity2id = './entity2id.txt'
relation2id = './relation2id.txt'
entity = set()
relation = set()
def write_dict(name):
if name == "entity":
read_path = entity2id
write_path = "./entities.dict"
else:
read_path = relation2id
write_path = "./relations.dict"
kk = open(write_path, "w")
with open(read_path, "r") as f:
for line in f.readlines():
item, idx = line.strip().split("\t")
kk.write(idx+"\t"+item+"\n")
write_dict("entity")
write_dict("relation")
| 499 | 17.518519 | 39 | py |
NeuralKG | NeuralKG-main/src/neuralkg/__init__.py | from .data import *
from .eval_task import *
from .lit_model import *
from .loss import *
from .model import *
from .utils import * | 131 | 21 | 24 | py |
NeuralKG | NeuralKG-main/src/neuralkg/lit_model/RugELitModel.py | from logging import debug
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import json
from collections import defaultdict as ddict
from IPython import embed
from neuralkg import loss
from .BaseLitModel import BaseLitModel
from neuralkg.eval_task import *
from IPython import embed
from functools import partial
from neuralkg.data import RuleDataLoader
from tqdm import tqdm
import pdb
class RugELitModel(BaseLitModel):
def __init__(self, model, args):
super().__init__(model, args)
self.args = args
self.temp_list = []
self.rule_dataloader = RuleDataLoader(self.args)
tq = tqdm(self.rule_dataloader, desc='{}'.format('rule'), ncols=0)
print('start first load')
for new_data in tq:
self.temp_list.append(new_data)
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
pos_sample = batch["positive_sample"]
neg_sample = batch["negative_sample"]
mode = batch["mode"]
pos_score = self.model(pos_sample)
neg_score = self.model(pos_sample, neg_sample, mode)
rule, confidence, triple_num = self.temp_list[0][0], self.temp_list[0][1], self.temp_list[0][2]
loss = self.loss(pos_score, neg_score, rule, confidence, triple_num, len(pos_sample))
self.temp_list.remove(self.temp_list[0])
self.log("Train|loss", loss, on_step=False, on_epoch=True)
return loss
def training_epoch_end(self, training_step_outputs):
self.temp_list = []
print('start reload')
tq = tqdm(self.rule_dataloader, desc='{}'.format('rule'), ncols=0)
for new_data in tq:
self.temp_list.append(new_data)
def validation_step(self, batch, batch_idx):
# pos_triple, tail_label, head_label = batch
results = dict()
ranks = link_predict(batch, self.model, prediction='all')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def validation_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Eval")
# self.log("Eval|mrr", outputs["Eval|mrr"], on_epoch=True)
self.log_dict(outputs, prog_bar=True, on_epoch=True)
def test_step(self, batch, batch_idx):
results = dict()
ranks = link_predict(batch, self.model, prediction='all')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def test_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Test")
self.log_dict(outputs, prog_bar=True, on_epoch=True)
    '''set up the optimizer and lr_scheduler here'''
def configure_optimizers(self):
# milestones = int(self.args.max_epochs / 2)
optimizer = self.optimizer_class(self.model.parameters(), lr=self.args.lr)
# StepLR = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[milestones], gamma=0.1)
optim_dict = {'optimizer': optimizer}
return optim_dict
| 3,391 | 35.869565 | 103 | py |
NeuralKG | NeuralKG-main/src/neuralkg/lit_model/XTransELitModel.py | from logging import debug
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import json
from collections import defaultdict as ddict
from IPython import embed
from .BaseLitModel import BaseLitModel
from neuralkg.eval_task import *
from IPython import embed
from functools import partial
class XTransELitModel(BaseLitModel):
def __init__(self, model, args):
super().__init__(model, args)
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
triples = batch["positive_sample"]
neg = batch["negative_sample"]
neighbor = batch["neighbor"]
mask = batch['mask']
mode = batch['mode']
pos_score = self.model(triples, neighbor, mask)
neg_score = self.model(triples, neighbor, mask, neg, mode=mode)
loss = self.loss(pos_score, neg_score)
self.log("Train|loss", loss, on_step=False, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
# pos_triple, tail_label, head_label = batch
results = dict()
ranks = link_predict(batch, self.model, prediction='tail')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def validation_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Eval")
# self.log("Eval|mrr", outputs["Eval|mrr"], on_epoch=True)
self.log_dict(outputs, prog_bar=True, on_epoch=True)
def test_step(self, batch, batch_idx):
results = dict()
ranks = link_predict(batch, self.model, prediction='tail')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def test_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Test")
self.log_dict(outputs, prog_bar=True, on_epoch=True)
    '''set up the optimizer and lr_scheduler here'''
def configure_optimizers(self):
milestones = int(self.args.max_epochs / 2)
optimizer = self.optimizer_class(self.model.parameters(), lr=self.args.lr)
StepLR = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[milestones], gamma=0.1)
optim_dict = {'optimizer': optimizer, 'lr_scheduler': StepLR}
return optim_dict
| 2,658 | 35.930556 | 100 | py |
NeuralKG | NeuralKG-main/src/neuralkg/lit_model/SEGNNLitModel.py | from logging import debug
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import json
import dgl
from collections import defaultdict as ddict
from IPython import embed
from .BaseLitModel import BaseLitModel
from neuralkg.eval_task import *
from IPython import embed
from functools import partial
class SEGNNLitModel(BaseLitModel):
def __init__(self, model, args, src_list, dst_list, rel_list):
super().__init__(model, args)
self.src_list = src_list
self.dst_list = dst_list
self.rel_list = rel_list
self.kg = self.get_kg(src_list, dst_list, rel_list)
def forward(self, x):
return self.model(x)
def training_step(self, batch):
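        # SEGNN uses manual optimization (BaseLitModel turns off automatic_optimization
        # for this model), so the optimizer, the backward pass and the LR scheduler are
        # all stepped explicitly inside this method.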
optimizer = self.optimizers()
#optimizer = optimizer.optimizer
optimizer.zero_grad()
(head, rel, _), label, rm_edges= batch
kg = self.get_kg(self.src_list, self.dst_list, self.rel_list)
kg = kg.to(torch.device("cuda:0"))
if self.args.rm_rate > 0:
kg.remove_edges(rm_edges)
score = self.model(head, rel, kg)
loss = self.loss(score, label)
self.manual_backward(loss)
optimizer.step()
sch = self.lr_schedulers()
sch.step()
return loss
def validation_step(self, batch, batch_idx):
# pos_triple, tail_label, head_label = batch
results = dict()
ranks = link_predict_SEGNN(batch, self.kg, self.model, prediction='tail')
results["count"] = torch.numel(ranks)
#results['mr'] = results.get('mr', 0.) + ranks.sum().item()
results['mrr'] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks<=k])
return results
def validation_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Eval")
# self.log("Eval|mrr", outputs["Eval|mrr"], on_epoch=True)
self.log_dict(outputs, prog_bar=True, on_epoch=True)
def test_step(self, batch, batch_idx):
results = dict()
ranks = link_predict_SEGNN(batch, self.kg, self.model, prediction='tail')
results["count"] = torch.numel(ranks)
#results['mr'] = results.get('MR', 0.) + ranks.sum().item()
results['mrr'] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def test_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Test")
self.log_dict(outputs, prog_bar=True, on_epoch=True)
def get_kg(self, src_list, dst_list, rel_list):
n_ent = self.args.num_ent
kg = dgl.graph((src_list, dst_list), num_nodes=n_ent)
kg.edata['rel_id'] = rel_list
return kg
    '''set up the optimizer and lr_scheduler here'''
def configure_optimizers(self):
def lr_lambda(current_step):
"""
Compute a ratio according to current step,
            by which the optimizer's lr will be multiplied.
:param current_step:
:return:
"""
assert current_step <= self.args.maxsteps
if current_step < self.args.warm_up_steps:
return current_step / self.args.warm_up_steps
else:
return (self.args.maxsteps - current_step) / (self.args.maxsteps - self.args.warm_up_steps)
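        # For illustration, with warm_up_steps=100 and maxsteps=1000 the lambda above
        # rises linearly from 0 to 1 over the first 100 steps (step 50 -> 0.5) and then
        # decays linearly back to 0 at step 1000 (step 550 -> 0.5).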
assert self.args.maxsteps >= self.args.warm_up_steps
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()), lr = self.args.lr)
#StepLR = torch.optim.lr_scheduler.StepLR(optimizer, step_size=200, gamma=0.5, last_epoch=-1)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=-1)
optim_dict = {'optimizer': optimizer, 'lr_scheduler':scheduler}
return optim_dict
| 4,053 | 34.876106 | 115 | py |
NeuralKG | NeuralKG-main/src/neuralkg/lit_model/RGCNLitModel.py | from logging import debug
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import json
from collections import defaultdict as ddict
from IPython import embed
from .BaseLitModel import BaseLitModel
from neuralkg.eval_task import *
from IPython import embed
from functools import partial
class RGCNLitModel(BaseLitModel):
def __init__(self, model, args):
super().__init__(model, args)
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
graph = batch["graph"]
triples = batch["triples"]
label = batch["label"]
entity = batch['entity']
relation = batch['relation']
norm = batch['norm']
score = self.model(graph, entity, relation, norm, triples)
loss = self.loss(score, label)
self.log("Train|loss", loss, on_step=False, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
# pos_triple, tail_label, head_label = batch
results = dict()
ranks = link_predict(batch, self.model, prediction='all')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def validation_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Eval")
# self.log("Eval|mrr", outputs["Eval|mrr"], on_epoch=True)
self.log_dict(outputs, prog_bar=True, on_epoch=True)
def test_step(self, batch, batch_idx):
results = dict()
ranks = link_predict(batch, self.model, prediction='all')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def test_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Test")
self.log_dict(outputs, prog_bar=True, on_epoch=True)
    '''set up the optimizer and lr_scheduler here'''
def configure_optimizers(self):
milestones = int(self.args.max_epochs / 2)
optimizer = self.optimizer_class(self.model.parameters(), lr=self.args.lr)
StepLR = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[milestones], gamma=0.1)
optim_dict = {'optimizer': optimizer, 'lr_scheduler': StepLR}
return optim_dict
| 2,602 | 36.185714 | 100 | py |
NeuralKG | NeuralKG-main/src/neuralkg/lit_model/KGELitModel.py | from logging import debug
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import json
from collections import defaultdict as ddict
from .BaseLitModel import BaseLitModel
from IPython import embed
from neuralkg.eval_task import *
from IPython import embed
from functools import partial
class KGELitModel(BaseLitModel):
"""Processing of training, evaluation and testing.
"""
def __init__(self, model, args):
super().__init__(model, args)
def forward(self, x):
return self.model(x)
@staticmethod
def add_to_argparse(parser):
parser.add_argument("--lr", type=float, default=0.1)
parser.add_argument("--weight_decay", type=float, default=0.01)
return parser
def training_step(self, batch, batch_idx):
"""Getting samples and training in KG model.
Args:
batch: The training data.
            batch_idx: The index of the batch.
Returns:
loss: The training loss for back propagation.
"""
pos_sample = batch["positive_sample"]
neg_sample = batch["negative_sample"]
mode = batch["mode"]
pos_score = self.model(pos_sample)
neg_score = self.model(pos_sample, neg_sample, mode)
if self.args.use_weight:
subsampling_weight = batch["subsampling_weight"]
loss = self.loss(pos_score, neg_score, subsampling_weight)
else:
loss = self.loss(pos_score, neg_score)
self.log("Train|loss", loss, on_step=False, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
"""Getting samples and validation in KG model.
Args:
            batch: The evaluation data.
            batch_idx: The index of the batch.
Returns:
results: mrr and hits@1,3,10.
"""
results = dict()
ranks = link_predict(batch, self.model, prediction='all')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def validation_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Eval")
# self.log("Eval|mrr", outputs["Eval|mrr"], on_epoch=True)
self.log_dict(outputs, prog_bar=True, on_epoch=True)
def test_step(self, batch, batch_idx):
"""Getting samples and test in KG model.
Args:
batch: The evaluation data.
            batch_idx: The index of the batch.
Returns:
results: mrr and hits@1,3,10.
"""
results = dict()
ranks = link_predict(batch, self.model, prediction='all')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def test_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Test")
self.log_dict(outputs, prog_bar=True, on_epoch=True)
def configure_optimizers(self):
"""Setting optimizer and lr_scheduler.
Returns:
optim_dict: Record the optimizer and lr_scheduler, type: dict.
"""
milestones = int(self.args.max_epochs / 2)
optimizer = self.optimizer_class(self.model.parameters(), lr=self.args.lr)
StepLR = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[milestones], gamma=0.1)
optim_dict = {'optimizer': optimizer, 'lr_scheduler': StepLR}
return optim_dict
| 3,834 | 32.938053 | 100 | py |
NeuralKG | NeuralKG-main/src/neuralkg/lit_model/CrossELitModel.py | from logging import debug
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import json
from collections import defaultdict as ddict
from IPython import embed
from .BaseLitModel import BaseLitModel
from neuralkg.eval_task import *
from IPython import embed
from functools import partial
class CrossELitModel(BaseLitModel):
def __init__(self, model, args):
super().__init__(model, args)
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
sample = batch["sample"]
hr_label = batch["hr_label"]
tr_label = batch["tr_label"]
# sample_id = batch["sample_id"]
# sample_score = self.model(sample, sample_id)
hr_score, tr_score = self.model(sample)
hr_loss = self.loss(hr_score, hr_label)
tr_loss = self.loss(tr_score, tr_label)
loss = hr_loss + tr_loss
regularize_loss = self.args.weight_decay * self.model.regularize_loss(1)
loss += regularize_loss
self.log("Train|loss", loss, on_step=False, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
# pos_triple, tail_label, head_label = batch
results = dict()
ranks = link_predict(batch, self.model, prediction='all')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def validation_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Eval")
# self.log("Eval|mrr", outputs["Eval|mrr"], on_epoch=True)
self.log_dict(outputs, prog_bar=True, on_epoch=True)
def test_step(self, batch, batch_idx):
results = dict()
ranks = link_predict(batch, self.model, prediction='all')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def test_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Test")
self.log_dict(outputs, prog_bar=True, on_epoch=True)
    '''set up the optimizer and lr_scheduler here'''
def configure_optimizers(self):
milestones = int(self.args.max_epochs / 2)
# optimizer = self.optimizer_class(self.model.parameters(), lr=self.args.lr, weight_decay = self.args.weight_decay)
optimizer = self.optimizer_class(self.model.parameters(), lr=self.args.lr)
StepLR = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[milestones], gamma=0.1)
optim_dict = {'optimizer': optimizer, 'lr_scheduler': StepLR}
return optim_dict
| 2,908 | 37.276316 | 123 | py |
NeuralKG | NeuralKG-main/src/neuralkg/lit_model/BaseLitModel.py | import argparse
import pytorch_lightning as pl
import torch
from collections import defaultdict as ddict
from neuralkg import loss
import numpy as np
class Config(dict):
def __getattr__(self, name):
return self.get(name)
def __setattr__(self, name, val):
self[name] = val
class BaseLitModel(pl.LightningModule):
"""
Generic PyTorch-Lightning class that must be initialized with a PyTorch module.
"""
def __init__(self, model, args: argparse.Namespace = None, src_list = None, dst_list=None, rel_list=None):
super().__init__()
self.model = model
self.args = args
optim_name = args.optim_name
self.optimizer_class = getattr(torch.optim, optim_name)
loss_name = args.loss_name
self.loss_class = getattr(loss, loss_name)
self.loss = self.loss_class(args, model)
if self.args.model_name == 'SEGNN':
self.automatic_optimization = False
#TODO:SEGNN
@staticmethod
def add_to_argparse(parser):
parser.add_argument("--lr", type=float, default=0.1)
parser.add_argument("--weight_decay", type=float, default=0.01)
return parser
def configure_optimizers(self):
raise NotImplementedError
def forward(self, x):
raise NotImplementedError
def training_step(self, batch, batch_idx): # pylint: disable=unused-argument
raise NotImplementedError
def validation_step(self, batch, batch_idx): # pylint: disable=unused-argument
raise NotImplementedError
def test_step(self, batch, batch_idx): # pylint: disable=unused-argument
raise NotImplementedError
def get_results(self, results, mode):
"""Summarize the results of each batch and calculate the final result of the epoch
Args:
results ([type]): The results of each batch
mode ([type]): Eval or Test
Returns:
dict: The final result of the epoch
"""
outputs = ddict(float)
count = np.array([o["count"] for o in results]).sum()
for metric in list(results[0].keys())[1:]:
final_metric = "|".join([mode, metric])
outputs[final_metric] = np.around(np.array([o[metric] for o in results]).sum() / count, decimals=3).item()
return outputs
| 2,337 | 31.472222 | 118 | py |
NeuralKG | NeuralKG-main/src/neuralkg/lit_model/__init__.py | from .BaseLitModel import BaseLitModel
from .KGELitModel import KGELitModel
from .ConvELitModel import ConvELitModel
from .RGCNLitModel import RGCNLitModel
from .KBATLitModel import KBATLitModel
from .CompGCNLitModel import CompGCNLitModel
from .CrossELitModel import CrossELitModel
from .XTransELitModel import XTransELitModel
from .IterELitModel import IterELitModel
from .RugELitModel import RugELitModel
from .SEGNNLitModel import SEGNNLitModel | 448 | 39.818182 | 44 | py |
NeuralKG | NeuralKG-main/src/neuralkg/lit_model/CompGCNLitModel.py | from logging import debug
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import json
from collections import defaultdict as ddict
from IPython import embed
from .BaseLitModel import BaseLitModel
from neuralkg.eval_task import *
from IPython import embed
from functools import partial
class CompGCNLitModel(BaseLitModel):
def __init__(self, model, args):
super().__init__(model, args)
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
graph = batch["graph"] #和RGCNLitModel差别很小 形式上只有entity的区别,是否要改动
sample = batch["sample"]
label = batch["label"]
relation = batch['relation']
norm = batch['norm']
score = self.model(graph, relation, norm, sample)
label = ((1.0 - self.args.smoothing) * label) + (
1.0 / self.args.num_ent
)
loss = self.loss(score, label)
self.log("Train|loss", loss, on_step=False, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
# pos_triple, tail_label, head_label = batch
results = dict()
ranks = link_predict(batch, self.model, prediction='tail')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def validation_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Eval")
# self.log("Eval|mrr", outputs["Eval|mrr"], on_epoch=True)
self.log_dict(outputs, prog_bar=True, on_epoch=True)
def test_step(self, batch, batch_idx):
results = dict()
ranks = link_predict(batch, self.model, prediction='tail')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def test_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Test")
self.log_dict(outputs, prog_bar=True, on_epoch=True)
    '''set up the optimizer and lr_scheduler here'''
def configure_optimizers(self):
milestones = int(self.args.max_epochs / 2)
optimizer = self.optimizer_class(self.model.parameters(), lr=self.args.lr, weight_decay = 1e-7)
StepLR = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[milestones], gamma=0.1)
optim_dict = {'optimizer': optimizer, 'lr_scheduler': StepLR}
return optim_dict
| 2,737 | 36.506849 | 103 | py |
NeuralKG | NeuralKG-main/src/neuralkg/lit_model/ConvELitModel.py | from logging import debug
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import json
from collections import defaultdict as ddict
from IPython import embed
from .BaseLitModel import BaseLitModel
from neuralkg.eval_task import *
from IPython import embed
from functools import partial
class ConvELitModel(BaseLitModel):
def __init__(self, model, args):
super().__init__(model, args)
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
sample = batch["sample"]
label = batch["label"]
sample_score = self.model(sample)
label = ((1.0 - self.args.smoothing) * label) + (
1.0 / self.args.num_ent
)
loss = self.loss(sample_score,label)
self.log("Train|loss", loss, on_step=False, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
# pos_triple, tail_label, head_label = batch
results = dict()
ranks = link_predict(batch, self.model, prediction='tail')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def validation_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Eval")
# self.log("Eval|mrr", outputs["Eval|mrr"], on_epoch=True)
self.log_dict(outputs, prog_bar=True, on_epoch=True)
def test_step(self, batch, batch_idx):
results = dict()
ranks = link_predict(batch, self.model, prediction='tail')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def test_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Test")
self.log_dict(outputs, prog_bar=True, on_epoch=True)
    '''set up the optimizer and lr_scheduler here'''
def configure_optimizers(self):
milestones = int(self.args.max_epochs / 2)
optimizer = self.optimizer_class(self.model.parameters(), lr=self.args.lr, weight_decay=0)
StepLR = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[milestones], gamma=0.1)
optim_dict = {'optimizer': optimizer, 'lr_scheduler': StepLR}
return optim_dict
| 2,572 | 34.246575 | 100 | py |
NeuralKG | NeuralKG-main/src/neuralkg/lit_model/IterELitModel.py | from logging import debug
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import json
from collections import defaultdict as ddict
from IPython import embed
from .BaseLitModel import BaseLitModel
from neuralkg.eval_task import *
from IPython import embed
import pickle
import time
from functools import partial
class IterELitModel(BaseLitModel):
def __init__(self, model, args):
super().__init__(model, args)
self.epoch=0
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
pos_sample = batch["positive_sample"]
neg_sample = batch["negative_sample"]
mode = batch["mode"]
pos_score = self.model(pos_sample)
neg_score = self.model(pos_sample, neg_sample, mode)
if self.args.use_weight:
subsampling_weight = batch["subsampling_weight"]
loss = self.loss(pos_score, neg_score, subsampling_weight)
else:
loss = self.loss(pos_score, neg_score)
self.log("Train|loss", loss, on_step=False, on_epoch=True)
return loss
def training_epoch_end(self, results):
self.epoch+=1
if self.epoch % self.args.update_axiom_per == 0 and self.epoch !=0:
# axioms include probability for each axiom in axiom pool
# order: ref, sym, tran, inver, sub, equi, inferC
# update_axioms:
# 1) calculate probability for each axiom in axiom pool with current embeddings
# 2) update the valid_axioms
axioms_probability = self.update_axiom()
updated_train_data = self.model.update_train_triples(epoch = self.epoch, update_per= self.args.update_axiom_per)
if updated_train_data:
self.trainer.datamodule.data_train=updated_train_data
self.trainer.datamodule.train_sampler.count = self.trainer.datamodule.train_sampler.count_frequency(updated_train_data)
def update_axiom(self):
time_s = time.time()
axiom_pro = self.model.run_axiom_probability()
time_e = time.time()
print('calculate axiom score:', time_e -time_s)
with open('./save_axiom_prob/axiom_prob.pickle', 'wb') as f: pickle.dump(axiom_pro, f, pickle.HIGHEST_PROTOCOL)
with open('./save_axiom_prob/axiom_pools.pickle', 'wb') as f: pickle.dump(self.model.axiompool, f, pickle.HIGHEST_PROTOCOL)
self.model.update_valid_axioms(axiom_pro)
return self.model.run_axiom_probability()
def validation_step(self, batch, batch_idx):
# pos_triple, tail_label, head_label = batch
results = dict()
ranks = link_predict(batch, self.model, prediction='all')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def validation_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Eval")
# self.log("Eval|mrr", outputs["Eval|mrr"], on_epoch=True)
self.log_dict(outputs, prog_bar=True, on_epoch=True)
def test_step(self, batch, batch_idx):
results = dict()
ranks = link_predict(batch, self.model, prediction='all')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def test_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Test")
self.log_dict(outputs, prog_bar=True, on_epoch=True)
    '''set up the optimizer and lr_scheduler here'''
def configure_optimizers(self):
milestones = [5,50]
optimizer = self.optimizer_class(self.model.parameters(), lr=self.args.lr)
StepLR = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=0.1)
optim_dict = {'optimizer': optimizer, 'lr_scheduler': StepLR}
return optim_dict
| 4,230 | 40.07767 | 139 | py |
NeuralKG | NeuralKG-main/src/neuralkg/lit_model/KBATLitModel.py | from logging import debug
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import json
from collections import defaultdict as ddict
from IPython import embed
from .BaseLitModel import BaseLitModel
from neuralkg.eval_task import *
from IPython import embed
from functools import partial
class KBATLitModel(BaseLitModel):
def __init__(self, model, args):
super().__init__(model, args)
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
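        # Two-phase training: the first args.epoch_GAT epochs train the GAT encoder on
        # positive/negative triples (margin ranking loss), after which the ConvKB decoder
        # is trained on labelled triples (soft-margin loss); see KBAT_Loss for both branches.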
num_epoch = self.current_epoch
if num_epoch < self.args.epoch_GAT:
model = "GAT"
adj = batch['adj_matrix']
n_hop = batch['n_hop']
pos_triple = batch['triples_GAT_pos']
neg_triple = batch['triples_GAT_neg']
pos_score = self.model(pos_triple, model, adj, n_hop)
neg_score = self.model(neg_triple, model, adj, n_hop)
loss = self.loss(model, pos_score, neg_score)
else:
model = "ConvKB"
triples = batch['triples_Con']
label = batch['label']
score = self.model(triples, model)
loss = self.loss(model, score, label=label)
self.log("Train|loss", loss, on_step=False, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
# pos_triple, tail_label, head_label = batch
results = dict()
ranks = link_predict(batch, self.model, prediction='all')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def validation_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Eval")
# self.log("Eval|mrr", outputs["Eval|mrr"], on_epoch=True)
self.log_dict(outputs, prog_bar=True, on_epoch=True)
def test_step(self, batch, batch_idx):
results = dict()
ranks = link_predict(batch, self.model, prediction='all')
results["count"] = torch.numel(ranks)
results["mrr"] = torch.sum(1.0 / ranks).item()
for k in self.args.calc_hits:
results['hits@{}'.format(k)] = torch.numel(ranks[ranks <= k])
return results
def test_epoch_end(self, results) -> None:
outputs = self.get_results(results, "Test")
self.log_dict(outputs, prog_bar=True, on_epoch=True)
    '''set up the optimizer and lr_scheduler here'''
def configure_optimizers(self):
if self.current_epoch < self.args.epoch_GAT:
optimizer = self.optimizer_class(self.model.parameters(), lr=self.args.lr, weight_decay=1e-6)
StepLR = torch.optim.lr_scheduler.StepLR(optimizer, step_size=500, gamma=0.5, last_epoch=-1)
else:
optimizer = self.optimizer_class(self.model.parameters(), lr=self.args.lr, weight_decay=1e-5)
StepLR = torch.optim.lr_scheduler.StepLR(optimizer, step_size=200, gamma=0.5, last_epoch=-1)
optim_dict = {'optimizer': optimizer, 'lr_scheduler': StepLR}
return optim_dict
| 3,269 | 37.928571 | 105 | py |
NeuralKG | NeuralKG-main/src/neuralkg/eval_task/link_prediction_SEGNN.py | import torch
import os
from IPython import embed
#TODO: SEGNN
def link_predict_SEGNN(batch, kg, model, prediction="all"):
"""The evaluate task is predicting the head entity or tail entity in incomplete triples.
Args:
batch: The batch of the triples for validation or test.
model: The KG model for training.
        prediction: mode of link prediction.
Returns:
ranks: The rank of the triple to be predicted.
"""
ent_emb, rel_emb = model.aggragate_emb(kg)
if prediction == "all":
tail_ranks = tail_predict_SEGNN(batch, ent_emb, rel_emb, model)
head_ranks = head_predict_SEGNN(batch, ent_emb, rel_emb, model)
ranks = torch.cat([tail_ranks, head_ranks])
elif prediction == "head":
ranks = head_predict_SEGNN(batch, ent_emb, rel_emb, model)
elif prediction == "tail":
ranks = tail_predict_SEGNN(batch, ent_emb, rel_emb, model)
return ranks.float()
def head_predict_SEGNN(batch, ent_emb, rel_emb, model):
"""Getting head entity ranks.
Args:
batch: The batch of the triples for validation or test
model: The KG model for training.
Returns:
tensor: The rank of the head entity to be predicted, dim [batch_size]
"""
pos_triple = batch["positive_sample"]
head_idx = pos_triple[:, 0]
tail_idx = pos_triple[:, 2]
    rel_idx = pos_triple[:, 1] + 11  # shift to the inverse-relation ids; the hard-coded offset of 11 appears to be dataset-specific
filter_head = batch["filter_head"]
pred_score = model.predictor.score_func(ent_emb[tail_idx], rel_emb[rel_idx], ent_emb)
return calc_ranks_SEGNN(head_idx, filter_head, pred_score)
def tail_predict_SEGNN(batch, ent_emb, rel_emb, model):
"""Getting tail entity ranks.
Args:
batch: The batch of the triples for validation or test
model: The KG model for training.
Returns:
tensor: The rank of the tail entity to be predicted, dim [batch_size]
"""
pos_triple = batch["positive_sample"]
head_idx = pos_triple[:, 0]
rel_idx = pos_triple[:, 1]
tail_idx = pos_triple[:, 2]
filter_tail = batch["filter_tail"]
pred_score = model.predictor.score_func(ent_emb[head_idx], rel_emb[rel_idx], ent_emb)
return calc_ranks_SEGNN(tail_idx, filter_tail, pred_score)
def calc_ranks_SEGNN(idx, filter_label, pred_score):
"""Calculating triples score ranks.
Args:
idx ([type]): The id of the entity to be predicted.
label ([type]): The id of existing triples, to calc filtered results.
pred_score ([type]): The score of the triple predicted by the model.
Returns:
ranks: The rank of the triple to be predicted, dim [batch_size].
"""
score = pred_score + filter_label
size = filter_label.shape[0]
pred_score1 = score[torch.arange(size), idx].unsqueeze(dim=1)
compare_up = torch.gt(score, pred_score1)
compare_low = torch.ge(score, pred_score1)
ranking_up = compare_up.to(dtype=torch.float).sum(dim=1) + 1 # (bs, )
ranking_low = compare_low.to(dtype=torch.float).sum(dim=1) # include the pos one itself, no need to +1
ranking = (ranking_up + ranking_low) / 2
return ranking | 3,206 | 35.443182 | 107 | py |
NeuralKG | NeuralKG-main/src/neuralkg/eval_task/link_prediction.py | import torch
import os
from IPython import embed
def link_predict(batch, model, prediction="all"):
"""The evaluate task is predicting the head entity or tail entity in incomplete triples.
Args:
batch: The batch of the triples for validation or test.
model: The KG model for training.
        prediction: mode of link prediction.
Returns:
ranks: The rank of the triple to be predicted.
"""
if prediction == "all":
tail_ranks = tail_predict(batch, model)
head_ranks = head_predict(batch, model)
ranks = torch.cat([tail_ranks, head_ranks])
elif prediction == "head":
ranks = head_predict(batch, model)
elif prediction == "tail":
ranks = tail_predict(batch, model)
return ranks.float()
def head_predict(batch, model):
"""Getting head entity ranks.
Args:
batch: The batch of the triples for validation or test
model: The KG model for training.
Returns:
tensor: The rank of the head entity to be predicted, dim [batch_size]
"""
pos_triple = batch["positive_sample"]
idx = pos_triple[:, 0]
label = batch["head_label"]
pred_score = model.get_score(batch, "head_predict")
return calc_ranks(idx, label, pred_score)
def tail_predict(batch, model):
"""Getting tail entity ranks.
Args:
batch: The batch of the triples for validation or test
model: The KG model for training.
Returns:
tensor: The rank of the tail entity to be predicted, dim [batch_size]
"""
pos_triple = batch["positive_sample"]
idx = pos_triple[:, 2]
label = batch["tail_label"]
pred_score = model.get_score(batch, "tail_predict")
return calc_ranks(idx, label, pred_score)
def calc_ranks(idx, label, pred_score):
"""Calculating triples score ranks.
Args:
idx ([type]): The id of the entity to be predicted.
label ([type]): The id of existing triples, to calc filtered results.
pred_score ([type]): The score of the triple predicted by the model.
Returns:
ranks: The rank of the triple to be predicted, dim [batch_size].
"""
b_range = torch.arange(pred_score.size()[0])
target_pred = pred_score[b_range, idx]
pred_score = torch.where(label.bool(), -torch.ones_like(pred_score) * 10000000, pred_score)
pred_score[b_range, idx] = target_pred
ranks = (
1
+ torch.argsort(
torch.argsort(pred_score, dim=1, descending=True), dim=1, descending=False
)[b_range, idx]
)
return ranks | 2,582 | 29.034884 | 95 | py |
NeuralKG | NeuralKG-main/src/neuralkg/eval_task/__init__.py | from .link_prediction import *
from .link_prediction_SEGNN import * | 67 | 33 | 36 | py |
NeuralKG | NeuralKG-main/src/neuralkg/loss/KBAT_Loss.py | import torch
import torch.nn.functional as F
import torch.nn as nn
class KBAT_Loss(nn.Module):
def __init__(self, args, model):
super(KBAT_Loss, self).__init__()
self.args = args
self.model = model
self.GAT_loss = nn.MarginRankingLoss(self.args.margin)
self.Con_loss = nn.SoftMarginLoss()
def forward(self, model, score, neg_score=None, label=None):
if model == 'GAT':
y = -torch.ones( 2 * self.args.num_neg * self.args.train_bs).type_as(score)
score = torch.tile(score, (2*self.args.num_neg, 1)).reshape(-1)
loss = self.GAT_loss(score, neg_score, y)
elif model == 'ConvKB':
loss = self.Con_loss(score.view(-1), label.view(-1))
return loss | 774 | 34.227273 | 91 | py |
NeuralKG | NeuralKG-main/src/neuralkg/loss/ComplEx_NNE_AER_Loss.py | import torch
import torch.nn as nn
from IPython import embed
from neuralkg.data import KGData
class ComplEx_NNE_AER_Loss(nn.Module):
def __init__(self, args, model):
super(ComplEx_NNE_AER_Loss, self).__init__()
self.args = args
self.model = model
self.rule_p, self.rule_q = model.rule
self.confidence = model.conf
def forward(self, pos_score, neg_score):
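        # The loss is the standard logistic loss on positive/negative scores plus an
        # entailment regularizer: for each approximate rule p => q with confidence c,
        # penalize c * sum(max(0, Re(p) - Re(q))) and c * sum((Im(p) - Im(q))^2), scaled by mu.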
logistic_neg = torch.log(1 + torch.exp(neg_score)).sum(dim=1)
logistic_pos = torch.log(1 + torch.exp(-pos_score)).sum(dim=1)
logistic_loss = logistic_neg + logistic_pos
re_p, im_p = self.model.rel_emb(self.rule_p).chunk(2, dim=-1)
re_q, im_q = self.model.rel_emb(self.rule_q).chunk(2, dim=-1)
entail_loss_re = self.args.mu * torch.sum(
self.confidence * (re_p - re_q).clamp(min=0).sum(dim=-1)
)
entail_loss_im = self.args.mu * torch.sum(
self.confidence * (im_p - im_q).pow(2).sum(dim=-1)
)
entail_loss = entail_loss_re + entail_loss_im
loss = logistic_loss + entail_loss
# return loss
if self.args.regularization != 0.0:
# Use L2 regularization for ComplEx_NNE_AER
regularization = self.args.regularization * (
self.model.ent_emb.weight.norm(p=2) ** 2
+ self.model.rel_emb.weight.norm(p=2) ** 2
)
loss = loss + regularization
loss = loss.mean()
return loss
| 1,497 | 36.45 | 70 | py |
NeuralKG | NeuralKG-main/src/neuralkg/loss/Cross_Entropy_Loss.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from IPython import embed
class Cross_Entropy_Loss(nn.Module):
"""Binary CrossEntropyLoss
Attributes:
args: Some pre-set parameters, etc
model: The KG model for training.
"""
def __init__(self, args, model):
super(Cross_Entropy_Loss, self).__init__()
self.args = args
self.model = model
self.loss = torch.nn.BCELoss()
def forward(self, pred, label):
"""Creates a criterion that measures the Binary Cross Entropy between the target and
the input probabilities. In math:
l_n = - w_n \left[ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right],
Args:
pred: The score of all samples.
label: Vectors used to distinguish positive and negative samples.
Returns:
loss: The training loss for back propagation.
"""
loss = self.loss(pred, label)
return loss | 1,003 | 30.375 | 92 | py |
NeuralKG | NeuralKG-main/src/neuralkg/loss/SimplE_Loss.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from IPython import embed
class SimplE_Loss(nn.Module):
def __init__(self, args, model):
super(SimplE_Loss, self).__init__()
self.args = args
self.model = model
def forward(self, pos_score, neg_score):
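        # Softplus ranking loss: positive scores are negated and concatenated with the
        # negative scores, so the objective is sum(softplus(-pos) + softplus(neg)),
        # plus self.args.regularization * self.model.l2_loss() as an L2 penalty.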
pos_score = -pos_score
score = torch.cat((neg_score, pos_score), dim = -1) #shape:[bs, neg_num+1]
loss = torch.sum(F.softplus(score)) + self.args.regularization * self.model.l2_loss()
return loss
| 542 | 26.15 | 93 | py |
NeuralKG | NeuralKG-main/src/neuralkg/loss/RugE_Loss.py | import torch
import torch.nn as nn
import math
from torch.autograd import Variable
from IPython import embed
class RugE_Loss(nn.Module):
def __init__(self,args, model):
super(RugE_Loss, self).__init__()
self.args = args
self.model = model
def forward(self, pos_score, neg_score, rule, confidence, triple_num, pos_len):
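        # The total loss has three parts: binary cross-entropy on the labelled positive
        # samples, the same on the negative samples, and a binary cross-entropy term that
        # pulls each unlabeled triple appearing in a grounded rule towards a soft label
        # s = clip(sigmoid(score) + confidence-weighted rule term, 0, 1).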
entroy = nn.BCELoss()
        # NOTE: this implementation is still rough; get it running first and refactor later
pos_label = torch.ones([pos_len, 1])
neg_label = torch.zeros([pos_len, self.args.num_neg])
one = torch.ones([1])
zero = torch.zeros([1])
pos_label = Variable(pos_label).to(self.args.gpu, dtype=torch.float)
neg_label = Variable(neg_label).to(self.args.gpu, dtype=torch.float)
one = Variable(one).to(self.args.gpu, dtype=torch.float)
zero = Variable(zero).to(self.args.gpu, dtype=torch.float)
sigmoid_neg = torch.sigmoid(neg_score)
sigmoid_pos = torch.sigmoid(pos_score)
postive_loss = entroy(sigmoid_pos, pos_label)
negative_loss = entroy(sigmoid_neg, neg_label)
pi_gradient = dict()
        # this should probably live outside this big function, otherwise it gets cleared on every call and is of little use
sigmoid_value = dict()
        # compute the gradient-like term for the unlabeled triple in each grounded rule
for i in range(len(rule[0])):
if triple_num[i] == 2:
p1_rule = rule[0][i]
unlabel_rule = rule[1][i]
if p1_rule not in sigmoid_value:
p1_rule_score = self.model(p1_rule.unsqueeze(0))
sigmoid_rule = torch.sigmoid(p1_rule_score)
sigmoid_value[p1_rule] = sigmoid_rule
else:
sigmoid_rule = sigmoid_value[p1_rule]
if unlabel_rule not in pi_gradient:
pi_gradient[unlabel_rule] = self.args.slackness_penalty * confidence[i] * sigmoid_rule
else:
pi_gradient[unlabel_rule] += self.args.slackness_penalty * confidence[i] * sigmoid_rule
elif triple_num[i] == 3:
p1_rule = rule[0][i]
p2_rule = rule[1][i]
unlabel_rule = rule[2][i]
if p1_rule not in sigmoid_value:
p1_rule_score = self.model(p1_rule.unsqueeze(0))
sigmoid_rule = torch.sigmoid(p1_rule_score)
sigmoid_value[p1_rule] = sigmoid_rule
else:
sigmoid_rule = sigmoid_value[p1_rule]
if p2_rule not in sigmoid_value:
p2_rule_score = self.model(p2_rule.unsqueeze(0))
sigmoid_rule2 = torch.sigmoid(p2_rule_score)
sigmoid_value[p2_rule] = sigmoid_rule
else:
sigmoid_rule2 = sigmoid_value[p2_rule]
if unlabel_rule not in pi_gradient:
pi_gradient[unlabel_rule] = self.args.slackness_penalty * confidence[i] * sigmoid_rule * sigmoid_rule2
else:
pi_gradient[unlabel_rule] += self.args.slackness_penalty * confidence[i] * sigmoid_rule * sigmoid_rule2
unlabel_loss = 0.
unlabel_triples = []
gradient = []
        # for each distinct triple in pi_gradient, compute the soft label s according to the formula
for unlabel_triple in pi_gradient.keys():
unlabel_triples.append(unlabel_triple.cpu().numpy())
gradient.append(pi_gradient[unlabel_triple].cpu().detach().numpy())
unlabel_triples = torch.tensor(unlabel_triples).to(self.args.gpu)
gradient = torch.tensor(gradient).to(self.args.gpu).view(-1, 1)
unlabel_triple_score = self.model(unlabel_triples)
unlabel_triple_score = torch.sigmoid(unlabel_triple_score)
unlabel_scores = []
for i in range(0, len(gradient)):
unlabel_score = (torch.min(torch.max(unlabel_triple_score[i] + gradient[i], zero), one)).cpu().detach().numpy()
unlabel_scores.append(unlabel_score[0])
unlabel_scores = torch.tensor(unlabel_scores).to(self.args.gpu)
unlabel_scores = unlabel_scores.unsqueeze(1)
unlabel_loss = entroy(unlabel_triple_score, unlabel_scores)
# for unlabel_triple in pi_gradient.keys():
# unlabelrule_score = model(unlabel_triple.unsqueeze(0))
# sigmoid_unlabelrule = torch.sigmoid(unlabelrule_score)
# unlabel_score = torch.min(torch.max(sigmoid_unlabelrule + args.slackness_penalty * pi_gradient[unlabel_triple], zero), one)
# loss_part = entroy(sigmoid_unlabelrule, unlabel_score.to(args.gpu).detach())
# unlabel_loss = unlabel_loss + loss_part
        # the sigmoid scores and soft labels s of all unlabeled grounded triples are stored in lists, converted to tensors, and then used to compute the loss together
loss = postive_loss + negative_loss + unlabel_loss
if self.args.weight_decay != 0.0:
#Use L2 regularization for ComplEx_NNE_AER
ent_emb_all = self.model.ent_emb(torch.arange(self.args.num_ent).to(self.args.gpu))
rel_emb_all = self.model.rel_emb(torch.arange(self.args.num_rel).to(self.args.gpu))
regularization = self.args.weight_decay * (
ent_emb_all.norm(p = 2)**2 + rel_emb_all.norm(p=2)**2
)
# print(postive_loss)
# print(negative_loss)
# print(unlabel_loss)
loss += regularization
return loss
| 5,328 | 42.325203 | 137 | py |
NeuralKG | NeuralKG-main/src/neuralkg/loss/CrossE_Loss.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from IPython import embed
class CrossE_Loss(nn.Module):
def __init__(self, args, model):
super(CrossE_Loss, self).__init__()
self.args = args
self.model = model
def forward(self, score, label):
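        # `label` is expected to hold +1 for observed triples and -1 for negatives
        # (zeros would contribute to neither term); the positive and negative parts are
        # each normalized by their own counts before being summed into the loss.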
pos = torch.log(torch.clamp(score, 1e-10, 1.0)) * torch.clamp(label, 0.0, 1.0)
neg = torch.log(torch.clamp(1-score, 1e-10, 1.0)) * torch.clamp(-label, 0.0, 1.0)
num_pos = torch.sum(torch.clamp(label, 0.0, 1.0), -1)
num_neg = torch.sum(torch.clamp(-label, 0.0, 1.0), -1)
loss = - torch.sum(torch.sum(pos, -1)/num_pos) - torch.sum(torch.sum(neg, -1)/num_neg)
return loss
| 713 | 34.7 | 94 | py |
NeuralKG | NeuralKG-main/src/neuralkg/loss/Margin_Loss.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from IPython import embed
class Margin_Loss(nn.Module):
"""Margin Ranking Loss
Attributes:
args: Some pre-set parameters, etc
model: The KG model for training.
"""
def __init__(self, args, model):
super(Margin_Loss, self).__init__()
self.args = args
self.model = model
self.loss = nn.MarginRankingLoss(self.args.margin)
def forward(self, pos_score, neg_score):
"""Creates a criterion that measures the loss given inputs pos_score and neg_score. In math:
\text{loss}(x1, x2, y) = \max(0, -y * (x1 - x2) + \text{margin})
Args:
pos_score: The score of positive samples.
neg_score: The score of negative samples.
Returns:
loss: The training loss for back propagation.
"""
label = torch.Tensor([1]).type_as(pos_score)
loss = self.loss(pos_score, neg_score, label)
return loss | 1,040 | 30.545455 | 100 | py |
NeuralKG | NeuralKG-main/src/neuralkg/loss/RGCN_Loss.py | import torch
import torch.nn.functional as F
import torch.nn as nn
class RGCN_Loss(nn.Module):
def __init__(self, args, model):
super(RGCN_Loss, self).__init__()
self.args = args
self.model = model
def reg_loss(self):
return torch.mean(self.model.Loss_emb.pow(2)) + torch.mean(self.model.rel_emb.pow(2))
def forward(self, score, labels):
loss = F.binary_cross_entropy_with_logits(score, labels)
regu = self.args.regularization * self.reg_loss()
loss += regu
return loss | 558 | 28.421053 | 93 | py |
NeuralKG | NeuralKG-main/src/neuralkg/loss/Adv_Loss.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from IPython import embed
class Adv_Loss(nn.Module):
"""Negative sampling loss with self-adversarial training.
Attributes:
args: Some pre-set parameters, such as self-adversarial temperature, etc.
model: The KG model for training.
"""
def __init__(self, args, model):
super(Adv_Loss, self).__init__()
self.args = args
self.model = model
def forward(self, pos_score, neg_score, subsampling_weight=None):
"""Negative sampling loss with self-adversarial training. In math:
L=-\log \sigma\left(\gamma-d_{r}(\mathbf{h}, \mathbf{t})\right)-\sum_{i=1}^{n} p\left(h_{i}^{\prime}, r, t_{i}^{\prime}\right) \log \sigma\left(d_{r}\left(\mathbf{h}_{i}^{\prime}, \mathbf{t}_{i}^{\prime}\right)-\gamma\right)
Args:
pos_score: The score of positive samples.
neg_score: The score of negative samples.
subsampling_weight: The weight for correcting pos_score and neg_score.
Returns:
loss: The training loss for back propagation.
"""
if self.args.negative_adversarial_sampling:
neg_score = (F.softmax(neg_score * self.args.adv_temp, dim=1).detach()
* F.logsigmoid(-neg_score)).sum(dim=1) #shape:[bs]
else:
neg_score = F.logsigmoid(-neg_score).mean(dim = 1)
pos_score = F.logsigmoid(pos_score).view(neg_score.shape[0]) #shape:[bs]
# from IPython import embed;embed();exit()
if self.args.use_weight:
positive_sample_loss = - (subsampling_weight * pos_score).sum()/subsampling_weight.sum()
negative_sample_loss = - (subsampling_weight * neg_score).sum()/subsampling_weight.sum()
else:
positive_sample_loss = - pos_score.mean()
negative_sample_loss = - neg_score.mean()
loss = (positive_sample_loss + negative_sample_loss) / 2
if self.args.model_name == 'ComplEx' or self.args.model_name == 'DistMult' or self.args.model_name == 'BoxE' or self.args.model_name=="IterE":
#Use L3 regularization for ComplEx and DistMult
regularization = self.args.regularization * (
self.model.ent_emb.weight.norm(p = 3)**3 + \
self.model.rel_emb.weight.norm(p = 3)**3
)
# embed();exit()
loss = loss + regularization
return loss
def normalize(self):
"""calculating the regularization.
"""
regularization = self.args.regularization * (
self.model.ent_emb.weight.norm(p = 3)**3 + \
self.model.rel_emb.weight.norm(p = 3)**3
)
return regularization | 2,791 | 41.30303 | 232 | py |
NeuralKG | NeuralKG-main/src/neuralkg/loss/Softplus_Loss.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from IPython import embed
class Softplus_Loss(nn.Module):
"""softplus loss.
Attributes:
args: Some pre-set parameters, etc.
model: The KG model for training.
"""
def __init__(self, args, model):
super(Softplus_Loss, self).__init__()
self.criterion = nn.Softplus()
self.args = args
self.model = model
def forward(self, pos_score, neg_score, subsampling_weight=None):
"""Negative sampling loss Softplus_Loss. In math:
\begin{aligned}
L(\boldsymbol{Q}, \boldsymbol{W})=& \sum_{r(h, t) \in \Omega \cup \Omega^{-}} \log \left(1+\exp \left(-Y_{h r t} \phi(h, r, t)\right)\right) \\
&+\lambda_1\|\boldsymbol{Q}\|_2^2+\lambda_2\|\boldsymbol{W}\|_2^2
\end{aligned}
Args:
pos_score: The score of positive samples (with regularization if DualE).
neg_score: The score of negative samples (with regularization if DualE).
Returns:
loss: The training loss for back propagation.
"""
if self.args.model_name == 'DualE':
p_score, pos_regul_1, pos_regul_2 = pos_score
n_score, neg_regul_1, neg_regul_2 = neg_score
score = torch.cat((-p_score,n_score))
loss = torch.mean(self.criterion(score))
if self.args.model_name == 'DualE':
regularization1 = (pos_regul_1+neg_regul_1*self.args.num_neg)/(self.args.num_neg+1)*self.args.regularization
regularization2 = (pos_regul_2+neg_regul_2*self.args.num_neg)/(self.args.num_neg+1)*self.args.regularization_two
loss = loss+regularization1+regularization2
return loss | 1,754 | 39.813953 | 155 | py |
NeuralKG | NeuralKG-main/src/neuralkg/loss/__init__.py | from .Adv_Loss import Adv_Loss
from .ComplEx_NNE_AER_Loss import ComplEx_NNE_AER_Loss
from .SimplE_Loss import SimplE_Loss
from .Cross_Entropy_Loss import Cross_Entropy_Loss
from .RGCN_Loss import RGCN_Loss
from .KBAT_Loss import KBAT_Loss
from .CrossE_Loss import CrossE_Loss
from .Margin_Loss import Margin_Loss
from .RugE_Loss import RugE_Loss
from .Softplus_Loss import Softplus_Loss | 387 | 37.8 | 54 | py |
NeuralKG | NeuralKG-main/src/neuralkg/utils/setup_parser.py | # -*- coding: utf-8 -*-
import argparse
import os
import yaml
import pytorch_lightning as pl
from neuralkg import lit_model
from neuralkg import data
def setup_parser():
"""Set up Python's ArgumentParser with data, model, trainer, and other arguments."""
parser = argparse.ArgumentParser(add_help=False)
# Add Trainer specific arguments, such as --max_epochs, --gpus, --precision
trainer_parser = pl.Trainer.add_argparse_args(parser)
trainer_parser._action_groups[1].title = "Trainer Args" # pylint: disable=protected-access
parser = argparse.ArgumentParser(add_help=False, parents=[trainer_parser])
# Basic arguments
parser.add_argument('--model_name', default="TransE", type=str, help='The name of model.')
parser.add_argument('--dataset_name', default="FB15K237", type=str, help='The name of dataset.')
parser.add_argument('--data_class', default="KGDataModule", type=str, help='The name of data preprocessing module, default KGDataModule.')
parser.add_argument("--litmodel_name", default="KGELitModel", type=str, help='The name of processing module of training, evaluation and testing, default KGELitModel.')
parser.add_argument("--train_sampler_class",default="UniSampler",type=str, help='Sampling method used in training, default UniSampler.')
parser.add_argument("--test_sampler_class",default="TestSampler",type=str, help='Sampling method used in validation and testing, default TestSampler.')
parser.add_argument('--loss_name', default="Adv_Loss", type=str, help='The name of loss function.')
parser.add_argument('--negative_adversarial_sampling','-adv', default=True, action='store_false', help='Use self-adversarial negative sampling.')
parser.add_argument('--optim_name', default="Adam", type=str, help='The name of optimizer')
parser.add_argument("--seed", default=321, type=int, help='Random seed.')
parser.add_argument('--margin', default=12.0, type=float, help='The fixed margin in loss function. ')
parser.add_argument('--adv_temp', default=1.0, type=float, help='The temperature of sampling in self-adversarial negative sampling.')
parser.add_argument('--emb_dim', default=200, type=int, help='The embedding dimension in KGE model.')
    parser.add_argument('--out_dim', default=200, type=int, help='The output embedding dimension in some KGE model.')
parser.add_argument('--num_neg', default=10, type=int, help='The number of negative samples corresponding to each positive sample')
parser.add_argument('--num_ent', default=None, type=int, help='The number of entity, autogenerate.')
parser.add_argument('--num_rel', default=None, type=int, help='The number of relation, autogenerate.')
parser.add_argument('--check_per_epoch', default=5, type=int, help='Evaluation per n epoch of training.')
parser.add_argument('--early_stop_patience', default=5, type=int, help='If the number of consecutive bad results is n, early stop.')
parser.add_argument("--num_layers", default=2, type=int, help='The number of layers in some GNN model.')
parser.add_argument('--regularization', '-r', default=0.0, type=float)
parser.add_argument("--decoder_model", default=None, type=str, help='The name of decoder model, in some model.')
parser.add_argument('--eval_task', default="link_prediction", type=str, help='The task of validation, default link_prediction.')
parser.add_argument("--calc_hits", default=[1,3,10], type=lambda s: [int(item) for item in s.split(',')], help='calc hits list')
parser.add_argument('--filter_flag', default=True, action='store_false', help='Filter in negative sampling.')
parser.add_argument('--gpu', default='cuda:0', type=str, help='Select the GPU in training, default cuda:0.')
parser.add_argument("--use_wandb", default=False, action='store_true',help='Use "weight and bias" to record the result.')
parser.add_argument('--use_weight', default=False, action='store_true', help='Use subsampling weight.')
parser.add_argument('--checkpoint_dir', default="", type=str, help='The checkpoint model path')
    parser.add_argument('--save_config', default=False, action='store_true', help='Save the parameters to a config file.')
    parser.add_argument('--load_config', default=False, action='store_true', help='Load the parameters from a config file.')
parser.add_argument('--config_path', default="", type=str, help='The config file path.')
parser.add_argument('--freq_init', default=4, type=int)
parser.add_argument('--test_only', default=False, action='store_true')
parser.add_argument('--shuffle', default=True, action='store_false')
parser.add_argument('--norm_flag', default=False, action='store_true')
#parser only for Ruge
parser.add_argument('--slackness_penalty', default=0.01, type=float)
#parser only for CompGCN
parser.add_argument("--opn", default='corr',type=str, help="only on CompGCN, choose Composition Operation")
#parser only for BoxE
parser.add_argument("--dis_order", default=2, type=int, help="only on BoxE, the distance order of score")
# parser only for ComplEx_NNE
parser.add_argument('--mu', default=10, type=float, help='only on ComplEx_NNE,penalty coefficient for ComplEx_NNE')
    # parser only for KBAT
parser.add_argument('--epoch_GAT', default=3000, type=int, help='only on KBAT, the epoch of GAT model')
parser.add_argument("-p2hop", "--partial_2hop", default=False, action='store_true')
# parser only for CrossE
parser.add_argument('--dropout', default=0.5, type=float, help='only on CrossE,for Dropout')
parser.add_argument('--neg_weight', default=50, type=int, help='only on CrossE, make up label')
    # parser only for ConvE
parser.add_argument('--emb_shape', default=20, type=int, help='only on ConvE,The first dimension of the reshaped 2D embedding')
parser.add_argument('--inp_drop', default=0.2, type=float, help='only on ConvE,Dropout for the input embeddings')
parser.add_argument('--hid_drop', default=0.3, type=float, help='only on ConvE,Dropout for the hidden layer')
parser.add_argument('--fet_drop', default=0.2, type=float, help='only on ConvE,Dropout for the convolutional features')
    parser.add_argument('--hid_size_component', default=3648, type=int, help='only on ConvE,The size of the hidden layer. The required size changes with the size of the embeddings.')
    parser.add_argument('--hid_size', default=9728, type=int, help='only on ConvE,The size of the hidden layer. The required size changes with the size of the embeddings.')
parser.add_argument('--smoothing', default=0.1, type=float, help='only on ConvE,Make the label smooth')
parser.add_argument("--out_channel", default=32, type=int, help="in ConvE.py")
parser.add_argument("--ker_sz", default=3, type=int, help="in ConvE.py")
parser.add_argument("--ent_drop_pred", default=0.3, type=float, help="in ConvE.py")
parser.add_argument("--k_h", default=10, type=int, help="in ConvE.py")
parser.add_argument("--k_w", default=20, type=int, help="in ConvE.py")
#parser only for SEGNN
parser.add_argument("--kg_layer", default=1, type=int, help="in SEGNN.py")
parser.add_argument("--rm_rate", default=0.5, type=float, help= "in SEGNN.py")
parser.add_argument("--ent_drop", default=0.2, type=float, help="in SEGNN.py")
parser.add_argument("--rel_drop", default=0, type=float, help="in SEGNN.py")
parser.add_argument("--fc_drop", default = 0.1, type=float, help = "in SEGNN.py")
parser.add_argument("--comp_op", default='mul', type=str, help="in SEGNN.py")
#WN18RR
#parser.add_argument("--bn", default=True, action='store_true')
#FB15K237
parser.add_argument("--bn", default=False, action='store_true')
parser.add_argument("--warmup_epoch", default=5, type=int, help="in SEGNN.py")
parser.add_argument("--warm_up_steps", default=None, type=int, help="in SEGNN.py")
parser.add_argument("--maxsteps", default=None, type=int, help="in SEGNN.py")
#WN18RR
#parser.add_argument("--pred_rel_w", default=True, action="store_true")
#FB15K237
parser.add_argument("--pred_rel_w", default=False, action="store_true")
parser.add_argument("--label_smooth", default=0.1, type=float, help="in SEGNN.py")
# parser only for IterE
parser.add_argument("--max_entialments", default=2000, type=int, help="in IterE.py")
parser.add_argument("--axiom_types", default=10, type=int, help="in IterE.py")
parser.add_argument("--select_probability", default=0.8, type=float, help="in IterE.py")
parser.add_argument("--axiom_weight", default=1.0, type=float, help="in IterE.py")
parser.add_argument("--inject_triple_percent", default=1.0, type=float, help="in IterE.py")
parser.add_argument("--update_axiom_per",default=2, type=int, help='in IterELitModel.py')
#parser only for HAKE
parser.add_argument("--phase_weight", default=1.0, type=float, help='only on HAKE,The weight of phase part')
parser.add_argument("--modulus_weight", default=1.0, type=float, help='only on HAKE,The weight of modulus part')
#parser only for DualE
parser.add_argument("--regularization_two", default=0, type=float, help='only on DualE, regularization_two')
# Get data, model, and LitModel specific arguments
lit_model_group = parser.add_argument_group("LitModel Args")
lit_model.BaseLitModel.add_to_argparse(lit_model_group)
data_group = parser.add_argument_group("Data Args")
data.BaseDataModule.add_to_argparse(data_group)
parser.add_argument("--help", "-h", action="help")
return parser
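def _parse_default_args_demo():
    """A hedged usage sketch (hypothetical helper, not part of the public API): build the
    parser above and parse an empty command line, so every argument falls back to its
    default value."""
    parser = setup_parser()
    return parser.parse_args([])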
| 9,625 | 69.262774 | 182 | py |
NeuralKG | NeuralKG-main/src/neuralkg/utils/tools.py | import importlib
from IPython import embed
import os
import time
import yaml
import torch
from torch.nn import Parameter
from torch.nn.init import xavier_normal_
def import_class(module_and_class_name: str) -> type:
"""Import class from a module, e.g. 'model.TransE'"""
module_name, class_name = module_and_class_name.rsplit(".", 1)
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
return class_
def save_config(args):
    args.save_config = False  # avoid clashing with load_config, which would otherwise re-save the loaded config
if not os.path.exists("config"):
os.mkdir("config")
config_file_name = time.strftime(str(args.model_name)+"_"+str(args.dataset_name)) + ".yaml"
day_name = time.strftime("%Y-%m-%d")
if not os.path.exists(os.path.join("config", day_name)):
os.makedirs(os.path.join("config", day_name))
config = vars(args)
with open(os.path.join(os.path.join("config", day_name), config_file_name), "w") as file:
file.write(yaml.dump(config))
def load_config(args, config_path):
with open(config_path, "r") as f:
config = yaml.safe_load(f)
args.__dict__.update(config)
return args
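def _config_round_trip_demo(args):
    """A hedged sketch (hypothetical helper, not called anywhere in the library): persist
    the current run's settings with save_config and re-load them into the same namespace.
    Assumes `args` was produced by setup_parser()."""
    save_config(args)
    day_name = time.strftime("%Y-%m-%d")
    config_file = os.path.join("config", day_name,
                               str(args.model_name) + "_" + str(args.dataset_name) + ".yaml")
    return load_config(args, config_file)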
def get_param(*shape):
param = Parameter(torch.zeros(shape))
xavier_normal_(param)
return param | 1,287 | 32.025641 | 95 | py |
NeuralKG | NeuralKG-main/src/neuralkg/utils/__init__.py | from .setup_parser import setup_parser
from .tools import * | 59 | 29 | 38 | py |
NeuralKG | NeuralKG-main/src/neuralkg/data/KGDataModule.py | """Base DataModule class."""
from pathlib import Path
from typing import Dict
import argparse
import os
from torch.utils.data import DataLoader
from .base_data_module import *
import pytorch_lightning as pl
class KGDataModule(BaseDataModule):
"""
Base DataModule.
Learn more at https://pytorch-lightning.readthedocs.io/en/stable/datamodules.html
"""
def __init__(
self, args: argparse.Namespace = None, train_sampler=None, test_sampler=None
) -> None:
super().__init__(args)
self.eval_bs = self.args.eval_bs
self.num_workers = self.args.num_workers
self.train_sampler = train_sampler
self.test_sampler = test_sampler
#for SEGNN
#TODO:SEGNN
if self.args.model_name == 'SEGNN':
self.data_train = self.train_sampler.get_train()
single_epoch_step = len(self.train_dataloader()) + 1
self.args.maxsteps = self.args.max_epochs * single_epoch_step
self.args.warm_up_steps = int(single_epoch_step * self.args.warmup_epoch)
def get_data_config(self):
"""Return important settings of the dataset, which will be passed to instantiate models."""
return {
"num_training_steps": self.num_training_steps,
"num_labels": self.num_labels,
}
def prepare_data(self):
"""
Use this method to do things that might write to disk or that need to be done only from a single GPU in distributed settings (so don't set state `self.x = y`).
"""
pass
def setup(self, stage=None):
"""
Split into train, val, test, and set dims.
Should assign `torch Dataset` objects to self.data_train, self.data_val, and optionally self.data_test.
"""
self.data_train = self.train_sampler.get_train()
self.data_val = self.train_sampler.get_valid()
self.data_test = self.train_sampler.get_test()
def get_train_bs(self):
"""Get batch size for training.
        If num_batches is nonzero, the batch size is derived by dividing len(data_train) by num_batches.
        If the user gives neither train_bs nor num_batches, a ValueError is raised.
Returns:
self.args.train_bs: The batch size for training.
"""
if self.args.num_batches != 0:
self.args.train_bs = len(self.data_train) // self.args.num_batches
elif self.args.train_bs == 0:
raise ValueError("train_bs or num_batches must specify one")
return self.args.train_bs
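    # Example of the arithmetic above: with 100000 training triples and --num_batches 100,
    # train_bs becomes 1000; if both train_bs and num_batches are left at 0, a ValueError
    # is raised instead.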
def train_dataloader(self):
self.train_bs = self.get_train_bs()
return DataLoader(
self.data_train,
shuffle=True,
batch_size=self.train_bs,
num_workers=self.num_workers,
pin_memory=True,
drop_last=True,
collate_fn=self.train_sampler.sampling,
)
def val_dataloader(self):
return DataLoader(
self.data_val,
shuffle=False,
batch_size=self.eval_bs,
num_workers=self.num_workers,
pin_memory=True,
collate_fn=self.test_sampler.sampling,
)
def test_dataloader(self):
return DataLoader(
self.data_test,
shuffle=False,
batch_size=self.eval_bs,
num_workers=self.num_workers,
pin_memory=True,
collate_fn=self.test_sampler.sampling,
) | 3,501 | 33.333333 | 167 | py |
NeuralKG | NeuralKG-main/src/neuralkg/data/Grounding.py | from .DataPreprocess import KGData
import pdb
class GroundAllRules:
def __init__(self, args):
self.MapRelation2ID = {}
self.MapEntity2ID = {}
self.Relation2Tuple = {}
self.MapID2Entity = {}
self.MapID2Relation = {}
self.TrainTriples = {}
self.RelSub2Obj = {}
self.MapVariable = {}
self.args = args
self.fnEntityIDMap = args.data_path + "/entities.dict"
self.fnRelationIDMap = args.data_path + "/relations.dict"
path_len = len(args.data_path.split('/'))
self.fnRuleType = args.data_path + "/" + args.data_path.split('/')[path_len - 1] + "_rule"
self.fnTrainingTriples = args.data_path + "/train.txt"
self.fnOutout = args.data_path + "/groudings.txt"
def PropositionalizeRule(self):
self.kgData = KGData(self.args)
self.readData(self.fnEntityIDMap, self.fnRelationIDMap, self.fnTrainingTriples)
self.groundRule(self.fnRuleType, self.fnOutout)
def readData(self, fnEntityIDMap, fnRelationIDMap, fnTrainingTriples):
tokens = []
self.MapEntity2ID = self.kgData.ent2id
self.MapRelation2ID = self.kgData.rel2id
self.TrainTriples = self.kgData.TrainTriples
self.Relation2Tuple = self.kgData.Relation2Tuple
self.RelSub2Obj = self.kgData.RelSub2Obj
# with open(fnEntityIDMap, 'r', encoding='utf-8') as f:
# for line in f.readlines():
# line = line.strip('\n')
# tokens = line.split("\t")
# iEntityID = int(tokens[0])
# strValue = tokens[1]
# self.MapEntity2ID[strValue] = iEntityID
# self.MapID2Entity[iEntityID] = strValue
#
# with open(fnRelationIDMap, "r", encoding='utf-8') as f:
# for line in f.readlines():
# line = line.strip('\n')
# tokens = line.split("\t")
# iRelationID = int(tokens[0])
# strValue = tokens[1]
# self.MapRelation2ID[strValue] = iRelationID
# self.MapID2Relation[iRelationID] = strValue
print("Start to load soft rules......")
# with open(fnTrainingTriples, "r", encoding='utf-8') as f:
# for line in f.readlines():
# line = line.strip('\n')
# tokens = line.split("\t")
# iRelationID = self.MapRelation2ID[tokens[1]]
# strValue = tokens[0] + "#" + tokens[2]
# line = line.replace(tokens[0], str(self.MapEntity2ID[tokens[0]]))
# line = line.replace(tokens[1], str(self.MapRelation2ID[tokens[1]]))
# line = line.replace(tokens[2], str(self.MapEntity2ID[tokens[2]]))
# self.TrainTriples[line] = True
# if not iRelationID in self.Relation2Tuple:
# tmpLst = []
# tmpLst.append(strValue)
# self.Relation2Tuple[iRelationID] = tmpLst
# else:
# self.Relation2Tuple[iRelationID].append(strValue)
# with open(fnTrainingTriples, "r", encoding='utf-8') as f:
# for line in f.readlines():
# line = line.strip('\n')
# tokens = line.split("\t")
# iRelationID = self.MapRelation2ID[tokens[1]]
# iSubjectID = self.MapEntity2ID[tokens[0]]
# iObjectID = self.MapEntity2ID[tokens[2]]
# tmpMap = {}
# tmpMap_in = {}
# if not iRelationID in self.RelSub2Obj:
# if not iSubjectID in tmpMap:
# tmpMap_in.clear()
# tmpMap_in[iObjectID] = True
# tmpMap[iSubjectID] = tmpMap_in
# else:
# tmpMap[iSubjectID][iObjectID] = True
# self.RelSub2Obj[iRelationID] = tmpMap
# else:
# tmpMap = self.RelSub2Obj[iRelationID]
# if not iSubjectID in tmpMap:
# tmpMap_in.clear()
# tmpMap_in[iObjectID] = True
# tmpMap[iSubjectID] = tmpMap_in
# else:
# tmpMap[iSubjectID][iObjectID] = True
# self.RelSub2Obj[iRelationID] = tmpMap # 是不是应该要加?
print("success")
def groundRule(self, fnRuleType, fnOutput):
print("Start to propositionalize soft rules......")
writer = open(fnOutput, "w")
tmpLst = {}
with open(fnRuleType, "r", encoding='utf-8') as f:
for line in f.readlines():
line = line.strip()
if line.startswith("?"):
bodys = line.split("=>")[0].strip().split(" ")
heads = line.split("=>")[1].strip().split(" ")
if len(bodys) == 3:
bEntity1 = bodys[0]
iFstRelation = self.MapRelation2ID[bodys[1]]
bEntity2 = bodys[2]
bEntity3 = heads[0]
iSndRelation = self.MapRelation2ID[heads[1]]
bEntity4 = heads[2].split("\t")[0]
hEntity1 = heads[2].split("\t")[1]
confi = float(hEntity1)
iSize = len(self.Relation2Tuple[iFstRelation])
for i in range(0, iSize):
strValue = self.Relation2Tuple.get(iFstRelation)[i]
iSubjectID = self.MapEntity2ID[strValue.split("#")[0]]
iObjectID = self.MapEntity2ID[strValue.split("#")[1]]
self.MapVariable[bEntity1] = iSubjectID
self.MapVariable[bEntity2] = iObjectID
strKey = "(" + str(iSubjectID) + "\t" + str(iFstRelation) + "\t" + str(
iObjectID) + ")\t" + "(" + str(self.MapVariable[bEntity3]) + "\t" + str(
iSndRelation) + "\t" + str(self.MapVariable[bEntity4]) + ")"
strCons = str(self.MapVariable[bEntity3]) + "\t" + str(iSndRelation) + "\t" + str(
self.MapVariable[bEntity4])
if (not strKey in tmpLst) and (not strCons in self.TrainTriples):
writer.write("2\t" + str(strKey) + "\t" + str(confi) + "\n")
tmpLst[strKey] = True
writer.flush()
self.MapVariable.clear()
if len(bodys) == 6:
bEntity1 = bodys[0].strip()
iFstRelation = self.MapRelation2ID[bodys[1].strip()]
bEntity2 = bodys[2].strip()
bEntity3 = bodys[3].strip()
iSndRelation = self.MapRelation2ID[bodys[4].strip()]
bEntity4 = bodys[5].strip()
hEntity1 = heads[0].strip()
iTrdRelation = self.MapRelation2ID[heads[1].strip()]
hEntity2 = heads[2].split("\t")[0].strip()
confidence = heads[2].split("\t")[1].strip()
confi = float(confidence)
mapFstRel = self.RelSub2Obj[iFstRelation]
mapSndRel = self.RelSub2Obj[iSndRelation]
for lstEntity1 in mapFstRel:
self.MapVariable[bEntity1] = lstEntity1
lstEntity2 = list(mapFstRel[lstEntity1].keys())
iFstSize = len(lstEntity2)
for iFstIndex in range(0, iFstSize):
iEntity2ID = lstEntity2[iFstIndex]
self.MapVariable[bEntity1] = lstEntity1
self.MapVariable[bEntity2] = iEntity2ID
lstEntity3 = []
if (bEntity3 in self.MapVariable) and (self.MapVariable[bEntity3] in mapSndRel):
lstEntity3.append(self.MapVariable[bEntity3])
else:
if not bEntity3 in self.MapVariable:
lstEntity3 = list(mapSndRel.keys())
iSndSize = len(lstEntity3)
for iSndIndex in range(0, iSndSize):
iEntity3ID = lstEntity3[iSndIndex]
self.MapVariable[bEntity1] = lstEntity1
self.MapVariable[bEntity2] = iEntity2ID
self.MapVariable[bEntity3] = iEntity3ID
lstEntity4 = []
if (bEntity4 in self.MapVariable) and (
self.MapVariable[bEntity4] in mapSndRel[iEntity3ID]):
lstEntity4.append(self.MapVariable[bEntity4])
else:
if not bEntity4 in self.MapVariable:
lstEntity4 = list(mapSndRel[iEntity3ID].keys())
iTrdSize = len(lstEntity4)
for iTrdIndex in range(0, iTrdSize):
iEntity4ID = lstEntity4[iTrdIndex]
self.MapVariable[bEntity4] = iEntity4ID
infer = str(self.MapVariable[hEntity1]) + "\t" + str(iTrdRelation) + "\t" + str(
self.MapVariable[hEntity2])
strKey = "(" + str(lstEntity1) + "\t" + str(iFstRelation) + "\t" + str(
iEntity2ID) + ")\t(" + str(iEntity3ID) + "\t" + str(iSndRelation) + "\t" + str(
iEntity4ID) + ")\t" + "(" + str(self.MapVariable[hEntity1]) + "\t" + str(
iTrdRelation) + "\t" + str(self.MapVariable[hEntity2]) + ")"
if (not strKey in tmpLst) and (not infer in self.TrainTriples):
writer.write("3\t" + strKey + "\t" + str(confi) + "\n")
tmpLst[strKey] = True
self.MapVariable.clear()
self.MapVariable.clear()
writer.flush()
self.MapVariable.clear()
| 10,567 | 51.84 | 119 | py |
NeuralKG | NeuralKG-main/src/neuralkg/data/base_data_module.py | """Base DataModule class."""
from pathlib import Path
from typing import Dict
import argparse
import os
import pytorch_lightning as pl
from torch.utils.data import DataLoader
class Config(dict):
def __getattr__(self, name):
return self.get(name)
def __setattr__(self, name, val):
self[name] = val
BATCH_SIZE = 8
NUM_WORKERS = 8
class BaseDataModule(pl.LightningDataModule):
"""
Base DataModule.
Learn more at https://pytorch-lightning.readthedocs.io/en/stable/datamodules.html
"""
def __init__(self, args) -> None:
super().__init__()
self.args = args
@staticmethod
def add_to_argparse(parser):
parser.add_argument(
"--train_bs",
type=int,
default=0,
help="Number of examples to operate on per forward step.",
)
parser.add_argument(
"--num_batches",
type=int,
default=0,
help="Number of examples to operate on per forward step.",
)
parser.add_argument(
"--eval_bs",
type=int,
default=16,
help="Number of examples to operate on per forward step.",
)
parser.add_argument(
"--num_workers",
type=int,
default=8,
help="Number of additional processes to load data.",
)
parser.add_argument(
"--data_path",
type=str,
default="./dataset/WN18RR",
help="Number of additional processes to load data.",
)
return parser
def prepare_data(self):
"""
Use this method to do things that might write to disk or that need to be done only from a single GPU in distributed settings (so don't set state `self.x = y`).
"""
pass
def setup(self, stage=None):
"""
Split into train, val, test, and set dims.
Should assign `torch Dataset` objects to self.data_train, self.data_val, and optionally self.data_test.
"""
self.data_train = None
self.data_val = None
self.data_test = None
def train_dataloader(self):
return DataLoader(self.data_train, shuffle=True, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True)
def val_dataloader(self):
return DataLoader(self.data_val, shuffle=False, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True)
def test_dataloader(self):
return DataLoader(self.data_test, shuffle=False, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True)
def get_config(self):
return dict(num_labels=self.num_labels) | 2,731 | 28.06383 | 167 | py |
NeuralKG | NeuralKG-main/src/neuralkg/data/DataPreprocess.py | import numpy as np
from torch.utils.data import Dataset
import torch
import os
from collections import defaultdict as ddict
from IPython import embed
class KGData(object):
"""Data preprocessing of kg data.
Attributes:
args: Some pre-set parameters, such as dataset path, etc.
ent2id: Encoding the entity in triples, type: dict.
rel2id: Encoding the relation in triples, type: dict.
id2ent: Decoding the entity in triples, type: dict.
        id2rel: Decoding the relation in triples, type: dict.
train_triples: Record the triples for training, type: list.
valid_triples: Record the triples for validation, type: list.
test_triples: Record the triples for testing, type: list.
all_true_triples: Record all triples including train,valid and test, type: list.
        TrainTriples: Record the training triples (as id strings) for grounding, type: dict.
        Relation2Tuple: Record the "head#tail" pairs of each relation for grounding, type: dict.
        RelSub2Obj: Record the head-to-tail adjacency of each relation for grounding, type: dict.
hr2t_train: Record the tail corresponding to the same head and relation, type: defaultdict(class:set).
rt2h_train: Record the head corresponding to the same tail and relation, type: defaultdict(class:set).
h2rt_train: Record the tail, relation corresponding to the same head, type: defaultdict(class:set).
        t2rh_train: Record the head, relation corresponding to the same tail, type: defaultdict(class:set).
"""
    # TODO: split these functions further; call only the most basic parts during initialization and invoke the rest as needed.
def __init__(self, args):
self.args = args
        # basic fields
self.ent2id = {}
self.rel2id = {}
        # needed by the predictor
self.id2ent = {}
self.id2rel = {}
        # ids of the triples
self.train_triples = []
self.valid_triples = []
self.test_triples = []
self.all_true_triples = set()
        # used for grounding
self.TrainTriples = {}
self.Relation2Tuple = {}
self.RelSub2Obj = {}
self.hr2t_train = ddict(set)
self.rt2h_train = ddict(set)
self.h2rt_train = ddict(set)
self.t2rh_train = ddict(set)
self.get_id()
self.get_triples_id()
if args.use_weight:
self.count = self.count_frequency(self.train_triples)
def get_id(self):
"""Get entity/relation id, and entity/relation number.
Update:
self.ent2id: Entity to id.
self.rel2id: Relation to id.
self.id2ent: id to Entity.
self.id2rel: id to Relation.
self.args.num_ent: Entity number.
self.args.num_rel: Relation number.
"""
with open(os.path.join(self.args.data_path, "entities.dict")) as fin:
for line in fin:
eid, entity = line.strip().split("\t")
self.ent2id[entity] = int(eid)
self.id2ent[int(eid)] = entity
with open(os.path.join(self.args.data_path, "relations.dict")) as fin:
for line in fin:
rid, relation = line.strip().split("\t")
self.rel2id[relation] = int(rid)
self.id2rel[int(rid)] = relation
self.args.num_ent = len(self.ent2id)
self.args.num_rel = len(self.rel2id)
def get_triples_id(self):
"""Get triples id, save in the format of (h, r, t).
Update:
self.train_triples: Train dataset triples id.
self.valid_triples: Valid dataset triples id.
self.test_triples: Test dataset triples id.
"""
with open(os.path.join(self.args.data_path, "train.txt")) as f:
for line in f.readlines():
h, r, t = line.strip().split()
self.train_triples.append(
(self.ent2id[h], self.rel2id[r], self.ent2id[t])
)
tmp = str(self.ent2id[h]) + '\t' + str(self.rel2id[r]) + '\t' + str(self.ent2id[t])
self.TrainTriples[tmp] = True
iRelationID = self.rel2id[r]
strValue = str(h) + "#" + str(t)
if not iRelationID in self.Relation2Tuple:
tmpLst = []
tmpLst.append(strValue)
self.Relation2Tuple[iRelationID] = tmpLst
else:
self.Relation2Tuple[iRelationID].append(strValue)
iRelationID = self.rel2id[r]
iSubjectID = self.ent2id[h]
iObjectID = self.ent2id[t]
tmpMap = {}
tmpMap_in = {}
if not iRelationID in self.RelSub2Obj:
if not iSubjectID in tmpMap:
tmpMap_in.clear()
tmpMap_in[iObjectID] = True
tmpMap[iSubjectID] = tmpMap_in
else:
tmpMap[iSubjectID][iObjectID] = True
self.RelSub2Obj[iRelationID] = tmpMap
else:
tmpMap = self.RelSub2Obj[iRelationID]
if not iSubjectID in tmpMap:
tmpMap_in.clear()
tmpMap_in[iObjectID] = True
tmpMap[iSubjectID] = tmpMap_in
else:
tmpMap[iSubjectID][iObjectID] = True
                    self.RelSub2Obj[iRelationID] = tmpMap # should this assignment be necessary?
with open(os.path.join(self.args.data_path, "valid.txt")) as f:
for line in f.readlines():
h, r, t = line.strip().split()
self.valid_triples.append(
(self.ent2id[h], self.rel2id[r], self.ent2id[t])
)
with open(os.path.join(self.args.data_path, "test.txt")) as f:
for line in f.readlines():
h, r, t = line.strip().split()
self.test_triples.append(
(self.ent2id[h], self.rel2id[r], self.ent2id[t])
)
self.all_true_triples = set(
self.train_triples + self.valid_triples + self.test_triples
)
def get_hr2t_rt2h_from_train(self):
"""Get the set of hr2t and rt2h from train dataset, the data type is numpy.
Update:
self.hr2t_train: The set of hr2t.
self.rt2h_train: The set of rt2h.
"""
for h, r, t in self.train_triples:
self.hr2t_train[(h, r)].add(t)
self.rt2h_train[(r, t)].add(h)
for h, r in self.hr2t_train:
self.hr2t_train[(h, r)] = np.array(list(self.hr2t_train[(h, r)]))
for r, t in self.rt2h_train:
self.rt2h_train[(r, t)] = np.array(list(self.rt2h_train[(r, t)]))
@staticmethod
def count_frequency(triples, start=4):
'''Get frequency of a partial triple like (head, relation) or (relation, tail).
The frequency will be used for subsampling like word2vec.
Args:
triples: Sampled triples.
start: Initial count number.
Returns:
count: Record the number of (head, relation).
'''
count = {}
for head, relation, tail in triples:
if (head, relation) not in count:
count[(head, relation)] = start
else:
count[(head, relation)] += 1
if (tail, -relation-1) not in count:
count[(tail, -relation-1)] = start
else:
count[(tail, -relation-1)] += 1
return count
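    # Worked example (start=4): two training triples (h, r, t1) and (h, r, t2) give
    # count[(h, r)] == 5, while count[(t1, -r-1)] and count[(t2, -r-1)] stay at 4;
    # UniSampler later uses sqrt(1 / (count[(h, r)] + count[(t, -r-1)])) as the weight.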
def get_h2rt_t2hr_from_train(self):
"""Get the set of h2rt and t2hr from train dataset, the data type is numpy.
Update:
self.h2rt_train: The set of h2rt.
self.t2rh_train: The set of t2hr.
"""
for h, r, t in self.train_triples:
self.h2rt_train[h].add((r, t))
self.t2rh_train[t].add((r, h))
for h in self.h2rt_train:
self.h2rt_train[h] = np.array(list(self.h2rt_train[h]))
for t in self.t2rh_train:
self.t2rh_train[t] = np.array(list(self.t2rh_train[t]))
def get_hr_trian(self):
'''Change the generation mode of batch.
Merging triples which have same head and relation for 1vsN training mode.
Returns:
self.train_triples: The tuple(hr, t) list for training
'''
self.t_triples = self.train_triples
self.train_triples = [ (hr, list(t)) for (hr,t) in self.hr2t_train.items()]
class BaseSampler(KGData):
"""Traditional random sampling mode.
"""
def __init__(self, args):
super().__init__(args)
self.get_hr2t_rt2h_from_train()
def corrupt_head(self, t, r, num_max=1):
"""Negative sampling of head entities.
Args:
t: Tail entity in triple.
r: Relation in triple.
num_max: The maximum of negative samples generated
Returns:
neg: The negative sample of head entity filtering out the positive head entity.
"""
tmp = torch.randint(low=0, high=self.args.num_ent, size=(num_max,)).numpy()
if not self.args.filter_flag:
return tmp
mask = np.in1d(tmp, self.rt2h_train[(r, t)], assume_unique=True, invert=True)
neg = tmp[mask]
return neg
def corrupt_tail(self, h, r, num_max=1):
"""Negative sampling of tail entities.
Args:
h: Head entity in triple.
r: Relation in triple.
num_max: The maximum of negative samples generated
Returns:
neg: The negative sample of tail entity filtering out the positive tail entity.
"""
tmp = torch.randint(low=0, high=self.args.num_ent, size=(num_max,)).numpy()
if not self.args.filter_flag:
return tmp
mask = np.in1d(tmp, self.hr2t_train[(h, r)], assume_unique=True, invert=True)
neg = tmp[mask]
return neg
def head_batch(self, h, r, t, neg_size=None):
"""Negative sampling of head entities.
Args:
h: Head entity in triple
t: Tail entity in triple.
r: Relation in triple.
neg_size: The size of negative samples.
Returns:
The negative sample of head entity. [neg_size]
"""
neg_list = []
neg_cur_size = 0
while neg_cur_size < neg_size:
neg_tmp = self.corrupt_head(t, r, num_max=(neg_size - neg_cur_size) * 2)
neg_list.append(neg_tmp)
neg_cur_size += len(neg_tmp)
return np.concatenate(neg_list)[:neg_size]
def tail_batch(self, h, r, t, neg_size=None):
"""Negative sampling of tail entities.
Args:
h: Head entity in triple
t: Tail entity in triple.
r: Relation in triple.
neg_size: The size of negative samples.
Returns:
The negative sample of tail entity. [neg_size]
"""
neg_list = []
neg_cur_size = 0
while neg_cur_size < neg_size:
neg_tmp = self.corrupt_tail(h, r, num_max=(neg_size - neg_cur_size) * 2)
neg_list.append(neg_tmp)
neg_cur_size += len(neg_tmp)
return np.concatenate(neg_list)[:neg_size]
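    # Note: corrupt_head/corrupt_tail may return fewer candidates than requested once the
    # true entities are filtered out, so head_batch/tail_batch oversample by a factor of 2
    # in a loop and truncate the concatenated result to neg_size.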
def get_train(self):
return self.train_triples
def get_valid(self):
return self.valid_triples
def get_test(self):
return self.test_triples
def get_all_true_triples(self):
return self.all_true_triples
class RevSampler(KGData):
"""Adding reverse triples in traditional random sampling mode.
For each triple (h, r, t), generate the reverse triple (t, r`, h).
r` = r + num_rel.
Attributes:
hr2t_train: Record the tail corresponding to the same head and relation, type: defaultdict(class:set).
rt2h_train: Record the head corresponding to the same tail and relation, type: defaultdict(class:set).
"""
def __init__(self, args):
super().__init__(args)
self.hr2t_train = ddict(set)
self.rt2h_train = ddict(set)
self.add_reverse_relation()
self.add_reverse_triples()
self.get_hr2t_rt2h_from_train()
def add_reverse_relation(self):
"""Get entity/relation/reverse relation id, and entity/relation number.
Update:
self.ent2id: Entity id.
self.rel2id: Relation id.
self.args.num_ent: Entity number.
self.args.num_rel: Relation number.
"""
with open(os.path.join(self.args.data_path, "relations.dict")) as fin:
len_rel2id = len(self.rel2id)
for line in fin:
rid, relation = line.strip().split("\t")
self.rel2id[relation + "_reverse"] = int(rid) + len_rel2id
self.id2rel[int(rid) + len_rel2id] = relation + "_reverse"
self.args.num_rel = len(self.rel2id)
def add_reverse_triples(self):
"""Generate reverse triples (t, r`, h).
Update:
self.train_triples: Triples for training.
self.valid_triples: Triples for validation.
self.test_triples: Triples for testing.
            self.all_true_triples: All triples including train, valid and test.
"""
with open(os.path.join(self.args.data_path, "train.txt")) as f:
for line in f.readlines():
h, r, t = line.strip().split()
self.train_triples.append(
(self.ent2id[t], self.rel2id[r + "_reverse"], self.ent2id[h])
)
with open(os.path.join(self.args.data_path, "valid.txt")) as f:
for line in f.readlines():
h, r, t = line.strip().split()
self.valid_triples.append(
(self.ent2id[t], self.rel2id[r + "_reverse"], self.ent2id[h])
)
with open(os.path.join(self.args.data_path, "test.txt")) as f:
for line in f.readlines():
h, r, t = line.strip().split()
self.test_triples.append(
(self.ent2id[t], self.rel2id[r + "_reverse"], self.ent2id[h])
)
self.all_true_triples = set(
self.train_triples + self.valid_triples + self.test_triples
)
def get_train(self):
return self.train_triples
def get_valid(self):
return self.valid_triples
def get_test(self):
return self.test_triples
def get_all_true_triples(self):
return self.all_true_triples
def corrupt_head(self, t, r, num_max=1):
"""Negative sampling of head entities.
Args:
t: Tail entity in triple.
r: Relation in triple.
num_max: The maximum of negative samples generated
Returns:
neg: The negative sample of head entity filtering out the positive head entity.
"""
tmp = torch.randint(low=0, high=self.args.num_ent, size=(num_max,)).numpy()
if not self.args.filter_flag:
return tmp
mask = np.in1d(tmp, self.rt2h_train[(r, t)], assume_unique=True, invert=True)
neg = tmp[mask]
return neg
def corrupt_tail(self, h, r, num_max=1):
"""Negative sampling of tail entities.
Args:
h: Head entity in triple.
r: Relation in triple.
num_max: The maximum of negative samples generated
Returns:
neg: The negative sample of tail entity filtering out the positive tail entity.
"""
tmp = torch.randint(low=0, high=self.args.num_ent, size=(num_max,)).numpy()
if not self.args.filter_flag:
return tmp
mask = np.in1d(tmp, self.hr2t_train[(h, r)], assume_unique=True, invert=True)
neg = tmp[mask]
return neg
def head_batch(self, h, r, t, neg_size=None):
"""Negative sampling of head entities.
Args:
h: Head entity in triple
t: Tail entity in triple.
r: Relation in triple.
neg_size: The size of negative samples.
Returns:
The negative sample of head entity. [neg_size]
"""
neg_list = []
neg_cur_size = 0
while neg_cur_size < neg_size:
neg_tmp = self.corrupt_head(t, r, num_max=(neg_size - neg_cur_size) * 2)
neg_list.append(neg_tmp)
neg_cur_size += len(neg_tmp)
return np.concatenate(neg_list)[:neg_size]
def tail_batch(self, h, r, t, neg_size=None):
"""Negative sampling of tail entities.
Args:
h: Head entity in triple
t: Tail entity in triple.
r: Relation in triple.
neg_size: The size of negative samples.
Returns:
The negative sample of tail entity. [neg_size]
"""
neg_list = []
neg_cur_size = 0
while neg_cur_size < neg_size:
neg_tmp = self.corrupt_tail(h, r, num_max=(neg_size - neg_cur_size) * 2)
neg_list.append(neg_tmp)
neg_cur_size += len(neg_tmp)
return np.concatenate(neg_list)[:neg_size] | 17,102 | 34.930672 | 110 | py |
NeuralKG | NeuralKG-main/src/neuralkg/data/Sampler.py | from numpy.random.mtrand import normal
import torch
import numpy as np
from torch.utils.data import Dataset
from collections import defaultdict as ddict
import random
from .DataPreprocess import *
from IPython import embed
import dgl
import torch.nn.functional as F
import time
import queue
from os.path import join
import math
class UniSampler(BaseSampler):
"""Random negative sampling
Filtering out positive samples and selecting some samples randomly as negative samples.
Attributes:
cross_sampling_flag: The flag of cross sampling head and tail negative samples.
"""
def __init__(self, args):
super().__init__(args)
self.cross_sampling_flag = 0
def sampling(self, data):
"""Filtering out positive samples and selecting some samples randomly as negative samples.
Args:
data: The triples used to be sampled.
Returns:
batch_data: The training data.
"""
batch_data = {}
neg_ent_sample = []
subsampling_weight = []
self.cross_sampling_flag = 1 - self.cross_sampling_flag
if self.cross_sampling_flag == 0:
batch_data['mode'] = "head-batch"
for h, r, t in data:
neg_head = self.head_batch(h, r, t, self.args.num_neg)
neg_ent_sample.append(neg_head)
if self.args.use_weight:
weight = self.count[(h, r)] + self.count[(t, -r-1)]
subsampling_weight.append(weight)
else:
batch_data['mode'] = "tail-batch"
for h, r, t in data:
neg_tail = self.tail_batch(h, r, t, self.args.num_neg)
neg_ent_sample.append(neg_tail)
if self.args.use_weight:
weight = self.count[(h, r)] + self.count[(t, -r-1)]
subsampling_weight.append(weight)
batch_data["positive_sample"] = torch.LongTensor(np.array(data))
batch_data['negative_sample'] = torch.LongTensor(np.array(neg_ent_sample))
if self.args.use_weight:
batch_data["subsampling_weight"] = torch.sqrt(1/torch.tensor(subsampling_weight))
return batch_data
def uni_sampling(self, data):
batch_data = {}
neg_head_list = []
neg_tail_list = []
for h, r, t in data:
neg_head = self.head_batch(h, r, t, self.args.num_neg)
neg_head_list.append(neg_head)
neg_tail = self.tail_batch(h, r, t, self.args.num_neg)
neg_tail_list.append(neg_tail)
batch_data["positive_sample"] = torch.LongTensor(np.array(data))
        batch_data['negative_head'] = torch.LongTensor(np.array(neg_head_list))
        batch_data['negative_tail'] = torch.LongTensor(np.array(neg_tail_list))
return batch_data
def get_sampling_keys(self):
return ['positive_sample', 'negative_sample', 'mode']
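def _uni_sampler_demo(args, batch_size=4):
    """A hedged usage sketch (hypothetical helper, not used elsewhere in the library):
    build the random negative sampler and assemble one small training batch. Assumes
    `args` comes from setup_parser() and points at a prepared dataset directory."""
    sampler = UniSampler(args)
    batch = sampler.sampling(sampler.get_train()[:batch_size])
    # positive_sample: LongTensor [batch_size, 3]; negative_sample: [batch_size, num_neg]
    return batch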
class BernSampler(BaseSampler):
"""Using bernoulli distribution to select whether to replace the head entity or tail entity.
Attributes:
lef_mean: Record the mean of head entity
rig_mean: Record the mean of tail entity
"""
def __init__(self, args):
super().__init__(args)
self.lef_mean, self.rig_mean = self.calc_bern()
def __normal_batch(self, h, r, t, neg_size):
"""Generate replace head/tail list according to Bernoulli distribution.
Args:
h: The head of triples.
r: The relation of triples.
t: The tail of triples.
neg_size: The number of negative samples corresponding to each triple
Returns:
numpy.array: replace head list and replace tail list.
"""
neg_size_h = 0
neg_size_t = 0
prob = self.rig_mean[r] / (self.rig_mean[r] + self.lef_mean[r])
for i in range(neg_size):
if random.random() > prob:
neg_size_h += 1
else:
neg_size_t += 1
res = []
neg_list_h = []
neg_cur_size = 0
while neg_cur_size < neg_size_h:
neg_tmp_h = self.corrupt_head(t, r, num_max=(neg_size_h - neg_cur_size) * 2)
neg_list_h.append(neg_tmp_h)
neg_cur_size += len(neg_tmp_h)
if neg_list_h != []:
neg_list_h = np.concatenate(neg_list_h)
for hh in neg_list_h[:neg_size_h]:
res.append((hh, r, t))
neg_list_t = []
neg_cur_size = 0
while neg_cur_size < neg_size_t:
neg_tmp_t = self.corrupt_tail(h, r, num_max=(neg_size_t - neg_cur_size) * 2)
neg_list_t.append(neg_tmp_t)
neg_cur_size += len(neg_tmp_t)
if neg_list_t != []:
neg_list_t = np.concatenate(neg_list_t)
for tt in neg_list_t[:neg_size_t]:
res.append((h, r, tt))
return res
def sampling(self, data):
"""Using bernoulli distribution to select whether to replace the head entity or tail entity.
Args:
data: The triples used to be sampled.
Returns:
batch_data: The training data.
"""
batch_data = {}
neg_ent_sample = []
batch_data['mode'] = 'bern'
for h, r, t in data:
neg_ent = self.__normal_batch(h, r, t, self.args.num_neg)
neg_ent_sample += neg_ent
batch_data["positive_sample"] = torch.LongTensor(np.array(data))
batch_data["negative_sample"] = torch.LongTensor(np.array(neg_ent_sample))
return batch_data
def calc_bern(self):
"""Calculating the lef_mean and rig_mean.
Returns:
lef_mean: Record the mean of head entity.
rig_mean: Record the mean of tail entity.
"""
h_of_r = ddict(set)
t_of_r = ddict(set)
freqRel = ddict(float)
lef_mean = ddict(float)
rig_mean = ddict(float)
for h, r, t in self.train_triples:
freqRel[r] += 1.0
h_of_r[r].add(h)
t_of_r[r].add(t)
for r in h_of_r:
lef_mean[r] = freqRel[r] / len(h_of_r[r])
rig_mean[r] = freqRel[r] / len(t_of_r[r])
return lef_mean, rig_mean
@staticmethod
def sampling_keys():
return ['positive_sample', 'negative_sample', 'mode']
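# Note on BernSampler: lef_mean[r] is the average number of tails per head (tph) and
# rig_mean[r] the average number of heads per tail (hpt), so __normal_batch corrupts the
# head with probability tph / (tph + hpt) and the tail otherwise (the Bernoulli trick
# commonly attributed to TransH).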
class AdvSampler(BaseSampler):
"""Self-adversarial negative sampling, in math:
p\left(h_{j}^{\prime}, r, t_{j}^{\prime} \mid\left\{\left(h_{i}, r_{i}, t_{i}\right)\right\}\right)=\frac{\exp \alpha f_{r}\left(\mathbf{h}_{j}^{\prime}, \mathbf{t}_{j}^{\prime}\right)}{\sum_{i} \exp \alpha f_{r}\left(\mathbf{h}_{i}^{\prime}, \mathbf{t}_{i}^{\prime}\right)}
Attributes:
freq_hr: The count of (h, r) pairs.
freq_tr: The count of (t, r) pairs.
"""
def __init__(self, args):
super().__init__(args)
self.freq_hr, self.freq_tr = self.calc_freq()
def sampling(self, pos_sample):
"""Self-adversarial negative sampling.
Args:
data: The triples used to be sampled.
Returns:
batch_data: The training data.
"""
data = pos_sample.numpy().tolist()
adv_sampling = []
for h, r, t in data:
weight = self.freq_hr[(h, r)] + self.freq_tr[(t, r)]
adv_sampling.append(weight)
adv_sampling = torch.tensor(adv_sampling, dtype=torch.float32).cuda()
adv_sampling = torch.sqrt(1 / adv_sampling)
return adv_sampling
def calc_freq(self):
"""Calculating the freq_hr and freq_tr.
Returns:
freq_hr: The count of (h, r) pairs.
freq_tr: The count of (t, r) pairs.
"""
freq_hr, freq_tr = {}, {}
for h, r, t in self.train_triples:
if (h, r) not in freq_hr:
freq_hr[(h, r)] = self.args.freq_init
else:
freq_hr[(h, r)] += 1
if (t, r) not in freq_tr:
freq_tr[(t, r)] = self.args.freq_init
else:
freq_tr[(t, r)] += 1
return freq_hr, freq_tr
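# Note on AdvSampler: sampling() returns one weight per positive triple,
# sqrt(1 / (freq_hr[(h, r)] + freq_tr[(t, r)])), which down-weights frequent patterns in
# the loss in the spirit of word2vec-style subsampling.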
class AllSampler(RevSampler):
"""Merging triples which have same head and relation, all false tail entities are taken as negative samples.
"""
def __init__(self, args):
super().__init__(args)
# self.num_rel_without_rev = self.args.num_rel // 2
def sampling(self, data):
"""Randomly sampling from the merged triples.
Args:
data: The triples used to be sampled.
Returns:
batch_data: The training data.
"""
        # sample_id = []  # whether the relation in the triple is a reverse one: 1 for reverse, 0 otherwise
batch_data = {}
table = torch.zeros(len(data), self.args.num_ent)
for id, (h, r, _) in enumerate(data):
hr_sample = self.hr2t_train[(h, r)]
table[id][hr_sample] = 1
# if r > self.num_rel_without_rev:
# sample_id.append(1)
# else:
# sample_id.append(0)
batch_data["sample"] = torch.LongTensor(np.array(data))
batch_data["label"] = table.float()
# batch_data["sample_id"] = torch.LongTensor(sample_id)
return batch_data
def sampling_keys(self):
return ["sample", "label"]
class CrossESampler(BaseSampler):
    # TODO: the class name still needs discussion
def __init__(self, args):
super().__init__(args)
self.neg_weight = float(self.args.neg_weight / self.args.num_ent)
def sampling(self, data):
        '''Each sample is used for both head prediction and tail prediction.'''
batch_data = {}
hr_label = self.init_label(len(data))
tr_label = self.init_label(len(data))
for id, (h, r, t) in enumerate(data):
hr_sample = self.hr2t_train[(h, r)]
hr_label[id][hr_sample] = 1.0
tr_sample = self.rt2h_train[(r, t)]
tr_label[id][tr_sample] = 1.0
batch_data["sample"] = torch.LongTensor(data)
batch_data["hr_label"] = hr_label.float()
batch_data["tr_label"] = tr_label.float()
return batch_data
def init_label(self, row):
label = torch.rand(row, self.args.num_ent)
label = (label > self.neg_weight).float()
label -= 1.0
return label
def sampling_keys(self):
return ["sample", "label"]
class ConvSampler(RevSampler): #TODO:SEGNN
"""Merging triples which have same head and relation, all false tail entities are taken as negative samples.
The triples which have same head and relation are treated as one triple.
Attributes:
label: Mask the false tail as negative samples.
triples: The triples used to be sampled.
"""
def __init__(self, args):
self.label = None
self.triples = None
super().__init__(args)
super().get_hr_trian()
def sampling(self, pos_hr_t):
"""Randomly sampling from the merged triples.
Args:
pos_hr_t: The triples ((head,relation) pairs) used to be sampled.
Returns:
batch_data: The training data.
"""
batch_data = {}
t_triples = []
self.label = torch.zeros(self.args.train_bs, self.args.num_ent)
self.triples = torch.LongTensor([hr for hr , _ in pos_hr_t])
for hr, t in pos_hr_t:
t_triples.append(t)
for id, hr_sample in enumerate([t for _ ,t in pos_hr_t]):
self.label[id][hr_sample] = 1
batch_data["sample"] = self.triples
batch_data["label"] = self.label
batch_data["t_triples"] = t_triples
return batch_data
def sampling_keys(self):
return ["sample", "label", "t_triples"]
class XTransESampler(RevSampler):
"""Random negative sampling and recording neighbor entities.
Attributes:
triples: The triples used to be sampled.
neg_sample: The negative samples.
        h_neighbor: The neighbors of the sampled entities.
        h_mask: The mask of effective neighbors.
max_neighbor: The maximum of the neighbor entities.
"""
def __init__(self, args):
super().__init__(args)
super().get_h2rt_t2hr_from_train()
self.triples = None
self.neg_sample = None
self.h_neighbor = None
self.h_mask = None
self.max_neighbor = 200
def sampling(self, data):
"""Random negative sampling and recording neighbor entities.
Args:
data: The triples used to be sampled.
Returns:
batch_data: The training data.
"""
batch_data = {}
neg_ent_sample = []
mask = np.zeros([self.args.train_bs, 20000], dtype=float)
h_neighbor = np.zeros([self.args.train_bs, 20000, 2])
for id, triples in enumerate(data):
h,r,t = triples
num_h_neighbor = len(self.h2rt_train[h])
h_neighbor[id][0:num_h_neighbor] = np.array(self.h2rt_train[h])
mask[id][0:num_h_neighbor] = np.ones([num_h_neighbor])
neg_tail = self.tail_batch(h, r, t, self.args.num_neg)
neg_ent_sample.append(neg_tail)
self.triples = data
self.neg_sample = neg_ent_sample
self.h_neighbor = h_neighbor[:, :self.max_neighbor]
self.h_mask = mask[:, :self.max_neighbor]
batch_data["positive_sample"] = torch.LongTensor(self.triples)
batch_data['negative_sample'] = torch.LongTensor(self.neg_sample)
batch_data['neighbor'] = torch.LongTensor(self.h_neighbor)
batch_data['mask'] = torch.LongTensor(self.h_mask)
batch_data['mode'] = "tail-batch"
return batch_data
def get_sampling_keys(self):
return ['positive_sample', 'negative_sample', 'neighbor', 'mask', 'mode']
class GraphSampler(RevSampler):
"""Graph based sampling in neural network.
Attributes:
entity: The entities of sampled triples.
relation: The relation of sampled triples.
triples: The sampled triples.
graph: The graph structured sampled triples by dgl.graph in DGL.
norm: The edge norm in graph.
label: Mask the false tail as negative samples.
"""
def __init__(self, args):
super().__init__(args)
self.entity = None
self.relation = None
self.triples = None
self.graph = None
self.norm = None
self.label = None
def sampling(self, pos_triples):
"""Graph based sampling in neural network.
Args:
pos_triples: The triples used to be sampled.
Returns:
batch_data: The training data.
"""
batch_data = {}
pos_triples = np.array(pos_triples)
pos_triples, self.entity = self.sampling_positive(pos_triples)
head_triples = self.sampling_negative('head', pos_triples, self.args.num_neg)
tail_triples = self.sampling_negative('tail', pos_triples, self.args.num_neg)
self.triples = np.concatenate((pos_triples,head_triples,tail_triples))
batch_data['entity'] = self.entity
batch_data['triples'] = self.triples
self.label = torch.zeros((len(self.triples),1))
self.label[0 : self.args.train_bs] = 1
batch_data['label'] = self.label
split_size = int(self.args.train_bs * 0.5)
graph_split_ids = np.random.choice(
self.args.train_bs,
size=split_size,
replace=False
)
head,rela,tail = pos_triples.transpose()
head = torch.tensor(head[graph_split_ids], dtype=torch.long).contiguous()
rela = torch.tensor(rela[graph_split_ids], dtype=torch.long).contiguous()
tail = torch.tensor(tail[graph_split_ids], dtype=torch.long).contiguous()
self.graph, self.relation, self.norm = self.build_graph(len(self.entity), (head,rela,tail), -1)
batch_data['graph'] = self.graph
batch_data['relation'] = self.relation
batch_data['norm'] = self.norm
return batch_data
def get_sampling_keys(self):
return ['graph','triples','label','entity','relation','norm']
def sampling_negative(self, mode, pos_triples, num_neg):
"""Random negative sampling without filtering
Args:
mode: The mode of negtive sampling.
pos_triples: The positive triples.
num_neg: The number of negative samples corresponding to each triple.
Results:
neg_samples: The negative triples.
"""
neg_random = np.random.choice(
len(self.entity),
size = num_neg * len(pos_triples)
)
neg_samples = np.tile(pos_triples, (num_neg, 1))
if mode == 'head':
neg_samples[:,0] = neg_random
elif mode == 'tail':
neg_samples[:,2] = neg_random
return neg_samples
def build_graph(self, num_ent, triples, power):
"""Using sampled triples to build a graph by dgl.graph in DGL.
Args:
num_ent: The number of entities.
triples: The positive sampled triples.
power: The power index for normalization.
Returns:
rela: The relation of sampled triples.
graph: The graph structured sampled triples by dgl.graph in DGL.
edge_norm: The edge norm in graph.
"""
head, rela, tail = triples[0], triples[1], triples[2]
graph = dgl.graph(([], []))
graph.add_nodes(num_ent)
graph.add_edges(head, tail)
node_norm = self.comp_deg_norm(graph, power)
edge_norm = self.node_norm_to_edge_norm(graph,node_norm)
rela = torch.tensor(rela)
return graph, rela, edge_norm
def comp_deg_norm(self, graph, power=-1):
"""Calculating the normalization node weight.
Args:
graph: The graph structured sampled triples by dgl.graph in DGL.
power: The power index for normalization.
Returns:
tensor: The node weight of normalization.
"""
graph = graph.local_var()
in_deg = graph.in_degrees(range(graph.number_of_nodes())).float().numpy()
norm = in_deg.__pow__(power)
norm[np.isinf(norm)] = 0
return torch.from_numpy(norm)
    def node_norm_to_edge_norm(self, graph, node_norm):
"""Calculating the normalization edge weight.
Args:
graph: The graph structured sampled triples by dgl.graph in DGL.
node_norm: The node weight of normalization.
Returns:
tensor: The edge weight of normalization.
"""
graph = graph.local_var()
# convert to edge norm
graph.ndata['norm'] = node_norm.view(-1,1)
graph.apply_edges(lambda edges : {'norm' : edges.dst['norm']})
return graph.edata['norm']
def sampling_positive(self,positive_triples):
"""Regenerate positive sampling.
Args:
positive_triples: The positive sampled triples.
        Returns:
            The re-indexed triples and the original entity ids of the sampled sub-graph.
"""
edges = np.random.choice(
np.arange(len(positive_triples)),
size = self.args.train_bs,
replace=False
)
edges = positive_triples[edges]
head, rela, tail = np.array(edges).transpose()
entity, index = np.unique((head, tail), return_inverse=True)
head, tail = np.reshape(index, (2, -1))
return np.stack((head,rela,tail)).transpose(), \
torch.from_numpy(entity).view(-1,1).long()
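# Summary of GraphSampler.sampling: it draws train_bs positive triples, re-indexes their
# entities into a compact id space, appends num_neg head and tail corruptions per positive,
# and builds a dgl graph from a random half of the positive edges with in-degree based
# norms copied onto the edges.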
class KBATSampler(BaseSampler):
"""Graph based n_hop neighbours in neural network.
Attributes:
n_hop: The graph of n_hop neighbours.
graph: The adjacency graph.
neighbours: The neighbours of sampled triples.
        adj_matrix: The adjacency information (edge index and relation ids) of the training graph.
        entity: The unique entities appearing in the sampled triples.
triples_GAT_pos: Positive triples.
triples_GAT_neg: Negative triples.
triples_Con: All triples including positive triples and negative triples.
label: Mask the false tail as negative samples.
"""
def __init__(self, args):
super().__init__(args)
self.n_hop = None
self.graph = None
self.neighbours = None
self.adj_matrix = None
self.entity = None
self.triples_GAT_pos = None
self.triples_GAT_neg = None
self.triples_Con = None
self.label = None
self.get_neighbors()
def sampling(self, pos_triples):
"""Graph based n_hop neighbours in neural network.
Args:
pos_triples: The triples used to be sampled.
Returns:
batch_data: The training data.
"""
batch_data = {}
#--------------------KBAT-Sampler------------------------------------------
self.entity = self.get_unique_entity(pos_triples)
head_triples = self.sam_negative('head', pos_triples, self.args.num_neg)
tail_triples = self.sam_negative('tail', pos_triples, self.args.num_neg)
self.triples_GAT_neg = torch.tensor(np.concatenate((head_triples, tail_triples)))
batch_data['triples_GAT_pos'] = torch.tensor(pos_triples)
batch_data['triples_GAT_neg'] = self.triples_GAT_neg
head, rela, tail = torch.tensor(self.train_triples).t()
self.adj_matrix = (torch.stack((tail, head)), rela)
batch_data['adj_matrix'] = self.adj_matrix
self.n_hop = self.get_batch_nhop_neighbors_all()
batch_data['n_hop'] = self.n_hop
#--------------------ConvKB-Sampler------------------------------------------
head_triples = self.sampling_negative('head', pos_triples, self.args.num_neg)
tail_triples = self.sampling_negative('tail', pos_triples, self.args.num_neg)
self.triples_Con = np.concatenate((pos_triples, head_triples, tail_triples))
self.label = -torch.ones((len(self.triples_Con),1))
self.label[0 : self.args.train_bs] = 1
batch_data['triples_Con'] = self.triples_Con
batch_data['label'] = self.label
return batch_data
def get_sampling_keys(self):
return ['adj_matrix', 'n_hop', 'triples_GAT_pos',
'triples_GAT_neg', 'triples_Con' , 'label']
def bfs(self, graph, source, nbd_size=2):
"""Using depth first search algorithm to generate n_hop neighbor graph.
Args:
graph: The adjacency graph.
source: Head node.
nbd_size: The number of hops.
Returns:
neighbors: N_hop neighbor graph.
"""
visit = {}
distance = {}
parent = {}
distance_lengths = {}
visit[source] = 1
distance[source] = 0
parent[source] = (-1, -1)
q = queue.Queue()
q.put((source, -1))
while(not q.empty()):
top = q.get()
if top[0] in graph.keys():
for target in graph[top[0]].keys():
if(target in visit.keys()):
continue
else:
q.put((target, graph[top[0]][target]))
distance[target] = distance[top[0]] + 1
visit[target] = 1
if distance[target] > 2:
continue
                        parent[target] = (top[0], graph[top[0]][target]) # record the parent node id and the relation id
if distance[target] not in distance_lengths.keys():
distance_lengths[distance[target]] = 1
neighbors = {}
for target in visit.keys():
if(distance[target] != nbd_size):
continue
edges = [-1, parent[target][1]]
relations = []
entities = [target]
temp = target
while(parent[temp] != (-1, -1)):
relations.append(parent[temp][1])
entities.append(parent[temp][0])
temp = parent[temp][0]
if(distance[target] in neighbors.keys()):
neighbors[distance[target]].append(
                    (tuple(relations), tuple(entities[:-1]))) # drop the known source; keep the entities and relations of the first two hops
else:
neighbors[distance[target]] = [
(tuple(relations), tuple(entities[:-1]))]
return neighbors
def get_neighbors(self, nbd_size=2):
"""Getting the relation and entity of the source in the n_hop neighborhood.
Args:
nbd_size: The number of hops.
Returns:
self.neighbours: Record the relation and entity of the source in the n_hop neighborhood.
"""
self.graph = {}
for triple in self.train_triples:
head = triple[0]
rela = triple[1]
tail = triple[2]
if(head not in self.graph.keys()):
self.graph[head] = {}
self.graph[head][tail] = rela
else:
self.graph[head][tail] = rela
neighbors = {}
'''
import pickle
print("Opening node_neighbors pickle object")
file = self.args.data_path + "/2hop.pickle"
with open(file, 'rb') as handle:
self.neighbours = pickle.load(handle)
return
'''
start_time = time.time()
print("Start Graph BFS")
for head in self.graph.keys():
temp_neighbors = self.bfs(self.graph, head, nbd_size)
for distance in temp_neighbors.keys():
if(head in neighbors.keys()):
if(distance in neighbors[head].keys()):
neighbors[head][distance].append(
temp_neighbors[distance])
else:
neighbors[head][distance] = temp_neighbors[distance]
else:
neighbors[head] = {}
neighbors[head][distance] = temp_neighbors[distance]
print("Finish BFS, time taken ", time.time() - start_time)
self.neighbours = neighbors
def get_unique_entity(self, triples):
"""Getting the set of entity.
Args:
triples: The sampled triples.
Returns:
numpy.array: The set of entity
"""
train_triples = np.array(triples)
train_entities = np.concatenate((train_triples[:,0], train_triples[:,2]))
return np.unique(train_entities)
def get_batch_nhop_neighbors_all(self, nbd_size=2):
"""Getting n_hop neighbors of all entities in batch.
Args:
nbd_size: The number of hops.
Returns:
The set of n_hop neighbors.
"""
batch_source_triples = []
for source in self.entity:
if source in self.neighbours.keys():
nhop_list = self.neighbours[source][nbd_size]
for i, tup in enumerate(nhop_list):
if(self.args.partial_2hop and i >= 2):
break
batch_source_triples.append([source,
tup[0][-1],
tup[0][0],
tup[1][0]])
n_hop = np.array(batch_source_triples).astype(np.int32)
return torch.autograd.Variable(torch.LongTensor(n_hop))
def sampling_negative(self, mode, pos_triples, num_neg):
"""Random negative sampling.
Args:
            mode: The mode of negative sampling.
            pos_triples: The positive triples.
            num_neg: The number of negative samples corresponding to each triple.
        Returns:
neg_samples: The negative triples.
"""
neg_samples = np.tile(pos_triples, (num_neg, 1))
if mode == 'head':
neg_head = []
for h, r, t in pos_triples:
neg_head.append(self.head_batch(h, r, t, num_neg))
neg_samples[:,0] = torch.tensor(neg_head).t().reshape(-1)
elif mode == 'tail':
neg_tail = []
for h, r, t in pos_triples:
neg_tail.append(self.tail_batch(h, r, t, num_neg))
neg_samples[:,2] = torch.tensor(neg_tail).t().reshape(-1)
return neg_samples
def sam_negative(self, mode, pos_triples, num_neg):
"""Random negative sampling without filter.
Args:
mode: The mode of negtive sampling.
pos_triples: The positive triples.
num_neg: The number of negative samples corresponding to each triple.
Results:
neg_samples: The negative triples.
"""
neg_random = np.random.choice(
len(self.entity),
size = num_neg * len(pos_triples)
)
neg_samples = np.tile(pos_triples, (num_neg, 1))
if mode == 'head':
neg_samples[:,0] = neg_random
elif mode == 'tail':
neg_samples[:,2] = neg_random
return neg_samples
class CompGCNSampler(GraphSampler):
"""Graph based sampling in neural network.
Attributes:
relation: The relation of sampled triples.
triples: The sampled triples.
graph: The graph structured sampled triples by dgl.graph in DGL.
norm: The edge norm in graph.
label: Mask the false tail as negative samples.
"""
def __init__(self, args):
super().__init__(args)
self.relation = None
self.triples = None
self.graph = None
self.norm = None
self.label = None
super().get_hr_trian()
self.graph, self.relation, self.norm = \
self.build_graph(self.args.num_ent, np.array(self.t_triples).transpose(), -0.5)
def sampling(self, pos_hr_t):
"""Graph based n_hop neighbours in neural network.
Args:
pos_hr_t: The (hr, t) pairs to be sampled.
Returns:
batch_data: The training data.
"""
batch_data = {}
self.label = torch.zeros(self.args.train_bs, self.args.num_ent)
self.triples = torch.LongTensor([hr for hr , _ in pos_hr_t])
for id, hr_sample in enumerate([t for _ ,t in pos_hr_t]):
self.label[id][hr_sample] = 1
batch_data['sample'] = self.triples
batch_data['label'] = self.label
batch_data['graph'] = self.graph
batch_data['relation'] = self.relation
batch_data['norm'] = self.norm
return batch_data
def get_sampling_keys(self):
return ['sample','label','graph','relation','norm']
def node_norm_to_edge_norm(self, graph, node_norm):
"""Calculating the normalization edge weight.
Args:
graph: The graph structured sampled triples by dgl.graph in DGL.
node_norm: The node weight of normalization.
Returns:
norm: The edge weight of normalization.
"""
graph.ndata['norm'] = node_norm
graph.apply_edges(lambda edges: {'norm': edges.dst['norm'] * edges.src['norm']})
norm = graph.edata.pop('norm').squeeze()
return norm
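# Note (descriptive): with node norms of the form deg ** -0.5 (see the -0.5
# exponent passed to build_graph above), the edge weight computed here is
# norm_uv = norm_dst * norm_src = 1 / sqrt(deg_u * deg_v), the symmetric
# normalization commonly used in GCN-style message passing.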
class TestSampler(object):
"""Sampling triples and recording positive triples for testing.
Attributes:
sampler: The function of training sampler.
hr2t_all: Record the tail corresponding to the same head and relation.
rt2h_all: Record the head corresponding to the same tail and relation.
num_ent: The count of entities.
"""
def __init__(self, sampler):
self.sampler = sampler
self.hr2t_all = ddict(set)
self.rt2h_all = ddict(set)
self.get_hr2t_rt2h_from_all()
self.num_ent = sampler.args.num_ent
def get_hr2t_rt2h_from_all(self):
"""Get the set of hr2t and rt2h from all datasets(train, valid, and test), the data type is tensor.
Update:
self.hr2t_all: The set of hr2t.
self.rt2h_all: The set of rt2h.
"""
self.all_true_triples = self.sampler.get_all_true_triples()
for h, r, t in self.all_true_triples:
self.hr2t_all[(h, r)].add(t)
self.rt2h_all[(r, t)].add(h)
for h, r in self.hr2t_all:
self.hr2t_all[(h, r)] = torch.tensor(list(self.hr2t_all[(h, r)]))
for r, t in self.rt2h_all:
self.rt2h_all[(r, t)] = torch.tensor(list(self.rt2h_all[(r, t)]))
def sampling(self, data):
"""Sampling triples and recording positive triples for testing.
Args:
data: The triples to be sampled.
Returns:
batch_data: The data to be evaluated.
"""
batch_data = {}
head_label = torch.zeros(len(data), self.num_ent)
tail_label = torch.zeros(len(data), self.num_ent)
for idx, triple in enumerate(data):
head, rel, tail = triple
head_label[idx][self.rt2h_all[(rel, tail)]] = 1.0
tail_label[idx][self.hr2t_all[(head, rel)]] = 1.0
batch_data["positive_sample"] = torch.tensor(data)
batch_data["head_label"] = head_label
batch_data["tail_label"] = tail_label
return batch_data
def get_sampling_keys(self):
return ["positive_sample", "head_label", "tail_label"]
class GraphTestSampler(object):
"""Sampling graph for testing.
Attributes:
sampler: The function of training sampler.
hr2t_all: Record the tail corresponding to the same head and relation.
rt2h_all: Record the head corresponding to the same tail and relation.
num_ent: The count of entities.
triples: The training triples.
"""
def __init__(self, sampler):
self.sampler = sampler
self.hr2t_all = ddict(set)
self.rt2h_all = ddict(set)
self.get_hr2t_rt2h_from_all()
self.num_ent = sampler.args.num_ent
self.triples = sampler.train_triples
def get_hr2t_rt2h_from_all(self):
"""Get the set of hr2t and rt2h from all datasets(train, valid, and test), the data type is tensor.
Update:
self.hr2t_all: The set of hr2t.
self.rt2h_all: The set of rt2h.
"""
self.all_true_triples = self.sampler.get_all_true_triples()
for h, r, t in self.all_true_triples:
self.hr2t_all[(h, r)].add(t)
self.rt2h_all[(r, t)].add(h)
for h, r in self.hr2t_all:
self.hr2t_all[(h, r)] = torch.tensor(list(self.hr2t_all[(h, r)]))
for r, t in self.rt2h_all:
self.rt2h_all[(r, t)] = torch.tensor(list(self.rt2h_all[(r, t)]))
def sampling(self, data):
"""Sampling graph for testing.
Args:
data: The triples to be sampled.
Returns:
batch_data: The data to be evaluated.
"""
batch_data = {}
head_label = torch.zeros(len(data), self.num_ent)
tail_label = torch.zeros(len(data), self.num_ent)
for idx, triple in enumerate(data):
# from IPython import embed;embed();exit()
head, rel, tail = triple
head_label[idx][self.rt2h_all[(rel, tail)]] = 1.0
tail_label[idx][self.hr2t_all[(head, rel)]] = 1.0
batch_data["positive_sample"] = torch.tensor(data)
batch_data["head_label"] = head_label
batch_data["tail_label"] = tail_label
head, rela, tail = np.array(self.triples).transpose()
graph, rela, norm = self.sampler.build_graph(self.num_ent, (head, rela, tail), -1)
batch_data["graph"] = graph
batch_data["rela"] = rela
batch_data["norm"] = norm
batch_data["entity"] = torch.arange(0, self.num_ent, dtype=torch.long).view(-1,1)
return batch_data
def get_sampling_keys(self):
return ["positive_sample", "head_label", "tail_label",\
"graph", "rela", "norm", "entity"]
class CompGCNTestSampler(object):
"""Sampling graph for testing.
Attributes:
sampler: The function of training sampler.
hr2t_all: Record the tail corresponding to the same head and relation.
rt2h_all: Record the head corresponding to the same tail and relation.
num_ent: The count of entities.
triples: The training triples.
"""
def __init__(self, sampler):
self.sampler = sampler
self.hr2t_all = ddict(set)
self.rt2h_all = ddict(set)
self.get_hr2t_rt2h_from_all()
self.num_ent = sampler.args.num_ent
self.triples = sampler.t_triples
def get_hr2t_rt2h_from_all(self):
"""Get the set of hr2t and rt2h from all datasets(train, valid, and test), the data type is tensor.
Update:
self.hr2t_all: The set of hr2t.
self.rt2h_all: The set of rt2h.
"""
self.all_true_triples = self.sampler.get_all_true_triples()
for h, r, t in self.all_true_triples:
self.hr2t_all[(h, r)].add(t)
self.rt2h_all[(r, t)].add(h)
for h, r in self.hr2t_all:
self.hr2t_all[(h, r)] = torch.tensor(list(self.hr2t_all[(h, r)]))
for r, t in self.rt2h_all:
self.rt2h_all[(r, t)] = torch.tensor(list(self.rt2h_all[(r, t)]))
def sampling(self, data):
"""Sampling graph for testing.
Args:
data: The triples to be sampled.
Returns:
batch_data: The data to be evaluated.
"""
batch_data = {}
head_label = torch.zeros(len(data), self.num_ent)
tail_label = torch.zeros(len(data), self.num_ent)
for idx, triple in enumerate(data):
# from IPython import embed;embed();exit()
head, rel, tail = triple
head_label[idx][self.rt2h_all[(rel, tail)]] = 1.0
tail_label[idx][self.hr2t_all[(head, rel)]] = 1.0
batch_data["positive_sample"] = torch.tensor(data)
batch_data["head_label"] = head_label
batch_data["tail_label"] = tail_label
graph, relation, norm = \
self.sampler.build_graph(self.num_ent, np.array(self.triples).transpose(), -0.5)
batch_data["graph"] = graph
batch_data["rela"] = relation
batch_data["norm"] = norm
batch_data["entity"] = torch.arange(0, self.num_ent, dtype=torch.long).view(-1,1)
return batch_data
def get_sampling_keys(self):
return ["positive_sample", "head_label", "tail_label",\
"graph", "rela", "norm", "entity"]
class SEGNNTrainProcess(RevSampler):
def __init__(self, args):
super().__init__(args)
self.args = args
self.use_weight = self.args.use_weight
#Parameters when constructing graph
self.src_list = []
self.dst_list = []
self.rel_list = []
self.hr2eid = ddict(list)
self.rt2eid = ddict(list)
self.ent_head = []
self.ent_tail = []
self.rel = []
self.query = []
self.label = []
self.rm_edges = []
self.set_scaling_weight = []
self.hr2t_train_1 = ddict(set)
self.ht2r_train_1 = ddict(set)
self.rt2h_train_1 = ddict(set)
self.get_h2rt_t2hr_from_train()
self.construct_kg()
self.get_sampling()
def get_h2rt_t2hr_from_train(self):
for h, r, t in self.train_triples:
if r <= self.args.num_rel:
self.ent_head.append(h)
self.rel.append(r)
self.ent_tail.append(t)
self.hr2t_train_1[(h, r)].add(t)
self.rt2h_train_1[(r, t)].add(h)
for h, r in self.hr2t_train:
self.hr2t_train_1[(h, r)] = np.array(list(self.hr2t_train[(h, r)]))
for r, t in self.rt2h_train:
self.rt2h_train_1[(r, t)] = np.array(list(self.rt2h_train[(r, t)]))
def __len__(self):
return len(self.label)
def __getitem__(self, item):
h, r, t = self.query[item]
label = self.get_onehot_label(self.label[item])
rm_edges = torch.tensor(self.rm_edges[item], dtype=torch.int64)
rm_num = math.ceil(rm_edges.shape[0] * self.args.rm_rate)
rm_inds = torch.randperm(rm_edges.shape[0])[:rm_num]
rm_edges = rm_edges[rm_inds]
return (h, r, t), label, rm_edges
def get_onehot_label(self, label):
onehot_label = torch.zeros(self.args.num_ent)
onehot_label[label] = 1
if self.args.label_smooth != 0.0:
onehot_label = (1.0 - self.args.label_smooth) * onehot_label + (1.0 / self.args.num_ent)
return onehot_label
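# Worked example of the smoothing above (toy values): with num_ent = 5,
# label = [2] and label_smooth = 0.1, the target becomes
# 0.9 * one_hot + 1.0 / 5 = tensor([0.2, 0.2, 1.1, 0.2, 0.2]);
# for realistic entity counts the additive 1 / num_ent term is tiny.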
def get_sampling(self):
for k, v in self.hr2t_train_1.items():
self.query.append((k[0], k[1], -1))
self.label.append(list(v))
self.rm_edges.append(self.hr2eid[k])
for k, v in self.rt2h_train_1.items():
self.query.append((k[1], k[0] + self.args.num_rel, -1))
self.label.append(list(v))
self.rm_edges.append(self.rt2eid[k])
def construct_kg(self, directed=False):
"""
Construct the kg.
:param directed: if False, an inverse version of each edge is added to make an undirected graph.
False when training the SE-GNN model, True for computing SE metrics.
:return:
"""
# eid: record the edge id of queries, for randomly removing some edges when training
eid = 0
for h, t, r in zip(self.ent_head, self.ent_tail, self.rel):
if directed:
self.src_list.extend([h])
self.dst_list.extend([t])
self.rel_list.extend([r])
self.hr2eid[(h, r)].extend([eid])
self.rt2eid[(r, t)].extend([eid])
eid += 1
else:
# include the inverse edges
# inverse rel id: original id + rel num
self.src_list.extend([h, t])
self.dst_list.extend([t, h])
self.rel_list.extend([r, r + self.args.num_rel])
self.hr2eid[(h, r)].extend([eid, eid + 1])
self.rt2eid[(r, t)].extend([eid, eid + 1])
eid += 2
self.src_list, self.dst_list,self.rel_list = torch.tensor(self.src_list), torch.tensor(self.dst_list), torch.tensor(self.rel_list)
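# Illustrative sketch (toy ids): for one training edge (h=0, r=3, t=7) with
# directed=False and num_rel = 11, this stores src = [0, 7], dst = [7, 0],
# rel = [3, 3 + 11], and records both edge ids in hr2eid[(0, 3)] and
# rt2eid[(3, 7)] so the pair can be removed together during edge dropping.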
class SEGNNTrainSampler(object):
def __init__(self, args):
self.args = args
self.get_train_1 = SEGNNTrainProcess(args)
self.get_valid_1 = SEGNNTrainProcess(args).get_valid()
self.get_test_1 = SEGNNTrainProcess(args).get_test()
def get_train(self):
return self.get_train_1
def get_valid(self):
return self.get_valid_1
def get_test(self):
return self.get_test_1
def sampling(self, data):
src = [d[0][0] for d in data]
rel = [d[0][1] for d in data]
dst = [d[0][2] for d in data]
label = [d[1] for d in data] # list of list
rm_edges = [d[2] for d in data]
src = torch.tensor(src, dtype=torch.int64)
rel = torch.tensor(rel, dtype=torch.int64)
dst = torch.tensor(dst, dtype=torch.int64)
label = torch.stack(label, dim=0)
rm_edges = torch.cat(rm_edges, dim=0)
return (src, rel, dst), label, rm_edges
class SEGNNTestSampler(Dataset):
def __init__(self, sampler):
super().__init__()
self.sampler = sampler
#Parameters when constructing graph
self.hr2t_all = ddict(set)
self.rt2h_all = ddict(set)
self.get_hr2t_rt2h_from_all()
def get_hr2t_rt2h_from_all(self):
"""Get the set of hr2t and rt2h from all datasets(train, valid, and test), the data type is tensor.
Update:
self.hr2t_all: The set of hr2t.
self.rt2h_all: The set of rt2h.
"""
for h, r, t in self.sampler.get_train_1.all_true_triples:
self.hr2t_all[(h, r)].add(t)
# self.rt2h_all[(r, t)].add(h)
for h, r in self.hr2t_all:
self.hr2t_all[(h, r)] = torch.tensor(list(self.hr2t_all[(h, r)]))
# for r, t in self.rt2h_all:
# self.rt2h_all[(r, t)] = torch.tensor(list(self.rt2h_all[(r, t)]))
def sampling(self, data):
"""Sampling triples and recording positive triples for testing.
Args:
data: The triples to be sampled.
Returns:
batch_data: The data to be evaluated.
"""
batch_data = {}
head_label = torch.zeros(len(data), self.sampler.args.num_ent)
tail_label = torch.zeros(len(data), self.sampler.args.num_ent)
filter_head = torch.zeros(len(data), self.sampler.args.num_ent)
filter_tail = torch.zeros(len(data), self.sampler.args.num_ent)
for idx, triple in enumerate(data):
head, rel, tail = triple
filter_tail[idx][self.hr2t_all[(head, rel)]] = -float('inf')
filter_tail[idx][tail] = 0
tail_label[idx][self.hr2t_all[(head, rel)]] = 1.0
batch_data["positive_sample"] = torch.tensor(data)
batch_data["filter_tail"] = filter_tail
batch_data["tail_label"] = tail_label
return batch_data
def get_sampling_keys(self):
return ["positive_sample", "filter_tail", "tail_label"]
'''Inherits from torch.Dataset'''
class KGDataset(Dataset):
def __init__(self, triples):
self.triples = triples
def __len__(self):
return len(self.triples)
def __getitem__(self, idx):
return self.triples[idx] | 46,126 | 34.757364 | 278 | py |
NeuralKG | NeuralKG-main/src/neuralkg/data/__init__.py | from .Sampler import *
from .KGDataModule import KGDataModule
from .DataPreprocess import *
from .base_data_module import BaseDataModule
from .RuleDataLoader import RuleDataLoader
| 180 | 29.166667 | 44 | py |
NeuralKG | NeuralKG-main/src/neuralkg/data/RuleDataLoader.py | import random
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import os
from collections import defaultdict as ddict
from IPython import embed
class RuleDataset(Dataset):
def __init__(self, args):
self.args = args
self.rule_p, self.rule_q, self.rule_r, self.confidences, self.tripleNum = [], [], [], [], []
with open(os.path.join(args.data_path, 'groudings.txt')) as f:
for line in f.readlines():
token = line.strip().split('\t')
for i in range(len(token)):
token[i] = token[i].strip('(').strip(')')
iUnseenPos = int(token[0])
self.tripleNum.append(iUnseenPos)
iFstHead = int(token[1])
iFstTail = int(token[3])
iFstRelation = int(token[2])
self.rule_p.append([iFstHead, iFstRelation, iFstTail])
iSndHead = int(token[4])
iSndTail = int(token[6])
iSndRelation = int(token[5])
self.rule_q.append([iSndHead, iSndRelation, iSndTail])
if len(token) == 8:
confidence = float(token[7])
self.rule_r.append([0, 0, 0])
else:
confidence = float(token[10])
iTrdHead = int(token[7])
iTrdTail = int(token[9])
iTrdRelation = int(token[8])
self.rule_r.append([iTrdHead, iTrdRelation, iTrdTail])
self.confidences.append(confidence)
self.len = len(self.confidences)
self.rule_p = torch.tensor(self.rule_p).to(self.args.gpu)
self.rule_q = torch.tensor(self.rule_q).to(self.args.gpu)
self.rule_r = torch.tensor(self.rule_r).to(self.args.gpu)
self.confidences = torch.tensor(self.confidences).to(self.args.gpu)
self.tripleNum = torch.tensor(self.tripleNum).to(self.args.gpu)
def __len__(self):
return self.len
def __getitem__(self, idx):
return (self.rule_p[idx], self.rule_q[idx], self.rule_r[idx]), self.confidences[idx], self.tripleNum[idx]
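# Expected groudings.txt line layout, inferred from the parsing above
# (tab-separated, parentheses stripped; the concrete numbers are invented):
#   3 (12) (4) (7) (7) (4) (25) 0.87              <- two-triple grounding
#   3 (12) (4) (7) (7) (4) (25) (25) (6) (40) 0.92 <- three-triple grounding
# token[0] is stored in tripleNum, each group is read as (head, relation, tail),
# and the last token is the rule confidence.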
class RuleDataLoader(DataLoader):
def __init__(self, args):
dataset = RuleDataset(args)
super(RuleDataLoader, self).__init__(
dataset=dataset,
batch_size=int(dataset.__len__()/args.num_batches),
shuffle=args.shuffle) | 2,474 | 37.671875 | 113 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/__init__.py | from .KGEModel import *
from .GNNModel import *
from .RuleModel import *
| 73 | 17.5 | 24 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/GNNModel/SEGNN.py | import torch
import torch.nn as nn
import dgl
import dgl.function as fn
from neuralkg import utils
from neuralkg.utils.tools import get_param
from neuralkg.model import ConvE
class SEGNN(nn.Module):
def __init__(self, args):
super(SEGNN, self).__init__()
self.device = torch.device("cuda:0")
self.args = args  # all configuration parameters
self.dataset = self.args.dataset_name  # dataset name, e.g. WN18RR
self.n_ent = self.args.num_ent  # number of entities (40943 for WN18RR)
self.n_rel = self.args.num_rel  # number of relations (11 for WN18RR)
self.emb_dim = self.args.emb_dim
# entity embedding
self.ent_emb = get_param(self.n_ent, self.emb_dim)  # initialize the entity embeddings
# gnn layer
self.kg_n_layer = self.args.kg_layer #1
# relation SE layer
self.edge_layers = nn.ModuleList([EdgeLayer(self.args) for _ in range(self.kg_n_layer)])
# entity SE layer
self.node_layers = nn.ModuleList([NodeLayer(self.args) for _ in range(self.kg_n_layer)])
# triple SE layer
self.comp_layers = nn.ModuleList([CompLayer(self.args) for _ in range(self.kg_n_layer)])
# relation embedding for aggregation
self.rel_embs = nn.ParameterList([get_param(self.n_rel * 2, self.emb_dim) for _ in range(self.kg_n_layer)])
# nn.ParameterList() is a container, similar to a list or tuple, used to store network weights and parameters.
# relation embedding for prediction
if self.args.pred_rel_w: #true
self.rel_w = get_param(self.emb_dim * self.kg_n_layer, self.emb_dim).to(self.device)
else:
self.pred_rel_emb = get_param(self.n_rel * 2, self.emb_dim)
self.predictor = ConvE(self.args) #(200, 250, 7)
self.ent_drop = nn.Dropout(self.args.ent_drop) #0.2
self.rel_drop = nn.Dropout(self.args.rel_drop) #0
self.act = nn.Tanh()
def forward(self, h_id, r_id, kg):
"""
matching computation between query (h, r) and answer t.
:param h_id: head entity id, (bs, )
:param r_id: relation id, (bs, )
:param kg: aggregation graph
:return: matching score, (bs, n_ent)
"""
# aggregate embedding
kg = kg.to(self.device)
ent_emb, rel_emb = self.aggragate_emb(kg)
head = ent_emb[h_id]
rel = rel_emb[r_id]
# (bs, n_ent)
score = self.predictor.score_func(head, rel, ent_emb) #[256, 40943]
return score
def aggragate_emb(self, kg):
"""
aggregate embedding.
:param kg:
:return:
"""
ent_emb = self.ent_emb
rel_emb_list = []
for edge_layer, node_layer, comp_layer, rel_emb in zip(self.edge_layers, self.node_layers, self.comp_layers, self.rel_embs):
ent_emb, rel_emb = self.ent_drop(ent_emb), self.rel_drop(rel_emb)
ent_emb = ent_emb.to(self.device)
rel_emb = rel_emb.to(self.device)
edge_ent_emb = edge_layer(kg, ent_emb, rel_emb)
node_ent_emb = node_layer(kg, ent_emb)
comp_ent_emb = comp_layer(kg, ent_emb, rel_emb)
ent_emb = ent_emb + edge_ent_emb + node_ent_emb + comp_ent_emb
rel_emb_list.append(rel_emb)
if self.args.pred_rel_w:
pred_rel_emb = torch.cat(rel_emb_list, dim=1).to(self.device)
pred_rel_emb = pred_rel_emb.mm(self.rel_w)
else:
pred_rel_emb = self.pred_rel_emb
return ent_emb, pred_rel_emb
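# Summary (descriptive, no new behavior): each SE-GNN layer produces three
# messages per entity -- a relation-level (EdgeLayer), an entity-level
# (NodeLayer) and a triple-level (CompLayer) update -- and the new entity
# embedding is the residual sum of all of them; the per-layer relation
# embeddings are concatenated (optionally projected by rel_w) to obtain the
# relation embedding consumed by the ConvE predictor.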
class CompLayer(nn.Module):
def __init__(self, args):
super(CompLayer, self).__init__()
self.device = torch.device("cuda:0")
self.args = args
self.dataset = self.args.dataset_name
self.n_ent = self.args.num_ent
self.n_rel = self.args.num_rel
self.emb_dim = self.args.emb_dim
self.comp_op = self.args.comp_op #'mul'
assert self.comp_op in ['add', 'mul']
self.neigh_w = get_param(self.emb_dim, self.emb_dim).to(self.device)
self.act = nn.Tanh()
if self.args.bn:
self.bn = torch.nn.BatchNorm1d(self.emb_dim).to(self.device)
else:
self.bn = None
def forward(self, kg, ent_emb, rel_emb):
assert kg.number_of_nodes() == ent_emb.shape[0]
assert rel_emb.shape[0] == 2 * self.n_rel
ent_emb = ent_emb.to(self.device)
rel_emb = rel_emb.to(self.device)
kg = kg.to(self.device)
with kg.local_scope():
kg.ndata['emb'] = ent_emb
rel_id = kg.edata['rel_id']
kg.edata['emb'] = rel_emb[rel_id]
# neighbor entity and relation composition
if self.args.comp_op == 'add':
kg.apply_edges(fn.u_add_e('emb', 'emb', 'comp_emb'))
elif self.args.comp_op == 'mul':
kg.apply_edges(fn.u_mul_e('emb', 'emb', 'comp_emb'))
else:
raise NotImplementedError
# attention
kg.apply_edges(fn.e_dot_v('comp_emb', 'emb', 'norm')) # (n_edge, 1)
kg.edata['norm'] = dgl.ops.edge_softmax(kg, kg.edata['norm'])
# agg
kg.edata['comp_emb'] = kg.edata['comp_emb'] * kg.edata['norm']
kg.update_all(fn.copy_e('comp_emb', 'm'), fn.sum('m', 'neigh'))
neigh_ent_emb = kg.ndata['neigh']
neigh_ent_emb = neigh_ent_emb.mm(self.neigh_w)
if callable(self.bn):
neigh_ent_emb = self.bn(neigh_ent_emb)
neigh_ent_emb = self.act(neigh_ent_emb)
return neigh_ent_emb
class NodeLayer(nn.Module):
def __init__(self, args):
super(NodeLayer, self).__init__()
self.device = torch.device("cuda:0")
self.args = args
self.dataset = self.args.dataset_name
self.n_ent = self.args.num_ent
self.n_rel = self.args.num_rel
self.emb_dim = self.args.emb_dim
self.neigh_w = get_param(self.emb_dim, self.emb_dim).to(self.device)
self.act = nn.Tanh()
if self.args.bn:
self.bn = torch.nn.BatchNorm1d(self.emb_dim).to(self.device)
else:
self.bn = None
def forward(self, kg, ent_emb):
assert kg.number_of_nodes() == ent_emb.shape[0]
kg = kg.to(self.device)
ent_emb = ent_emb.to(self.device)
with kg.local_scope():
kg.ndata['emb'] = ent_emb
# attention
kg.apply_edges(fn.u_dot_v('emb', 'emb', 'norm')) # (n_edge, 1)
kg.edata['norm'] = dgl.ops.edge_softmax(kg, kg.edata['norm'])
# agg
kg.update_all(fn.u_mul_e('emb', 'norm', 'm'), fn.sum('m', 'neigh'))
neigh_ent_emb = kg.ndata['neigh']
neigh_ent_emb = neigh_ent_emb.mm(self.neigh_w)
if callable(self.bn):
neigh_ent_emb = self.bn(neigh_ent_emb)
neigh_ent_emb = self.act(neigh_ent_emb)
return neigh_ent_emb
class EdgeLayer(nn.Module):
def __init__(self, args):
super(EdgeLayer, self).__init__()
self.device = torch.device("cuda:0")
self.args = args
self.dataset = self.args.dataset_name
self.n_ent = self.args.num_ent
self.n_rel = self.args.num_rel
self.emb_dim = self.args.emb_dim
self.neigh_w = utils.get_param(self.emb_dim, self.emb_dim).to(self.device)
self.act = nn.Tanh()
if self.args.bn: # True
self.bn = torch.nn.BatchNorm1d(self.emb_dim).to(self.device)
else:
self.bn = None
def forward(self, kg, ent_emb, rel_emb):
assert kg.number_of_nodes() == ent_emb.shape[0]
assert rel_emb.shape[0] == 2 * self.n_rel
kg = kg.to(self.device)
ent_emb = ent_emb.to(self.device)
rel_emb = rel_emb.to(self.device)
with kg.local_scope():
kg.ndata['emb'] = ent_emb
rel_id = kg.edata['rel_id']
kg.edata['emb'] = rel_emb[rel_id]
# attention
kg.apply_edges(fn.e_dot_v('emb', 'emb', 'norm')) # (n_edge, 1)
kg.edata['norm'] = dgl.ops.edge_softmax(kg, kg.edata['norm'])
# agg
kg.edata['emb'] = kg.edata['emb'] * kg.edata['norm']
kg.update_all(fn.copy_e('emb', 'm'), fn.sum('m', 'neigh'))
neigh_ent_emb = kg.ndata['neigh']
neigh_ent_emb = neigh_ent_emb.mm(self.neigh_w)
if callable(self.bn):
neigh_ent_emb = self.bn(neigh_ent_emb)
neigh_ent_emb = self.act(neigh_ent_emb)
return neigh_ent_emb
| 8,520 | 35.105932 | 132 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/GNNModel/CompGCN.py | import torch
from torch import nn
import dgl
import dgl.function as fn
import torch.nn.functional as F
from neuralkg.model import ConvE
class CompGCN(nn.Module):
"""`Composition-based multi-relational graph convolutional networks`_ (CompGCN),
which jointly embeds both nodes and relations in a relational graph.
Attributes:
args: Model configuration parameters.
.. _Composition-based multi-relational graph convolutional networks:
https://arxiv.org/pdf/1911.03082.pdf
"""
def __init__(self, args):
super(CompGCN, self).__init__()
self.args = args
self.ent_emb = None
self.rel_emb = None
self.GraphCov = None
self.init_model()
def init_model(self):
"""Initialize the CompGCN model and embeddings
Args:
ent_emb: Entity embedding, shape:[num_ent, emb_dim].
rel_emb: Relation_embedding, shape:[num_rel, emb_dim].
GraphCov: The comp graph convolution layers.
conv1: The convolution layer.
fc: The full connection layer.
bn0, bn1, bn2: The batch Normalization layer.
inp_drop, hid_drop, feg_drop: The dropout layer.
"""
#------------------------------CompGCN--------------------------------------------------------------------
self.ent_emb = nn.Parameter(torch.Tensor(self.args.num_ent, self.args.emb_dim))
self.rel_emb = nn.Parameter(torch.Tensor(self.args.num_rel, self.args.emb_dim))
nn.init.xavier_normal_(self.ent_emb, gain=nn.init.calculate_gain('relu'))
nn.init.xavier_normal_(self.rel_emb, gain=nn.init.calculate_gain('relu'))
self.GraphCov = CompGCNCov(self.args.emb_dim, self.args.emb_dim * 2, torch.tanh, \
bias = 'False', drop_rate = 0.1, opn = self.args.opn)
self.bias = nn.Parameter(torch.zeros(self.args.num_ent))
self.drop = nn.Dropout(0.3)
#-----------------------------ConvE-----------------------------------------------------------------------
self.emb_ent = torch.nn.Embedding(self.args.num_ent, self.args.emb_dim*2)
self.inp_drop = torch.nn.Dropout(self.args.inp_drop)
self.hid_drop = torch.nn.Dropout(self.args.hid_drop)
self.feg_drop = torch.nn.Dropout2d(self.args.fet_drop)
self.conv1 = torch.nn.Conv2d(1, 200, (7, 7), 1, 0, bias=False)
self.bn0 = torch.nn.BatchNorm2d(1)
self.bn1 = torch.nn.BatchNorm2d(200)
self.bn2 = torch.nn.BatchNorm1d(200)
self.register_parameter('b', torch.nn.Parameter(torch.zeros(self.args.num_ent)))
self.fc = torch.nn.Linear(39200, self.args.out_dim)
def forward(self, graph, relation, norm, triples):
"""The functions used in the training phase
Args:
graph: The knowledge graph recorded in dgl.graph()
relation: The relation id sampled in triples
norm: The edge norm in graph
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
Returns:
score: The score of triples.
"""
head, rela = triples[:,0], triples[:, 1]
x, r = self.ent_emb, self.rel_emb # embedding of relations
x, r = self.GraphCov(graph, x, r, relation, norm)
x = self.drop(x) # embeddings of entities [num_ent, dim]
head_emb = torch.index_select(x, 0, head) # filter out embeddings of subjects in this batch
#head_in_emb = head_emb.view(-1, 1, 10, 20)
rela_emb = torch.index_select(r, 0, rela) # filter out embeddings of relations in this batch
#rela_in_emb = rela_emb.view(-1, 1, 10, 20)
if self.args.decoder_model.lower() == 'conve':
# score = ConvE.score_func(self, head_in_emb, rela_in_emb, x)
score = self.ConvE(head_emb, rela_emb, x)
elif self.args.decoder_model.lower() == 'distmult':
score = self.DistMult(head_emb, rela_emb)
else:
raise ValueError("please choose decoder (DistMult/ConvE)")
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch['positive_sample']
graph = batch['graph']
relation = batch['rela']
norm = batch['norm']
head, rela = triples[:,0], triples[:, 1]
x, r = self.ent_emb, self.rel_emb # embedding of relations
x, r = self.GraphCov(graph, x, r, relation, norm)
x = self.drop(x) # embeddings of entities [num_ent, dim]
head_emb = torch.index_select(x, 0, head) # filter out embeddings of subjects in this batch
#head_in_emb = head_emb.view(-1, 1, 10, 20)
rela_emb = torch.index_select(r, 0, rela) # filter out embeddings of relations in this batch
#rela_in_emb = rela_emb.view(-1, 1, 10, 20)
if self.args.decoder_model.lower() == 'conve':
# score = ConvE.score_func(self, head_in_emb, rela_in_emb, x)
score = self.ConvE(head_emb, rela_emb, x)
elif self.args.decoder_model.lower() == 'distmult':
score = self.DistMult(head_emb, rela_emb)
else:
raise ValueError("please choose decoder (DistMult/ConvE)")
return score
def DistMult(self, head_emb, rela_emb):
"""Calculating the score of triples with DistMult model."""
obj_emb = head_emb * rela_emb # [batch_size, emb_dim]
x = torch.mm(obj_emb, self.emb_ent.weight.transpose(1, 0)) # [batch_size, ent_num]
x += self.bias.expand_as(x)
score = torch.sigmoid(x)
return score
def concat(self, ent_embed, rel_embed):
ent_embed = ent_embed.view(-1, 1, 200)
rel_embed = rel_embed.view(-1, 1, 200)
stack_input = torch.cat([ent_embed, rel_embed], 1) # [batch_size, 2, embed_dim]
stack_input = stack_input.reshape(-1, 1, 2 * 10, 20) # reshape to 2D [batch, 1, 2*k_h, k_w]
return stack_input
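# Shape walk-through of concat() above (uses the hard-coded 200-dim setting;
# other embedding sizes would need different reshape constants):
#   ent_embed: [bs, 200] -> [bs, 1, 200]
#   rel_embed: [bs, 200] -> [bs, 1, 200]
#   cat: [bs, 2, 200] -> reshape -> [bs, 1, 20, 20]   (2 * k_h = 20, k_w = 20)
# i.e. the 2D "image" consumed by the ConvE-style convolution below.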
def ConvE(self, sub_emb, rel_emb, all_ent):
"""Calculating the score of triples with ConvE model."""
stack_input = self.concat(sub_emb, rel_emb) # [batch_size, 1, 2*k_h, k_w]
x = self.bn0(stack_input)
x = self.conv1(x) # [batch_size, num_filt, flat_sz_h, flat_sz_w]
x = self.bn1(x)
x = F.relu(x)
x = self.feg_drop(x)
x = x.view(x.shape[0], -1) # [batch_size, flat_sz]
x = self.fc(x) # [batch_size, embed_dim]
x = self.hid_drop(x)
x = self.bn2(x)
x = F.relu(x)
x = torch.mm(x, all_ent.transpose(1, 0)) # [batch_size, ent_num]
x += self.bias.expand_as(x)
score = torch.sigmoid(x)
return score
class CompGCNCov(nn.Module):
""" The comp graph convolution layers, similar to https://github.com/malllabiisc/CompGCN"""
def __init__(self, in_channels, out_channels, act=lambda x: x, bias=True, drop_rate=0., opn='corr'):
super(CompGCNCov, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.act = act # activation function
self.device = None
self.rel = None
self.opn = opn
# relation-type specific parameter
self.in_w = self.get_param([in_channels, out_channels])
self.out_w = self.get_param([in_channels, out_channels])
self.loop_w = self.get_param([in_channels, out_channels])
self.w_rel = self.get_param([in_channels, out_channels]) # transform embedding of relations to next layer
self.loop_rel = self.get_param([1, in_channels]) # self-loop embedding
self.drop = nn.Dropout(drop_rate)
self.bn = torch.nn.BatchNorm1d(out_channels)
self.bias = nn.Parameter(torch.zeros(out_channels)) if bias else None
self.rel_wt = None
def get_param(self, shape):
param = nn.Parameter(torch.Tensor(*shape))
nn.init.xavier_normal_(param, gain=nn.init.calculate_gain('relu'))
return param
def message_func(self, edges: dgl.udf.EdgeBatch):
edge_type = edges.data['type'] # [E, 1]
edge_num = edge_type.shape[0]
edge_data = self.comp(edges.src['h'], self.rel[edge_type]) # [E, in_channel]
# msg = torch.bmm(edge_data.unsqueeze(1),
# self.w[edge_dir.squeeze()]).squeeze() # [E, 1, in_c] @ [E, in_c, out_c]
# msg = torch.bmm(edge_data.unsqueeze(1),
# self.w.index_select(0, edge_dir.squeeze())).squeeze() # [E, 1, in_c] @ [E, in_c, out_c]
# first half edges are all in-directions, last half edges are out-directions.
msg = torch.cat([torch.matmul(edge_data[:edge_num // 2, :], self.in_w),
torch.matmul(edge_data[edge_num // 2:, :], self.out_w)])
msg = msg * edges.data['norm'].reshape(-1, 1) # [E, D] * [E, 1]
return {'msg': msg}
def reduce_func(self, nodes: dgl.udf.NodeBatch):
return {'h': self.drop(nodes.data['h']) / 3}
def comp(self, h, edge_data):
def com_mult(a, b):
r1, i1 = a.real, a.imag
r2, i2 = b.real, b.imag
real = r1 * r2 - i1 * i2
imag = r1 * i2 + i1 * r2
return torch.complex(real, imag)
def conj(a):
a.imag = -a.imag
return a
def ccorr(a, b):
return torch.fft.irfft(com_mult(conj(torch.fft.rfft(a)), torch.fft.rfft(b)), a.shape[-1])
if self.opn == 'mult':
return h * edge_data
elif self.opn == 'sub':
return h - edge_data
elif self.opn == 'corr':
return ccorr(h, edge_data.expand_as(h))
else:
raise KeyError(f'composition operator {self.opn} not recognized.')
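# Note on the operators above (illustrative): 'mult' and 'sub' are element-wise,
# while 'corr' is circular correlation computed in the Fourier domain,
# ccorr(a, b) = irfft(conj(rfft(a)) * rfft(b)); e.g. for 1-D vectors
#   a = [1, 0, 0], b = [0, 1, 0]  ->  ccorr(a, b) = [0, 1, 0]
# (a pure shift), which is the composition phi(e_s, e_r) used by CompGCN.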
def forward(self, g: dgl.graph, x, rel_repr, edge_type, edge_norm):
self.device = x.device
g = g.local_var()
g.ndata['h'] = x
g.edata['type'] = edge_type
g.edata['norm'] = edge_norm
if self.rel_wt is None:
self.rel = rel_repr
else:
self.rel = torch.mm(self.rel_wt, rel_repr) # [num_rel*2, num_base] @ [num_base, in_c]
g.update_all(self.message_func, fn.sum(msg='msg', out='h'), self.reduce_func)
x = g.ndata.pop('h') + torch.mm(self.comp(x, self.loop_rel), self.loop_w) / 3
if self.bias is not None:
x = x + self.bias
x = self.bn(x)
return self.act(x), torch.matmul(self.rel, self.w_rel) | 10,731 | 41.251969 | 114 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/GNNModel/RGCN.py | import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import RelGraphConv
from neuralkg.model import DistMult
class RGCN(nn.Module):
"""`Modeling Relational Data with Graph Convolutional Networks`_ (RGCN), which use GCN framework to model relation data.
Attributes:
args: Model configuration parameters.
.. _Modeling Relational Data with Graph Convolutional Networks: https://arxiv.org/pdf/1703.06103.pdf
"""
def __init__(self, args):
super(RGCN, self).__init__()
self.args = args
self.ent_emb = None
self.rel_emb = None
self.RGCN = None
self.Loss_emb = None
self.build_model()
def build_model(self):
"""Initialize the RGCN model and embeddings
Args:
ent_emb: Entity embedding, shape:[num_ent, emb_dim].
rel_emb: Relation_embedding, shape:[num_rel, emb_dim].
RGCN: the relation graph convolution model.
"""
self.ent_emb = nn.Embedding(self.args.num_ent,self.args.emb_dim)
self.rel_emb = nn.Parameter(torch.Tensor(self.args.num_rel, self.args.emb_dim))
nn.init.xavier_uniform_(self.rel_emb, gain=nn.init.calculate_gain('relu'))
self.RGCN = nn.ModuleList()
for idx in range(self.args.num_layers):
RGCN_idx = self.build_hidden_layer(idx)
self.RGCN.append(RGCN_idx)
def forward(self, graph, ent, rel, norm, triples, mode='single'):
"""The functions used in the training and testing phase
Args:
graph: The knowledge graph recorded in dgl.graph()
ent: The entitiy ids sampled in triples
rel: The relation ids sampled in triples
norm: The edge norm in graph
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
mode: Choose head-predict or tail-predict, Defaults to 'single'.
Returns:
score: The score of triples.
"""
embedding = self.ent_emb(ent.squeeze())
for layer in self.RGCN:
embedding = layer(graph, embedding, rel, norm)
self.Loss_emb = embedding
head_emb, rela_emb, tail_emb = self.tri2emb(embedding, triples, mode)
score = DistMult.score_func(self,head_emb, rela_emb, tail_emb, mode)
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch['positive_sample']
graph = batch['graph']
ent = batch['entity']
rel = batch['rela']
norm = batch['norm']
embedding = self.ent_emb(ent.squeeze())
for layer in self.RGCN:
embedding = layer(graph, embedding, rel, norm)
self.Loss_emb = embedding
head_emb, rela_emb, tail_emb = self.tri2emb(embedding, triples, mode)
score = DistMult.score_func(self,head_emb, rela_emb, tail_emb, mode)
return score
def tri2emb(self, embedding, triples, mode="single"):  # TODO: merge with XTransE
"""Get embedding of triples.
This function get the embeddings of head, relation, and tail
respectively. each embedding has three dimensions.
Args:
embedding(tensor): This embedding save the entity embeddings.
triples (tensor): This tensor save triples id, which dimension is
[triples number, 3].
mode (str, optional): This arg indicates that the negative entity
will replace the head or tail entity. when it is 'single', it
means that entity will not be replaced. Defaults to 'single'.
Returns:
head_emb: Head entity embedding.
rela_emb: Relation embedding.
tail_emb: Tail entity embedding.
"""
rela_emb = self.rel_emb[triples[:, 1]].unsqueeze(1) # [bs, 1, dim]
head_emb = embedding[triples[:, 0]].unsqueeze(1) # [bs, 1, dim]
tail_emb = embedding[triples[:, 2]].unsqueeze(1) # [bs, 1, dim]
if mode == "head-batch" or mode == "head_predict":
head_emb = embedding.unsqueeze(0) # [1, num_ent, dim]
elif mode == "tail-batch" or mode == "tail_predict":
tail_emb = embedding.unsqueeze(0) # [1, num_ent, dim]
return head_emb, rela_emb, tail_emb
def build_hidden_layer(self, idx):
"""The functions used to initialize the RGCN model
Args:
idx: it`s used to identify rgcn layers. The last rgcn layer should use
relu as activation function.
Returns:
the relation graph convolution layer
"""
act = F.relu if idx < self.args.num_layers - 1 else None
return RelGraphConv(self.args.emb_dim, self.args.emb_dim, self.args.num_rel, "bdd",
num_bases=100, activation=act, self_loop=True,dropout=0.2 )
| 5,155 | 35.309859 | 124 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/GNNModel/KBAT.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import time
import os
class KBAT(nn.Module):
"""`Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs`_ (KBAT),
which introduces an attention mechanism to aggregate the neighbor node representations.
Attributes:
args: Model configuration parameters.
.. _Learning Attention-based Embeddings for Relation Prediction in Knowledge Graphs:
https://arxiv.org/pdf/1906.01195.pdf
"""
def __init__(self, args):
super(KBAT,self).__init__()
self.args = args
self.entity_embeddings = None
self.relation_embeddings = None
self.init_GAT_emb()
self.init_ConvKB_emb()
def init_GAT_emb(self):
"""Initialize the GAT model and embeddings
Args:
ent_emb_out: Entity embedding, shape:[num_ent, emb_dim].
rel_emb_out: Relation_embedding, shape:[num_rel, emb_dim].
entity_embeddings: The final embedding used in ConvKB.
relation_embeddings: The final embedding used in ConvKB.
attentions, out_att: The graph attention layers.
"""
self.num_ent = self.args.num_ent
self.num_rel = self.args.num_rel
self.emb_dim = self.args.emb_dim
self.ent_emb_out = nn.Parameter(torch.randn(self.num_ent,self.emb_dim))
self.rel_emb_out = nn.Parameter(torch.randn(self.num_rel,self.emb_dim))
self.drop = 0.3
self.alpha = 0.2
self.nheads_GAT = 2
self.out_dim = 100
self.entity_embeddings = nn.Parameter(
torch.randn(self.num_ent, self.out_dim * self.nheads_GAT))
self.relation_embeddings = nn.Parameter(
torch.randn(self.num_rel, self.out_dim * self.nheads_GAT))
self.dropout_layer = nn.Dropout(self.drop)
self.attentions = [GraphAttentionLayer(self.num_ent,
self.emb_dim,
self.out_dim,
self.emb_dim,
dropout=self.drop,
alpha=self.alpha,
concat=True)
for _ in range(self.nheads_GAT)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
# W matrix (transformation matrix) to convert h_input to the h_output dimension
self.W = nn.Parameter(torch.zeros(
size=(self.emb_dim, self.nheads_GAT * self.out_dim)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.out_att = GraphAttentionLayer(self.num_ent,
self.out_dim * self.nheads_GAT,
self.out_dim * self.nheads_GAT,
self.out_dim * self.nheads_GAT,
dropout=self.drop,
alpha=self.alpha,
concat=False
)
self.W_entities = nn.Parameter(torch.zeros(
size=(self.emb_dim, self.out_dim * self.nheads_GAT)))
nn.init.xavier_uniform_(self.W_entities.data, gain=1.414)
def init_ConvKB_emb(self):
"""Initialize the ConvKB model.
Args:
conv_layer: The convolution layer.
dropout: The dropout layer.
ReLU: Relu activation function.
fc_layer: The full connection layer.
"""
self.conv_layer = nn.Conv2d(1, 50, (1,3))
self.dropout = nn.Dropout(0.3)
self.ReLU = nn.ReLU()
self.fc_layer = nn.Linear(10000, 1)
nn.init.xavier_uniform_(self.fc_layer.weight, gain=1.414)
nn.init.xavier_uniform_(self.conv_layer.weight, gain=1.414)
def forward(self, triples, mode, adj_matrix=None, n_hop=None):
"""The functions used in the training and testing phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
mode: The mode in which the model is used: 'GAT' runs the graph
attention encoder, while 'ConvKB' runs the ConvKB decoder.
Returns:
score: The score of triples.
"""
if mode == 'GAT': # gat
score = self.forward_GAT(triples, adj_matrix, n_hop)
else:
score = self.forward_Con(triples, mode)
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch["positive_sample"]
score = self.forward_Con(triples, mode)
return score
def forward_Con(self, triples, mode):
score = None
if mode == 'ConvKB':
head_emb = self.entity_embeddings[triples[:, 0]].unsqueeze(1)
rela_emb = self.relation_embeddings[triples[:, 1]].unsqueeze(1)
tail_emb = self.entity_embeddings[triples[:, 2]].unsqueeze(1)
score = self.cal_Con_score(head_emb, rela_emb, tail_emb)
elif mode == 'head_predict':
head_emb = self.entity_embeddings.unsqueeze(1) # [1, num_ent, dim]
for triple in triples:
rela_emb = self.relation_embeddings[triple[1]].\
unsqueeze(0).tile(dims=(self.num_ent,1,1))
tail_emb = self.entity_embeddings[triple[2]].\
unsqueeze(0).tile(dims=(self.num_ent,1,1))
s = self.cal_Con_score(head_emb, rela_emb, tail_emb).t()
if score is None:
score = s
else:
score = torch.cat((score, s), dim=0)
elif mode == 'tail_predict':
tail_emb = self.entity_embeddings.unsqueeze(1) # [1, num_ent, dim]
for triple in triples:
head_emb = self.entity_embeddings[triple[0]].\
unsqueeze(0).tile(dims=(self.num_ent,1,1))
rela_emb = self.relation_embeddings[triple[1]].\
unsqueeze(0).tile(dims=(self.num_ent,1,1))
s = self.cal_Con_score(head_emb, rela_emb, tail_emb).t()
if score is None:
score = s
else:
score = torch.cat((score, s), dim=0)
return score
def forward_GAT(self, triples, adj_matrix, n_hop):
edge_list = adj_matrix[0]  # edge endpoint (node) pairs
edge_type = adj_matrix[1]  # edge types (relation ids)
edge_list_nhop = torch.cat((n_hop[:, 3].unsqueeze(-1),
n_hop[:, 0].unsqueeze(-1)), dim=1).t()
edge_type_nhop = torch.cat([n_hop[:, 1].unsqueeze(-1),
n_hop[:, 2].unsqueeze(-1)], dim=1)
edge_emb = self.rel_emb_out[edge_type]
self.ent_emb_out.data = F.normalize(self.ent_emb_out.data, p=2, dim=1).detach()
edge_embed_nhop = self.rel_emb_out[edge_type_nhop[:, 0]] + \
self.rel_emb_out[edge_type_nhop[:, 1]]
ent_emb_out = torch.cat([att(self.ent_emb_out, edge_list, edge_emb, edge_list_nhop,
edge_embed_nhop) for att in self.attentions], dim=1)
ent_emb_out = self.dropout_layer(ent_emb_out)
rel_emb_out = self.rel_emb_out.mm(self.W)
edge_emb = rel_emb_out[edge_type]
edge_embed_nhop = rel_emb_out[edge_type_nhop[:, 0]] + \
rel_emb_out[edge_type_nhop[:, 1]]
ent_emb_out = F.elu(self.out_att(ent_emb_out, edge_list, edge_emb,
edge_list_nhop, edge_embed_nhop))
mask_indices = torch.unique(triples[:, 2])
mask = torch.zeros(self.ent_emb_out.shape[0]).type_as(self.ent_emb_out)
mask[mask_indices] = 1.0
entities_upgraded = self.ent_emb_out.mm(self.W_entities)
ent_emb_out = entities_upgraded + \
mask.unsqueeze(-1).expand_as(ent_emb_out) * ent_emb_out
ent_emb_out = F.normalize(ent_emb_out, p=2, dim=1)
self.entity_embeddings.data = ent_emb_out.data
self.relation_embeddings.data = rel_emb_out.data
head_emb = ent_emb_out[triples[:, 0]]
rela_emb = rel_emb_out[triples[:, 1]]
tail_emb = ent_emb_out[triples[:, 2]]
return self.cal_GAT_score(head_emb, rela_emb, tail_emb)
def cal_Con_score(self, head_emb, rela_emb, tail_emb):
"""Calculating the score of triples with ConvKB model.
Args:
head_emb: The head entity embedding.
rela_emb: The relation embedding.
tail_emb: The tail entity embedding.
Returns:
score: The score of triples.
"""
conv_input = torch.cat((head_emb, rela_emb, tail_emb), dim=1)
batch_size= conv_input.shape[0]
conv_input = conv_input.transpose(1, 2)
conv_input = conv_input.unsqueeze(1)
out_conv = self.conv_layer(conv_input)
out_conv = self.ReLU(out_conv)
out_conv = self.dropout(out_conv)
out_conv = out_conv.squeeze(-1).view(batch_size, -1)
score = self.fc_layer(out_conv)
return score
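# Shape sketch for cal_Con_score above (sizes follow the hard-coded layers):
#   head/rela/tail: [bs, 1, dim] -> cat: [bs, 3, dim] -> [bs, 1, dim, 3]
#   -> Conv2d(1, 50, (1, 3)): [bs, 50, dim, 1] -> flatten: [bs, 50 * dim]
#   -> fc_layer(10000 -> 1): one ConvKB score per triple
# (the 10000 input size assumes dim = 200, i.e. out_dim * nheads_GAT).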
def cal_GAT_score(self, head_emb, relation_emb, tail_emb):
"""Calculating the score of triples with TransE model.
Args:
head_emb: The head entity embedding.
rela_emb: The relation embedding.
tail_emb: The tail entity embedding.
Returns:
score: The score of triples.
"""
score = (head_emb + relation_emb) - tail_emb
score = torch.norm(score, p=1, dim=1)
return score
class SpecialSpmmFunctionFinal(torch.autograd.Function):
"""
Special function for sparse region backpropagation only, similar to https://arxiv.org/abs/1710.10903
"""
@staticmethod
def forward(ctx, edge, edge_w, N, E, out_features):
a = torch.sparse_coo_tensor(
edge, edge_w, torch.Size([N, N, out_features]))
b = torch.sparse.sum(a, dim=1)
ctx.N = b.shape[0]
ctx.outfeat = b.shape[1]
ctx.E = E
ctx.indices = a._indices()[0, :]
return b.to_dense()
@staticmethod
def backward(ctx, grad_output):
grad_values = None
if ctx.needs_input_grad[1]:
edge_sources = ctx.indices
grad_values = grad_output[edge_sources]
return None, grad_values, None, None, None
class SpecialSpmmFinal(nn.Module):
"""
Special spmm final layer, similar to https://arxiv.org/abs/1710.10903.
"""
def forward(self, edge, edge_w, N, E, out_features):
return SpecialSpmmFunctionFinal.apply(edge, edge_w, N, E, out_features)
class GraphAttentionLayer(nn.Module):
"""
Sparse version GAT layer, similar to https://arxiv.org/abs/1710.10903.
"""
def __init__(self, num_nodes, in_features, out_features, nrela_dim, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.num_nodes = num_nodes
self.alpha = alpha
self.concat = concat
self.nrela_dim = nrela_dim
self.a = nn.Parameter(torch.zeros(
size=(out_features, 2 * in_features + nrela_dim)))
nn.init.xavier_normal_(self.a.data, gain=1.414)
self.a_2 = nn.Parameter(torch.zeros(size=(1, out_features)))
nn.init.xavier_normal_(self.a_2.data, gain=1.414)
self.dropout = nn.Dropout(dropout)
self.leakyrelu = nn.LeakyReLU(self.alpha)
self.special_spmm_final = SpecialSpmmFinal()
def forward(self, input, edge, edge_embed, edge_list_nhop, edge_embed_nhop):
N = input.size()[0]
# Self-attention on the nodes - Shared attention mechanism
edge = torch.cat((edge[:, :], edge_list_nhop[:, :]), dim=1)
edge_embed = torch.cat(
(edge_embed[:, :], edge_embed_nhop[:, :]), dim=0)
edge_h = torch.cat(
(input[edge[0, :], :], input[edge[1, :], :], edge_embed[:, :]), dim=1).t()
# edge_h: (2*in_dim + nrela_dim) x E
edge_m = self.a.mm(edge_h)
# edge_m: D * E
# to be checked later
powers = -self.leakyrelu(self.a_2.mm(edge_m).squeeze())
edge_e = torch.exp(powers).unsqueeze(1)
assert not torch.isnan(edge_e).any()
# edge_e: E
e_rowsum = self.special_spmm_final(
edge, edge_e, N, edge_e.shape[0], 1)
e_rowsum[e_rowsum == 0.0] = 1e-12
# e_rowsum: N x 1
edge_e = edge_e.squeeze(1)
edge_e = self.dropout(edge_e)
# edge_e: E
edge_w = (edge_e * edge_m).t()
# edge_w: E * D
h_prime = self.special_spmm_final(
edge, edge_w, N, edge_w.shape[0], self.out_features)
assert not torch.isnan(h_prime).any()
# h_prime: N x out
h_prime = h_prime.div(e_rowsum)
# h_prime: N x out
assert not torch.isnan(h_prime).any()
if self.concat:
# if this layer is not last layer,
return F.elu(h_prime)
else:
# if this layer is last layer,
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')' | 13,971 | 36.258667 | 109 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/GNNModel/XTransE.py | import torch.nn as nn
import torch
from IPython import embed
from neuralkg.model.KGEModel.model import Model
class XTransE(Model):
"""`Explainable Knowledge Graph Embedding for Link Prediction with Lifestyles in e-Commerce`_ (XTransE), which introduces the attention to aggregate the neighbor node representation.
Attributes:
args: Model configuration parameters.
.. _Explainable Knowledge Graph Embedding for Link Prediction with Lifestyles in e-Commerce: https://link.springer.com/content/pdf/10.1007%2F978-981-15-3412-6_8.pdf
"""
def __init__(self, args):
super(XTransE, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.init_emb()
def init_emb(self):
"""Initialize the entity and relation embeddings in the form of a uniform distribution.
Args:
margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim].
rel_emb: Relation_embedding, shape:[num_rel, emb_dim].
"""
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([6.0 / float(self.args.emb_dim).__pow__(0.5)]),
requires_grad=False
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim)
nn.init.uniform_(tensor=self.ent_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
nn.init.uniform_(tensor=self.rel_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
def score_func(self, triples, neighbor=None, mask=None, negs=None, mode='single'):
"""Calculating the score of triples.
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
neighbor: The neighbors of tail entities.
mask: The mask of neighbor nodes
negs: Negative samples, defaults to None.
mode: Choose head-predict or tail-predict, Defaults to 'single'.
Returns:
score: The score of triples.
"""
head = triples[:,0]
rela = triples[:,1]
tail = triples[:,2]
if mode == 'tail-batch':
tail = negs.squeeze(1)
norm_emb_ent = nn.functional.normalize(self.ent_emb.weight, dim=1, p=2) # [ent, dim]
norm_emb_rel = nn.functional.normalize(self.rel_emb.weight, dim=1, p=2) # [rel, dim]
neighbor_tail_emb = norm_emb_ent[neighbor[:, :, 1]] # [batch, neighbor, dim]
neighbor_rela_emb = norm_emb_rel[neighbor[:, :, 0]] # [batch, neighbor, dim]
neighbor_head_emb = neighbor_tail_emb - neighbor_rela_emb
rela_emb = norm_emb_rel[rela] # [batch, dim]
tail_emb = norm_emb_ent[tail] # [batch, dim]
head_emb = norm_emb_ent[head]
h_rt_embedding = tail_emb - rela_emb
attention_rt = torch.zeros([self.args.train_bs, 200]).type_as(self.ent_emb.weight)
attention_rt = (neighbor_head_emb * h_rt_embedding.unsqueeze(1)).sum(dim=2) * mask
attention_rt = nn.functional.softmax(attention_rt, dim=1).unsqueeze(2)
head_emb = head_emb + \
torch.bmm(neighbor_head_emb.permute(0,2,1), attention_rt).reshape([-1,self.args.emb_dim])
score = self.margin.item() - torch.norm(head_emb + rela_emb - tail_emb, p=2, dim=1)
return score.unsqueeze(1)
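# Sketch of the attention above (descriptive): each neighboring (relation, tail)
# pair yields a TransE-style head estimate t_i - r_i; its dot product with the
# query direction t - r (padding masked out) is softmax-normalized, and the
# attention-weighted sum of these estimates is added to the head embedding
# before the margin - ||h + r - t|| score is computed.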
def transe_func(self, head_emb, rela_emb, tail_emb):
"""Calculating the score of triples with TransE model.
Args:
head_emb: The head entity embedding.
rela_emb: The relation embedding.
tail_emb: The tail entity embedding.
Returns:
score: The score of triples.
"""
score = (head_emb + rela_emb) - tail_emb
score = self.margin.item() - torch.norm(score, p=2, dim=-1)
return score
def forward(self, triples, neighbor=None, mask=None, negs=None, mode='single'):
"""The functions used in the training and testing phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
neighbor: The neighbors of tail entities.
mask: The mask of neighbor nodes
negs: Negative samples, defaults to None.
mode: Choose head-predict or tail-predict, Defaults to 'single'.
Returns:
score: The score of triples.
"""
head_emb, relation_emb, tail_emb = self.tri2emb(triples, negs, mode)
TransE_score = self.transe_func(head_emb, relation_emb, tail_emb)
XTransE_score = self.score_func(triples, neighbor, mask, negs, mode)
return TransE_score + XTransE_score
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch["positive_sample"]
head_emb, relation_emb, tail_emb = self.tri2emb(triples, mode=mode)
score = self.transe_func(head_emb, relation_emb, tail_emb)
return score
| 5,638 | 36.845638 | 248 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/GNNModel/__init__.py | from .RGCN import RGCN
from .KBAT import KBAT
from .CompGCN import CompGCN
from .XTransE import XTransE
from .SEGNN import SEGNN | 128 | 24.8 | 28 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/DistMult.py | import torch.nn as nn
import torch
from .model import Model
from IPython import embed
class DistMult(Model):
"""`Embedding Entities and Relations for Learning and Inference in Knowledge Bases`_ (DistMult)
Attributes:
args: Model configuration parameters.
epsilon: Calculate embedding_range.
margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim].
rel_emb: Relation_embedding, shape:[num_rel, emb_dim].
.. _Embedding Entities and Relations for Learning and Inference in Knowledge Bases: https://arxiv.org/abs/1412.6575
"""
def __init__(self, args):
super(DistMult, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.init_emb()
def init_emb(self):
"""Initialize the entity and relation embeddings in the form of a uniform distribution."""
self.epsilon = 2.0
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.margin.item() + self.epsilon) / self.args.emb_dim]),
requires_grad=False
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim)
nn.init.uniform_(tensor=self.ent_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
nn.init.uniform_(tensor=self.rel_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
def score_func(self, head_emb, relation_emb, tail_emb, mode):
"""Calculating the score of triples.
The formula for calculating the score is :math:`h^{\top} \operatorname{diag}(r) t`
Args:
head_emb: The head entity embedding.
relation_emb: The relation embedding.
tail_emb: The tail entity embedding.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
if mode == 'head-batch':
score = head_emb * (relation_emb * tail_emb)
else:
score = (head_emb * relation_emb) * tail_emb
score = score.sum(dim = -1)
return score
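# Broadcasting note (shapes assumed from the shared tri2emb convention): in
# 'single' mode all three embeddings are [bs, 1, dim] and the score is [bs, 1];
# with negative samples the corrupted side is [bs, num_neg, dim] (or
# [1, num_ent, dim] at test time), so the element-wise product broadcasts and
# the final sum yields one score per candidate entity.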
def forward(self, triples, negs=None, mode='single'):
"""The functions used in the training phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
negs: Negative samples, defaults to None.
mode: Choose head-predict or tail-predict, Defaults to 'single'.
Returns:
score: The score of triples.
"""
head_emb, relation_emb, tail_emb = self.tri2emb(triples, negs, mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch["positive_sample"]
head_emb, relation_emb, tail_emb = self.tri2emb(triples, mode=mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
| 3,476 | 34.121212 | 120 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/PairRE.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .model import Model
class PairRE(Model):
"""`PairRE: Knowledge Graph Embeddings via Paired Relation Vectors`_ (PairRE), which paired vectors for each relation representation to model complex patterns.
Attributes:
args: Model configuration parameters.
epsilon: Calculate embedding_range.
margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim].
rel_emb: Relation embedding, shape:[num_rel, emb_dim * 2].
.. _PairRE: Knowledge Graph Embeddings via Paired Relation Vectors: https://arxiv.org/pdf/2011.03798.pdf
"""
def __init__(self, args):
super(PairRE, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.init_emb()
def init_emb(self):
"""Initialize the entity and relation embeddings in the form of a uniform distribution.
"""
self.epsilon = 2.0
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]),
requires_grad=False,
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.margin.item() + self.epsilon) / self.args.emb_dim]),
requires_grad=False,
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim)
nn.init.uniform_(
tensor=self.ent_emb.weight.data,
a = -self.embedding_range.item(),
b = self.embedding_range.item(),
)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim * 2)
nn.init.uniform_(
tensor=self.rel_emb.weight.data,
a = -self.embedding_range.item(),
b = self.embedding_range.item(),
)
def score_func(self, head_emb, relation_emb, tail_emb, mode):
"""Calculating the score of triples.
The formula for calculating the score is :math:`\gamma - || h \circ r^H - t \circ r^T ||`
Args:
head_emb: The head entity embedding.
relation_emb: The relation embedding.
tail_emb: The tail entity embedding.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
re_head, re_tail = torch.chunk(relation_emb, 2, dim=2)
head = F.normalize(head_emb, 2, -1)
tail = F.normalize(tail_emb, 2, -1)
score = head * re_head - tail * re_tail
return self.margin.item() - torch.norm(score, p=1, dim=2)
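# Worked toy example for the score above (numbers invented, emb_dim = 2):
#   h = [0.6, 0.8], t = [1.0, 0.0]               (already unit L2 norm)
#   relation_emb = [1.0, 1.0, 0.6, 0.8] -> r_head = [1.0, 1.0], r_tail = [0.6, 0.8]
#   h * r_head - t * r_tail = [0.6, 0.8] - [0.6, 0.0] = [0.0, 0.8]
#   score = margin - ||[0.0, 0.8]||_1 = margin - 0.8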
def forward(self, triples, negs=None, mode='single'):
"""The functions used in the training phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
negs: Negative samples, defaults to None.
mode: Choose head-predict or tail-predict, Defaults to 'single'.
Returns:
score: The score of triples.
"""
head_emb, relation_emb, tail_emb = self.tri2emb(triples, negs, mode=mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch['positive_sample']
head_emb, relation_emb, tail_emb = self.tri2emb(triples, mode=mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
| 3,716 | 35.087379 | 163 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/ComplEx.py | import torch.nn as nn
import torch
from .model import Model
from IPython import embed
class ComplEx(Model):
def __init__(self, args):
"""`Complex Embeddings for Simple Link Prediction`_ (ComplEx), which is a simple approach to matrix and tensor factorization for link prediction data that uses vectors with complex values and retains the mathematical definition of the dot product.
Attributes:
args: Model configuration parameters.
epsilon: Calculate embedding_range.
margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim * 2].
rel_emb: Relation_embedding, shape:[num_rel, emb_dim * 2].
.. _Complex Embeddings for Simple Link Prediction: http://proceedings.mlr.press/v48/trouillon16.pdf
"""
super(ComplEx, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.init_emb()
def init_emb(self):
"""Initialize the entity and relation embeddings in the form of a uniform distribution."""
self.epsilon = 2.0
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.margin.item() + self.epsilon) / self.args.emb_dim]),
requires_grad=False
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim * 2)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim * 2)
nn.init.uniform_(tensor=self.ent_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
nn.init.uniform_(tensor=self.rel_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
def score_func(self, head_emb, relation_emb, tail_emb, mode):
"""Calculating the score of triples.
The formula for calculating the score is :math:`\operatorname{Re}\left(h^{\top} \operatorname{diag}(r) \overline{t}\right)`
Args:
head_emb: The head entity embedding.
relation_emb: The relation embedding.
tail_emb: The tail entity embedding.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
re_head, im_head = torch.chunk(head_emb, 2, dim=-1)
re_relation, im_relation = torch.chunk(relation_emb, 2, dim=-1)
re_tail, im_tail = torch.chunk(tail_emb, 2, dim=-1)
return torch.sum(
re_head * re_tail * re_relation
+ im_head * im_tail * re_relation
+ re_head * im_tail * im_relation
- im_head * re_tail * im_relation,
-1
)
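    # Note: the four-term sum above is the expansion of the real part of the triple
    # product h * r * conj(t) for complex vectors stored as concatenated
    # (real, imaginary) halves, summed over the embedding dimension.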
def forward(self, triples, negs=None, mode='single'):
"""The functions used in the training phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
negs: Negative samples, defaults to None.
mode: Choose head-predict or tail-predict, Defaults to 'single'.
Returns:
score: The score of triples.
"""
head_emb, relation_emb, tail_emb = self.tri2emb(triples, negs, mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch["positive_sample"]
head_emb, relation_emb, tail_emb = self.tri2emb(triples, mode=mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score | 3,926 | 37.5 | 255 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/RotatE.py | import torch.nn as nn
import torch
from .model import Model
from IPython import embed
class RotatE(Model):
"""`RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space`_ (RotatE), which defines each relation as a rotation from the source entity to the target entity in the complex vector space.
Attributes:
args: Model configuration parameters.
epsilon: Calculate embedding_range.
margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim * 2].
rel_emb: Relation_embedding, shape:[num_rel, emb_dim].
.. _RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space: https://openreview.net/forum?id=HkgEQnRqYQ
"""
def __init__(self, args):
super(RotatE, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.init_emb()
def init_emb(self):
"""Initialize the entity and relation embeddings in the form of a uniform distribution."""
self.epsilon = 2.0
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.margin.item() + self.epsilon) / self.args.emb_dim]),
requires_grad=False
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim * 2)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim)
nn.init.uniform_(tensor=self.ent_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
nn.init.uniform_(tensor=self.rel_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
def score_func(self, head_emb, relation_emb, tail_emb, mode):
"""Calculating the score of triples.
The formula for calculating the score is :math:`\gamma - \|h \circ r - t\|`
Args:
head_emb: The head entity embedding.
relation_emb: The relation embedding.
tail_emb: The tail entity embedding.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
pi = 3.14159265358979323846
re_head, im_head = torch.chunk(head_emb, 2, dim=-1)
re_tail, im_tail = torch.chunk(tail_emb, 2, dim=-1)
#Make phases of relations uniformly distributed in [-pi, pi]
phase_relation = relation_emb/(self.embedding_range.item()/pi)
re_relation = torch.cos(phase_relation)
im_relation = torch.sin(phase_relation)
if mode == 'head-batch':
re_score = re_relation * re_tail + im_relation * im_tail
im_score = re_relation * im_tail - im_relation * re_tail
re_score = re_score - re_head
im_score = im_score - im_head
else:
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
re_score = re_score - re_tail
im_score = im_score - im_tail
score = torch.stack([re_score, im_score], dim = 0)
score = score.norm(dim = 0)
score = self.margin.item() - score.sum(dim = -1)
return score
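    # Note: relation embeddings are mapped to rotation phases in [-pi, pi] and the
    # rotation is applied by element-wise complex multiplication. The two branches order
    # the computation so broadcasting runs over the candidate dimension; because each
    # rotation has unit modulus, both residuals have the same norm as h * r - t.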
def forward(self, triples, negs=None, mode='single'):
"""The functions used in the training phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
negs: Negative samples, defaults to None.
mode: Choose head-predict or tail-predict, Defaults to 'single'.
Returns:
score: The score of triples.
"""
head_emb, relation_emb, tail_emb = self.tri2emb(triples, negs, mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch["positive_sample"]
head_emb, relation_emb, tail_emb = self.tri2emb(triples, mode=mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
| 4,433 | 36.897436 | 208 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/BoxE.py | import torch.nn as nn
import torch
from torch.autograd import Variable
from .model import Model
class BoxE(Model):
    """`A Box Embedding Model for Knowledge Base Completion`_ (BoxE), which represents each entity as a base point plus a translational bump and each relation as one box (super rectangle) per argument position.
Attributes:
args: Model configuration parameters.
.. _A Box Embedding Model for Knowledge Base Completion: https://arxiv.org/pdf/2007.06267.pdf
"""
def __init__(self, args):
super(BoxE, self).__init__(args)
self.args = args
self.arity = None
self.order = None
self.ent_emb = None
self.rel_emb = None
self.init_emb(args)
def init_emb(self,args):
"""Initialize the entity and relation embeddings in the form of a uniform distribution.
Args:
arity: The maximum ary of the knowledge graph.
            epsilon: Calculate embedding_range.
            order: The distance order of score.
            margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim * 2].
rel_emb: Relation_embedding, shape:[num_rel, emb_dim * arity * 2].
"""
self.arity = 2
self.epsilon = 2.0
self.order = self.args.dis_order
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.margin.item() + self.epsilon) / self.args.emb_dim]),
requires_grad=False
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim*2)
nn.init.uniform_(tensor=self.ent_emb.weight.data[:, :self.args.emb_dim], a=-self.embedding_range.item(), b=self.embedding_range.item())
nn.init.uniform_(tensor=self.ent_emb.weight.data[:, self.args.emb_dim:], a=-self.embedding_range.item(), b=self.embedding_range.item())
size_factor = self.arity * 2
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim * size_factor)
def forward(self, triples, negs=None, mode='single'):
"""The functions used in the training phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
negs: Negative samples, defaults to None.
mode: Choose head-predict or tail-predict, Defaults to 'single'.
Returns:
score: The score of triples.
"""
head_emb_raw, relation_emb, tail_emb_raw = self.tri2emb(triples, negs, mode)
head_emb = head_emb_raw[:, :, :self.args.emb_dim] + tail_emb_raw[:, :, self.args.emb_dim:]
tail_emb = tail_emb_raw[:, :, :self.args.emb_dim] + head_emb_raw[:, :, self.args.emb_dim:]
score = self.score_func(head_emb, relation_emb, tail_emb)
return score
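    # Note: each entity embedding holds a base point in its first emb_dim entries and a
    # translational bump in its last emb_dim entries; the final head/tail representations
    # add the *other* entity's bump to their own base point before being scored against
    # the relation's two boxes.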
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch["positive_sample"]
head_emb_raw, relation_emb, tail_emb_raw = self.tri2emb(triples, mode=mode)
head_emb = head_emb_raw[:, :, :self.args.emb_dim] + tail_emb_raw[:, :, self.args.emb_dim:]
tail_emb = tail_emb_raw[:, :, :self.args.emb_dim] + head_emb_raw[:, :, self.args.emb_dim:]
score = self.score_func(head_emb, relation_emb, tail_emb)
return score
def score_func(self, head_emb, relation_emb, tail_emb):
"""Calculate the score of the triple embedding.
Args:
head_emb: The embedding of head entity.
relation_emb:The embedding of relation.
tail_emb: The embedding of tail entity.
Returns:
score: The score of triples.
"""
box_bas, box_del = torch.chunk(relation_emb, 2, dim = -1)
box_sec = box_bas + 0.5 * box_del
box_fir = box_bas - 0.5 * box_del
box_low = torch.min(box_fir, box_sec)
box_hig = torch.max(box_fir, box_sec)
head_low, tail_low = torch.chunk(box_low, 2, dim = -1)
head_hig, tail_hig = torch.chunk(box_hig, 2, dim = -1)
head_score = self.calc_score(head_emb, head_low, head_hig, self.order)
tail_score = self.calc_score(tail_emb, tail_low, tail_hig, self.order)
score = self.margin.item() - (head_score + tail_score)
return score
def calc_score(self, ent_emb, box_low, box_hig, order = 2):
"""Calculate the norm of distance.
Args:
ent_emb: The embedding of entity.
box_low: The lower boundaries of the super rectangle.
box_hig: The upper boundaries of the super rectangle.
order: The order of this distance.
Returns:
The norm of distance.
"""
return torch.norm(self.dist(ent_emb, box_low, box_hig), p=order, dim=-1)
def dist(self, ent_emb, lb, ub):
"""Calculate the distance.
This function calculate the distance between the entity
and the super rectangle. If the entity is in its target
box, distance inversely correlates with box size, to
maintain low distance inside large boxes and provide a
gradient to keep points inside; if the entity is not in
its target box, box size linearly correlates with distance,
to penalize points outside larger boxes more severely.
Args:
ent_emb: The embedding of entity.
lb: The lower boundaries of the super rectangle.
ub: The upper boundaries of the super rectangle.
Returns:
The distance between entity and super rectangle.
"""
c = (lb + ub) / 2
w = ub - lb + 1
k = 0.5 * (w - 1) * (w - 1 / w)
return torch.where(torch.logical_and(torch.ge(ent_emb, lb), torch.le(ent_emb, ub)),
torch.abs(ent_emb - c) / w,
torch.abs(ent_emb - c) * w - k)
| 6,177 | 35.994012 | 151 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/SimplE.py | import torch.nn as nn
import torch
import torch.nn.functional as F
import math
from .model import Model
from IPython import embed
class SimplE(Model):
    """`SimplE Embedding for Link Prediction in Knowledge Graphs`_ (SimplE), which presents a simple enhancement of CP decomposition (called SimplE) that allows the two embeddings of each entity to be learned dependently.
Attributes:
args: Model configuration parameters.
epsilon: Calculate embedding_range.
margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_h_emb: Entity embedding, shape:[num_ent, emb_dim].
ent_t_emb: Entity embedding, shape:[num_ent, emb_dim].
rel_emb: Relation_embedding, shape:[num_rel, emb_dim].
rel_inv_emb: Inverse Relation_embedding, shape:[num_rel, emb_dim].
.. _SimplE Embedding for Link Prediction in Knowledge Graphs: http://papers.neurips.cc/paper/7682-simple-embedding-for-link-prediction-in-knowledge-graphs.pdf
"""
def __init__(self, args):
super(SimplE, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.init_emb()
def init_emb(self):
"""Initialize the entity and relation embeddings in the form of a uniform distribution."""
self.ent_h_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim)
self.ent_t_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim)
self.rel_inv_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim)
sqrt_size = 6.0 / math.sqrt(self.args.emb_dim)
nn.init.uniform_(tensor=self.ent_h_emb.weight.data, a=-sqrt_size, b=sqrt_size)
nn.init.uniform_(tensor=self.ent_t_emb.weight.data, a=-sqrt_size, b=sqrt_size)
nn.init.uniform_(tensor=self.rel_emb.weight.data, a=-sqrt_size, b=sqrt_size)
nn.init.uniform_(tensor=self.rel_inv_emb.weight.data, a=-sqrt_size, b=sqrt_size)
def score_func(self, hh_emb, rel_emb, tt_emb, ht_emb, rel_inv_emb, th_emb):
"""Calculating the score of triples.
Args:
hh_emb: The head entity embedding on embedding h.
rel_emb: The relation embedding.
tt_emb: The tail entity embedding on embedding t.
ht_emb: The tail entity embedding on embedding h.
            rel_inv_emb: The inverse relation embedding.
th_emb: The head entity embedding on embedding t.
Returns:
score: The score of triples.
"""
# return -(torch.sum(head_emb * relation_emb * tail_emb, -1) + \
# torch.sum(head_emb * rel_inv_emb * tail_emb, -1))/2
scores1 = torch.sum(hh_emb * rel_emb * tt_emb, dim=-1)
scores2 = torch.sum(ht_emb * rel_inv_emb * th_emb, dim=-1)
return torch.clamp((scores1 + scores2) / 2, -20, 20)
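    # Note: the score averages two CP-style products: one combines the head's
    # h-embedding, the relation, and the tail's t-embedding; the other combines the
    # tail's h-embedding, the inverse relation, and the head's t-embedding. Clamping to
    # [-20, 20] keeps the downstream loss numerically stable.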
def l2_loss(self):
return (self.ent_h_emb.weight.norm(p = 2) ** 2 + \
self.ent_t_emb.weight.norm(p = 2) ** 2 + \
self.rel_emb.weight.norm(p = 2) ** 2 + \
self.rel_inv_emb.weight.norm(p = 2) ** 2)
def forward(self, triples, negs=None, mode='single'):
"""The functions used in the training phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
negs: Negative samples, defaults to None.
mode: Choose head-predict or tail-predict, Defaults to 'single'.
Returns:
score: The score of triples.
"""
rel_emb, rel_inv_emb, hh_emb, th_emb, ht_emb, tt_emb = self.get_emb(triples, negs, mode)
return self.score_func(hh_emb, rel_emb, tt_emb, ht_emb, rel_inv_emb, th_emb)
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch["positive_sample"]
rel_emb, rel_inv_emb, hh_emb, th_emb, ht_emb, tt_emb = self.get_emb(triples, mode=mode)
return self.score_func(hh_emb, rel_emb, tt_emb, ht_emb, rel_inv_emb, th_emb)
def get_emb(self, triples, negs=None, mode='single'):
if mode == 'single':
rel_emb = self.rel_emb(triples[:, 1]).unsqueeze(1) # [bs, 1, dim]
rel_inv_emb = self.rel_inv_emb(triples[:, 1]).unsqueeze(1)
hh_emb = self.ent_h_emb(triples[:, 0]).unsqueeze(1) # [bs, 1, dim]
th_emb = self.ent_t_emb(triples[:, 0]).unsqueeze(1) # [bs, 1, dim]
ht_emb = self.ent_h_emb(triples[:, 2]).unsqueeze(1) # [bs, 1, dim]
tt_emb = self.ent_t_emb(triples[:, 2]).unsqueeze(1) # [bs, 1, dim]
elif mode == 'head-batch' or mode == "head_predict":
            if negs is None:  # evaluation phase, so use all entity embeddings directly
hh_emb = self.ent_h_emb.weight.data.unsqueeze(0) # [1, num_ent, dim]
th_emb = self.ent_t_emb.weight.data.unsqueeze(0) # [1, num_ent, dim]
else:
hh_emb = self.ent_h_emb(negs) # [bs, num_neg, dim]
th_emb = self.ent_t_emb(negs) # [bs, num_neg, dim]
rel_emb = self.rel_emb(triples[:, 1]).unsqueeze(1) # [bs, 1, dim]
rel_inv_emb = self.rel_inv_emb(triples[:, 1]).unsqueeze(1) # [bs, 1, dim]
ht_emb = self.ent_h_emb(triples[:, 2]).unsqueeze(1) # [bs, 1, dim]
tt_emb = self.ent_t_emb(triples[:, 2]).unsqueeze(1) # [bs, 1, dim]
elif mode == 'tail-batch' or mode == "tail_predict":
            if negs is None:  # evaluation phase, so use all entity embeddings directly
ht_emb = self.ent_h_emb.weight.data.unsqueeze(0) # [1, num_ent, dim]
tt_emb = self.ent_t_emb.weight.data.unsqueeze(0) # [1, num_ent, dim]
else:
ht_emb = self.ent_h_emb(negs) # [bs, num_neg, dim]
tt_emb = self.ent_t_emb(negs) # [bs, num_neg, dim]
rel_emb = self.rel_emb(triples[:, 1]).unsqueeze(1) # [bs, 1, dim]
rel_inv_emb = self.rel_inv_emb(triples[:, 1]).unsqueeze(1) # [bs, 1, dim]
hh_emb = self.ent_h_emb(triples[:, 0]).unsqueeze(1) # [bs, 1, dim]
th_emb = self.ent_t_emb(triples[:, 0]).unsqueeze(1) # [bs, 1, dim]
return rel_emb, rel_inv_emb, hh_emb, th_emb, ht_emb, tt_emb
| 6,453 | 47.893939 | 212 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/model.py | import torch.nn as nn
import torch
class Model(nn.Module):
def __init__(self, args):
super(Model, self).__init__()
def init_emb(self):
raise NotImplementedError
def score_func(self, head_emb, relation_emb, tail_emb):
raise NotImplementedError
def forward(self, triples, negs, mode):
raise NotImplementedError
def tri2emb(self, triples, negs=None, mode="single"):
"""Get embedding of triples.
        This function gets the embeddings of head, relation, and tail
        respectively. Each embedding has three dimensions.
Args:
triples (tensor): This tensor save triples id, which dimension is
[triples number, 3].
            negs (tensor, optional): This tensor stores the ids of the entities to
                be replaced, and has one dimension. When negs is None, it is
                in the test/eval phase. Defaults to None.
mode (str, optional): This arg indicates that the negative entity
will replace the head or tail entity. when it is 'single', it
means that entity will not be replaced. Defaults to 'single'.
Returns:
head_emb: Head entity embedding.
relation_emb: Relation embedding.
tail_emb: Tail entity embedding.
"""
if mode == "single":
head_emb = self.ent_emb(triples[:, 0]).unsqueeze(1) # [bs, 1, dim]
relation_emb = self.rel_emb(triples[:, 1]).unsqueeze(1) # [bs, 1, dim]
tail_emb = self.ent_emb(triples[:, 2]).unsqueeze(1) # [bs, 1, dim]
elif mode == "head-batch" or mode == "head_predict":
            if negs is None:  # evaluation phase, so use all entity embeddings directly
head_emb = self.ent_emb.weight.data.unsqueeze(0) # [1, num_ent, dim]
else:
head_emb = self.ent_emb(negs) # [bs, num_neg, dim]
relation_emb = self.rel_emb(triples[:, 1]).unsqueeze(1) # [bs, 1, dim]
tail_emb = self.ent_emb(triples[:, 2]).unsqueeze(1) # [bs, 1, dim]
elif mode == "tail-batch" or mode == "tail_predict":
head_emb = self.ent_emb(triples[:, 0]).unsqueeze(1) # [bs, 1, dim]
relation_emb = self.rel_emb(triples[:, 1]).unsqueeze(1) # [bs, 1, dim]
if negs is None:
tail_emb = self.ent_emb.weight.data.unsqueeze(0) # [1, num_ent, dim]
else:
tail_emb = self.ent_emb(negs) # [bs, num_neg, dim]
return head_emb, relation_emb, tail_emb
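    # Shape walkthrough (bs = batch size, dim = embedding size):
    #   "single"     -> head_emb, relation_emb, tail_emb are all [bs, 1, dim]
    #   "head-batch" -> head_emb is [bs, num_neg, dim], or [1, num_ent, dim] when
    #                   negs is None at evaluation time; the other two stay [bs, 1, dim]
    #   "tail-batch" -> symmetric case for tail_emb
    # Broadcasting over the middle dimension lets one score call cover every candidate.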
| 2,572 | 40.5 | 85 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/CrossE.py | import torch.nn as nn
import torch
import torch.nn.functional as F
import math
from .model import Model
from IPython import embed
class CrossE(Model):
    """`Interaction Embeddings for Prediction and Explanation in Knowledge Graphs`_ (CrossE), which simulates crossover interactions (bi-directional effects between entities and relations)
    to select related information when predicting a new triple.
Attributes:
args: Model configuration parameters.
ent_emb: Entity embedding, shape:[num_ent, emb_dim].
rel_emb: Relation embedding, shape:[num_rel, emb_dim].
rel_reverse_emb: Reverse Relation embedding, shape:[num_rel, emb_dim].
h_weighted_vector: Interaction matrix for head entities and relations, shape:[num_rel, emb_dim]
t_weighted_vector: Interaction matrix for tail entities and relations, shape:[num_rel, emb_dim]
hr_bias: Bias for head entity and relation
tr_bias: Bias for tail entity and relation
.. _Interaction Embeddings for Prediction and Explanation in Knowledge Graphs: https://arxiv.org/abs/1903.04750
"""
def __init__(self, args):
super(CrossE, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.init_emb()
def init_emb(self):
self.dropout = nn.Dropout(self.args.dropout)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim)
        self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim)  # relation embedding
        self.rel_reverse_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim)  # reverse relation embedding
        self.h_weighted_vector = nn.Embedding(self.args.num_rel, self.args.emb_dim)  # interaction matrix for head entities
        self.t_weighted_vector = nn.Embedding(self.args.num_rel, self.args.emb_dim)  # interaction matrix for tail entities
# self.bias = nn.Embedding(2, self.args.emb_dim)
self.hr_bias = nn.Parameter(torch.zeros([self.args.emb_dim]))
self.tr_bias = nn.Parameter(torch.zeros([self.args.emb_dim]))
sqrt_size = 6.0 / math.sqrt(self.args.emb_dim)
nn.init.uniform_(tensor=self.ent_emb.weight.data, a=-sqrt_size, b=sqrt_size)
nn.init.uniform_(tensor=self.rel_emb.weight.data, a=-sqrt_size, b=sqrt_size)
nn.init.uniform_(tensor=self.rel_reverse_emb.weight.data, a=-sqrt_size, b=sqrt_size)
nn.init.uniform_(tensor=self.h_weighted_vector.weight.data, a=-sqrt_size, b=sqrt_size)
nn.init.uniform_(tensor=self.t_weighted_vector.weight.data, a=-sqrt_size, b=sqrt_size)
def score_func(self, ent_emb, rel_emb, weighted_vector, mode):
"""Calculating the score of triples.
        The formula for calculating the score is :math:`\sigma(\tanh(c_r \circ h + c_r \circ h \circ r + b) t^{\top})`
Args:
ent_emb: entity embedding
rel_emb: relation embedding
weighted_vector: Interaction matrix for entities and relations
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
x = ent_emb * weighted_vector + rel_emb * ent_emb * weighted_vector
if mode == "tail_predict":
x = torch.tanh(x + self.hr_bias)
else:
x = torch.tanh(x + self.tr_bias)
x = self.dropout(x)
x = torch.mm(x, self.ent_emb.weight.data.t())
x = torch.sigmoid(x)
return x
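    # Note: weighted_vector supplies the relation-specific crossover interaction, so
    # x = c_r * e + c_r * e * r; after the hr/tr bias, tanh and dropout, the matrix
    # product against the full entity table followed by a sigmoid yields one probability
    # per candidate entity.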
def forward(self, triples, mode="single"):
"""The functions used in the training phase, calculate hr_score and tr_score simultaneously
"""
head_emb = self.ent_emb(triples[:, 0])
tail_emb = self.ent_emb(triples[:, 2])
rel_emb = self.rel_emb(triples[:, 1])
rel_reverse_emb = self.rel_reverse_emb(triples[:, 1])
h_weighted_vector = self.h_weighted_vector(triples[:, 1])
t_weighted_vector = self.t_weighted_vector(triples[:, 1])
hr_score = self.score_func(head_emb, rel_emb, h_weighted_vector, "tail_predict")
tr_score = self.score_func(tail_emb, rel_reverse_emb, t_weighted_vector, "head_predict")
# bias = self.bias(triples_id)
return hr_score, tr_score
def get_score(self, batch, mode):
"""The functions used in the testing phase, predict triple score
"""
triples = batch["positive_sample"]
if mode == "tail_predict":
head_emb = self.ent_emb(triples[:, 0])
rel_emb = self.rel_emb(triples[:, 1])
h_weighted_vector = self.h_weighted_vector(triples[:, 1])
return self.score_func(head_emb, rel_emb, h_weighted_vector, "tail_predict")
else:
tail_emb = self.ent_emb(triples[:, 2])
rel_reverse_emb = self.rel_reverse_emb(triples[:, 1])
t_weighted_vector = self.t_weighted_vector(triples[:, 1])
return self.score_func(tail_emb, rel_reverse_emb, t_weighted_vector, "head_predict")
def regularize_loss(self, norm=2):
"""Add regularization to loss
"""
return (self.ent_emb.weight.norm(p = norm) ** norm + \
self.rel_emb.weight.norm(p = norm) ** norm + \
self.rel_reverse_emb.weight.norm(p = norm) ** norm + \
self.h_weighted_vector.weight.norm(p = norm) ** norm + \
self.t_weighted_vector.weight.norm(p = norm) ** norm + \
self.hr_bias.norm(p = norm) ** norm + \
self.tr_bias.norm(p=norm) ** norm)
| 5,460 | 44.890756 | 187 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/TransR.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from .model import Model
from IPython import embed
class TransR(Model):
    """`Learning Entity and Relation Embeddings for Knowledge Graph Completion`_ (TransR), which builds entity and relation embeddings in separate entity and relation spaces.
Attributes:
args: Model configuration parameters.
epsilon: Calculate embedding_range.
margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim].
rel_emb: Relation embedding, shape:[num_rel, emb_dim].
transfer_matrix: Transfer entity and relation embedding, shape:[num_rel, emb_dim*emb_dim]
    .. _Learning Entity and Relation Embeddings for Knowledge Graph Completion: http://www.aaai.org/ocs/index.php/AAAI/AAAI15/paper/download/9571/9523/
"""
def __init__(self, args):
super(TransR, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.norm_flag = args.norm_flag
self.init_emb()
def init_emb(self):
self.epsilon = 2.0
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]), requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.margin.item() + self.epsilon) / self.args.emb_dim]),
requires_grad=False,
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim)
self.transfer_matrix = nn.Embedding(
self.args.num_rel, self.args.emb_dim * self.args.emb_dim
)
nn.init.uniform_(
tensor=self.ent_emb.weight.data,
a=-self.embedding_range.item(),
b=self.embedding_range.item(),
)
nn.init.uniform_(
tensor=self.rel_emb.weight.data,
a=-self.embedding_range.item(),
b=self.embedding_range.item(),
)
diag_matrix = torch.eye(self.args.emb_dim)
diag_matrix = diag_matrix.flatten().repeat(self.args.num_rel, 1)
self.transfer_matrix.weight.data = diag_matrix
# nn.init.uniform_(tensor=self.transfer_matrix.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
def score_func(self, head_emb, relation_emb, tail_emb, mode):
"""Calculating the score of triples.
        The formula for calculating the score is :math:`\gamma - \|M_{r} e_h + r_r - M_{r} e_t\|_1`
"""
if self.norm_flag:
head_emb = F.normalize(head_emb, 2, -1)
relation_emb = F.normalize(relation_emb, 2, -1)
tail_emb = F.normalize(tail_emb, 2, -1)
if mode == "head-batch" or mode == "head_predict":
score = head_emb + (relation_emb - tail_emb)
else:
score = (head_emb + relation_emb) - tail_emb
score = self.margin.item() - torch.norm(score, p=1, dim=-1)
return score
def forward(self, triples, negs=None, mode="single"):
"""The functions used in the training phase, calculate triple score."""
head_emb, relation_emb, tail_emb = self.tri2emb(triples, negs, mode)
rel_transfer = self.transfer_matrix(triples[:, 1]) # shape:[bs, dim]
head_emb = self._transfer(head_emb, rel_transfer, mode)
tail_emb = self._transfer(tail_emb, rel_transfer, mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase, predict triple score."""
triples = batch["positive_sample"]
head_emb, relation_emb, tail_emb = self.tri2emb(triples, mode=mode)
rel_transfer = self.transfer_matrix(triples[:, 1]) # shape:[bs, dim]
head_emb = self._transfer(head_emb, rel_transfer, mode)
tail_emb = self._transfer(tail_emb, rel_transfer, mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
def _transfer(self, emb, rel_transfer, mode):
"""Transfer entity embedding with relation-specific matrix.
Args:
emb: Entity embeddings, shape:[batch_size, emb_dim]
rel_transfer: Relation-specific projection matrix, shape:[batch_size, emb_dim]
mode: Choose head-predict or tail-predict, Defaults to 'single'.
Returns:
transfered entity emb: Shape:[batch_size, emb_dim]
"""
rel_transfer = rel_transfer.view(-1, self.args.emb_dim, self.args.emb_dim)
rel_transfer = rel_transfer.unsqueeze(dim=1)
emb = emb.unsqueeze(dim=-2)
emb = torch.matmul(emb, rel_transfer)
return emb.squeeze(dim=-2)
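    # Shape walkthrough for _transfer: rel_transfer becomes [bs, 1, emb_dim, emb_dim]
    # and emb becomes [bs, n, 1, emb_dim], so the matmul applies the relation-specific
    # projection to every candidate embedding and the squeeze restores [bs, n, emb_dim].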
| 4,844 | 40.410256 | 180 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/DualE.py | import torch.nn as nn
import torch
from .model import Model
from numpy.random import RandomState
import numpy as np
class DualE(Model):
"""`Dual Quaternion Knowledge Graph Embeddings`_ (DualE), which introduces dual quaternions into knowledge graph embeddings.
Attributes:
args: Model configuration parameters.
ent_emb: Entity embedding, shape:[num_ent, emb_dim * 8].
rel_emb: Relation embedding, shape:[num_rel, emb_dim * 8].
    .. _Dual Quaternion Knowledge Graph Embeddings: https://ojs.aaai.org/index.php/AAAI/article/view/16850
"""
def __init__(self, args):
super(DualE, self).__init__(args)
self.args = args
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim*8)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim*8)
self.criterion = nn.Softplus()
self.fc = nn.Linear(100, 50, bias=False)
self.ent_dropout = torch.nn.Dropout(0)
self.rel_dropout = torch.nn.Dropout(0)
self.bn = torch.nn.BatchNorm1d(self.args.emb_dim)
self.init_weights()
def init_weights(self):
r, i, j, k,r_1,i_1,j_1,k_1 = self.quaternion_init(self.args.num_ent, self.args.emb_dim)
r, i, j, k,r_1,i_1,j_1,k_1 = torch.from_numpy(r), torch.from_numpy(i), torch.from_numpy(j), torch.from_numpy(k),\
torch.from_numpy(r_1), torch.from_numpy(i_1), torch.from_numpy(j_1), torch.from_numpy(k_1)
tmp_ent_emb = torch.cat((r, i, j, k,r_1,i_1,j_1,k_1),1)
self.ent_emb.weight.data = tmp_ent_emb.type_as(self.ent_emb.weight.data)
s, x, y, z,s_1,x_1,y_1,z_1 = self.quaternion_init(self.args.num_ent, self.args.emb_dim)
s, x, y, z,s_1,x_1,y_1,z_1 = torch.from_numpy(s), torch.from_numpy(x), torch.from_numpy(y), torch.from_numpy(z), \
torch.from_numpy(s_1), torch.from_numpy(x_1), torch.from_numpy(y_1), torch.from_numpy(z_1)
tmp_rel_emb = torch.cat((s, x, y, z,s_1,x_1,y_1,z_1),1)
self.rel_emb.weight.data = tmp_rel_emb.type_as(self.ent_emb.weight.data)
#Calculate the Dual Hamiltonian product
def _omult(self, a_0, a_1, a_2, a_3, b_0, b_1, b_2, b_3, c_0, c_1, c_2, c_3, d_0, d_1, d_2, d_3):
h_0=a_0*c_0-a_1*c_1-a_2*c_2-a_3*c_3
h1_0=a_0*d_0+b_0*c_0-a_1*d_1-b_1*c_1-a_2*d_2-b_2*c_2-a_3*d_3-b_3*c_3
h_1=a_0*c_1+a_1*c_0+a_2*c_3-a_3*c_2
h1_1=a_0*d_1+b_0*c_1+a_1*d_0+b_1*c_0+a_2*d_3+b_2*c_3-a_3*d_2-b_3*c_2
h_2=a_0*c_2-a_1*c_3+a_2*c_0+a_3*c_1
h1_2=a_0*d_2+b_0*c_2-a_1*d_3-b_1*c_3+a_2*d_0+b_2*c_0+a_3*d_1+b_3*c_1
h_3=a_0*c_3+a_1*c_2-a_2*c_1+a_3*c_0
h1_3=a_0*d_3+b_0*c_3+a_1*d_2+b_1*c_2-a_2*d_1-b_2*c_1+a_3*d_0+b_3*c_0
return (h_0,h_1,h_2,h_3,h1_0,h1_1,h1_2,h1_3)
#Normalization of relationship embedding
def _onorm(self,r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8):
denominator_0 = r_1 ** 2 + r_2 ** 2 + r_3 ** 2 + r_4 ** 2
denominator_1 = torch.sqrt(denominator_0)
deno_cross = r_5 * r_1 + r_6 * r_2 + r_7 * r_3 + r_8 * r_4
r_5 = r_5 - deno_cross / denominator_0 * r_1
r_6 = r_6 - deno_cross / denominator_0 * r_2
r_7 = r_7 - deno_cross / denominator_0 * r_3
r_8 = r_8 - deno_cross / denominator_0 * r_4
r_1 = r_1 / denominator_1
r_2 = r_2 / denominator_1
r_3 = r_3 / denominator_1
r_4 = r_4 / denominator_1
return r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8
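    # Note: _onorm first subtracts from the dual part (r_5..r_8) its projection onto the
    # real part (r_1..r_4), then scales the real part to unit norm, which amounts to
    # normalizing the relation to a unit dual quaternion.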
#Calculate the inner product of the head entity and the relationship Hamiltonian product and the tail entity
def score_func(self, head_emb, relation_emb, tail_emb, mode):
"""Calculating the score of triples.
        The formula for calculating the score is :math:`\langle \boldsymbol{Q}_h \otimes \boldsymbol{W}_r^{\diamond}, \boldsymbol{Q}_t \rangle`
Args:
            head_emb: The head entity embedding, stored as 8 concatenated components.
            relation_emb: The relation embedding, stored as 8 concatenated components.
            tail_emb: The tail entity embedding, stored as 8 concatenated components.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples with regul_1 and regul_2
"""
e_1_h,e_2_h,e_3_h,e_4_h,e_5_h,e_6_h,e_7_h,e_8_h = torch.chunk(head_emb, 8, dim=-1)
e_1_t,e_2_t,e_3_t,e_4_t,e_5_t,e_6_t,e_7_t,e_8_t = torch.chunk(tail_emb, 8, dim=-1)
r_1,r_2,r_3,r_4,r_5,r_6,r_7,r_8 = torch.chunk(relation_emb, 8, dim=-1)
r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8 = self._onorm(r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8 )
o_1, o_2, o_3, o_4, o_5, o_6, o_7, o_8 = self._omult(e_1_h, e_2_h, e_3_h, e_4_h, e_5_h, e_6_h, e_7_h, e_8_h,
r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8)
score_r = (o_1 * e_1_t + o_2 * e_2_t + o_3 * e_3_t + o_4 * e_4_t
+ o_5 * e_5_t + o_6 * e_6_t + o_7 * e_7_t + o_8 * e_8_t)
regul_1 = (torch.mean(torch.abs(e_1_h) ** 2)
+ torch.mean(torch.abs(e_2_h) ** 2)
+ torch.mean(torch.abs(e_3_h) ** 2)
+ torch.mean(torch.abs(e_4_h) ** 2)
+ torch.mean(torch.abs(e_5_h) ** 2)
+ torch.mean(torch.abs(e_6_h) ** 2)
+ torch.mean(torch.abs(e_7_h) ** 2)
+ torch.mean(torch.abs(e_8_h) ** 2)
+ torch.mean(torch.abs(e_1_t) ** 2)
+ torch.mean(torch.abs(e_2_t) ** 2)
+ torch.mean(torch.abs(e_3_t) ** 2)
+ torch.mean(torch.abs(e_4_t) ** 2)
+ torch.mean(torch.abs(e_5_t) ** 2)
+ torch.mean(torch.abs(e_6_t) ** 2)
+ torch.mean(torch.abs(e_7_t) ** 2)
+ torch.mean(torch.abs(e_8_t) ** 2)
)
regul_2 = (torch.mean(torch.abs(r_1) ** 2)
+ torch.mean(torch.abs(r_2) ** 2)
+ torch.mean(torch.abs(r_3) ** 2)
+ torch.mean(torch.abs(r_4) ** 2)
+ torch.mean(torch.abs(r_5) ** 2)
+ torch.mean(torch.abs(r_6) ** 2)
+ torch.mean(torch.abs(r_7) ** 2)
+ torch.mean(torch.abs(r_8) ** 2))
return (torch.sum(score_r, -1), regul_1, regul_2)
def forward(self, triples, negs=None, mode='single'):
        if negs is not None:
head_emb, relation_emb, tail_emb = self.tri2emb(negs)
else:
head_emb, relation_emb, tail_emb = self.tri2emb(triples)
score, regul_1, regul_2 = self.score_func(head_emb, relation_emb, tail_emb, mode)
return (score, regul_1, regul_2)
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch["positive_sample"]
head_emb, relation_emb, tail_emb = self.tri2emb(triples, mode=mode)
e_1_h,e_2_h,e_3_h,e_4_h,e_5_h,e_6_h,e_7_h,e_8_h = torch.chunk(head_emb, 8, dim=-1)
e_1_t,e_2_t,e_3_t,e_4_t,e_5_t,e_6_t,e_7_t,e_8_t = torch.chunk(tail_emb, 8, dim=-1)
r_1,r_2,r_3,r_4,r_5,r_6,r_7,r_8 = torch.chunk(relation_emb, 8, dim=-1)
r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8 = self._onorm(r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8 )
o_1, o_2, o_3, o_4, o_5, o_6, o_7, o_8 = self._omult(e_1_h, e_2_h, e_3_h, e_4_h, e_5_h, e_6_h, e_7_h, e_8_h,
r_1, r_2, r_3, r_4, r_5, r_6, r_7, r_8)
score_r = (o_1 * e_1_t + o_2 * e_2_t + o_3 * e_3_t + o_4 * e_4_t
+ o_5 * e_5_t + o_6 * e_6_t + o_7 * e_7_t + o_8 * e_8_t)
return torch.sum(score_r, -1)
def quaternion_init(self, in_features, out_features, criterion='he'):
"""
Quaternion-valued weight initialization
the initialization scheme is optional on these four datasets,
random initialization can get the same performance. This initialization
scheme might be useful for the case which needs fewer epochs.
"""
fan_in = in_features
fan_out = out_features
if criterion == 'glorot':
s = 1. / np.sqrt(2 * (fan_in + fan_out))
elif criterion == 'he':
s = 1. / np.sqrt(2 * fan_in)
else:
raise ValueError('Invalid criterion: ', criterion)
rng = RandomState(2020)
# Generating randoms and purely imaginary quaternions :
kernel_shape = (in_features, out_features)
number_of_weights = np.prod(kernel_shape) # in_features*out_features
v_i = np.random.uniform(0.0, 1.0, number_of_weights) #(low,high,size)
v_j = np.random.uniform(0.0, 1.0, number_of_weights)
v_k = np.random.uniform(0.0, 1.0, number_of_weights)
# Purely imaginary quaternions unitary
for i in range(0, number_of_weights):
norm = np.sqrt(v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2) + 0.0001
v_i[i] /= norm
v_j[i] /= norm
v_k[i] /= norm
v_i = v_i.reshape(kernel_shape)
v_j = v_j.reshape(kernel_shape)
v_k = v_k.reshape(kernel_shape)
modulus = rng.uniform(low=-s, high=s, size=kernel_shape)
# Calculate the three parts about t
kernel_shape1 = (in_features, out_features)
number_of_weights1 = np.prod(kernel_shape1)
t_i = np.random.uniform(0.0, 1.0, number_of_weights1)
t_j = np.random.uniform(0.0, 1.0, number_of_weights1)
t_k = np.random.uniform(0.0, 1.0, number_of_weights1)
# Purely imaginary quaternions unitary
for i in range(0, number_of_weights1):
norm1 = np.sqrt(t_i[i] ** 2 + t_j[i] ** 2 + t_k[i] ** 2) + 0.0001
t_i[i] /= norm1
t_j[i] /= norm1
t_k[i] /= norm1
t_i = t_i.reshape(kernel_shape1)
t_j = t_j.reshape(kernel_shape1)
t_k = t_k.reshape(kernel_shape1)
tmp_t = rng.uniform(low=-s, high=s, size=kernel_shape1)
phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)
phase1 = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape1)
weight_r = modulus * np.cos(phase)
weight_i = modulus * v_i * np.sin(phase)
weight_j = modulus * v_j * np.sin(phase)
weight_k = modulus * v_k * np.sin(phase)
wt_i = tmp_t * t_i * np.sin(phase1)
wt_j = tmp_t * t_j * np.sin(phase1)
wt_k = tmp_t * t_k * np.sin(phase1)
i_0=weight_r
i_1=weight_i
i_2=weight_j
i_3=weight_k
i_4=(-wt_i*weight_i-wt_j*weight_j-wt_k*weight_k)/2
i_5=(wt_i*weight_r+wt_j*weight_k-wt_k*weight_j)/2
i_6=(-wt_i*weight_k+wt_j*weight_r+wt_k*weight_i)/2
i_7=(wt_i*weight_j-wt_j*weight_i+wt_k*weight_r)/2
return (i_0,i_1,i_2,i_3,i_4,i_5,i_6,i_7)
| 11,028 | 43.471774 | 131 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/ConvE.py | import torch
import torch.nn as nn
from .model import Model
from IPython import embed
from torch.autograd import Variable
from inspect import stack
#TODO: ConvE and SEGNN
class ConvE(Model):
    """`Convolutional 2D Knowledge Graph Embeddings`_ (ConvE), which uses a 2D convolutional network over reshaped entity and relation embeddings.
Attributes:
args: Model configuration parameters.
.. _Convolutional 2D Knowledge Graph Embeddings: https://arxiv.org/pdf/1707.01476.pdf
"""
def __init__(self, args):
super(ConvE, self).__init__(args)
self.args = args
self.emb_ent = None
self.emb_rel = None
self.init_emb(args)
    def init_emb(self, args):
        """Initialize the convolution layer and embeddings.
Args:
conv1: The convolution layer.
fc: The full connection layer.
            bn0, bn1, bn2: The batch normalization layers.
            inp_drop, hid_drop, feg_drop: The dropout layers.
emb_ent: Entity embedding, shape:[num_ent, emb_dim].
emb_rel: Relation_embedding, shape:[num_rel, emb_dim].
"""
self.emb_dim1 = self.args.emb_shape
self.emb_dim2 = self.args.emb_dim // self.emb_dim1
self.emb_ent = torch.nn.Embedding(self.args.num_ent, self.args.emb_dim, padding_idx=0)
self.emb_rel = torch.nn.Embedding(self.args.num_rel, self.args.emb_dim, padding_idx=0)
torch.nn.init.xavier_normal_(self.emb_ent.weight.data)
torch.nn.init.xavier_normal_(self.emb_rel.weight.data)
# Setting dropout
self.inp_drop = torch.nn.Dropout(self.args.inp_drop)
self.hid_drop = torch.nn.Dropout(self.args.hid_drop)
self.feg_drop = torch.nn.Dropout2d(self.args.fet_drop)
self.ent_drop = torch.nn.Dropout(self.args.ent_drop_pred)
self.fc_drop = torch.nn.Dropout(self.args.fc_drop)
# Setting net model
self.conv1 = torch.nn.Conv2d(1, out_channels=self.args.out_channel, kernel_size=self.args.ker_sz, stride=1, padding=0, bias=False)
self.bn0 = torch.nn.BatchNorm2d(1)
self.bn1 = torch.nn.BatchNorm2d(self.args.out_channel)
self.bn2 = torch.nn.BatchNorm1d(self.args.emb_dim)
self.register_parameter('b', torch.nn.Parameter(torch.zeros(self.args.num_ent)))
self.fc = torch.nn.Linear(self.args.hid_size,self.args.emb_dim, bias=False)
def score_func(self, head_emb, relation_emb, choose_emb = None):
"""Calculate the score of the triple embedding.
        This function calculates the score of the embedding.
First, the entity and relation embeddings are reshaped
and concatenated; the resulting matrix is then used as
input to a convolutional layer; the resulting feature
map tensor is vectorised and projected into a k-dimensional
space.
Args:
head_emb: The embedding of head entity.
relation_emb:The embedding of relation.
Returns:
score: Final score of the embedding.
"""
if self.args.model_name == "SEGNN":
head_emb = head_emb.view(-1, 1, head_emb.shape[-1])
relation_emb = relation_emb.view(-1, 1, relation_emb.shape[-1])
stacked_inputs = torch.cat([head_emb, relation_emb], 1)
stacked_inputs = torch.transpose(stacked_inputs, 2, 1).reshape((-1, 1, 2 * self.args.k_h, self.args.k_w))
else:
stacked_inputs = torch.cat([head_emb, relation_emb], 2)
stacked_inputs = self.bn0(stacked_inputs)
x = self.inp_drop(stacked_inputs)
#print(x==stacked_inputs)
x = self.conv1(x)
x = self.bn1(x)
x = torch.nn.functional.relu(x)
if self.args.model_name == 'SEGNN':
x = self.fc_drop(x)
else:
x = self.feg_drop(x)
x = x.view(x.shape[0], -1)
x = self.fc(x)
if self.args.model_name == 'SEGNN':
x = self.bn2(x)
x = torch.nn.functional.relu(x)
x = self.hid_drop(x)
choose_emb = self.ent_drop(choose_emb)
x = torch.mm(x, choose_emb.transpose(1,0))
else:
x = self.hid_drop(x)
x = self.bn2(x)
x = torch.nn.functional.relu(x)
x = torch.mm(x, self.emb_ent.weight.transpose(1,0)) if choose_emb == None \
else torch.mm(x, choose_emb.transpose(1, 0))
x += self.b.expand_as(x)
x = torch.sigmoid(x)
return x
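    # Shape note (plain ConvE path): the reshaped head/relation maps are stacked into a
    # [bs, 1, 2 * emb_dim1, emb_dim2] image, convolved, flattened and projected back to
    # emb_dim by self.fc; the final matrix product against the entity table gives one
    # sigmoid score per entity.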
def forward(self, triples):
"""The functions used in the training phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
Returns:
score: The score of triples.
"""
head_emb = self.emb_ent(triples[:, 0]).view(-1, 1, self.emb_dim1, self.emb_dim2)
rela_emb = self.emb_rel(triples[:, 1]).view(-1, 1, self.emb_dim1, self.emb_dim2)
score = self.score_func(head_emb, rela_emb)
return score
def get_score(self, batch, mode="tail_predict"):
"""The functions used in the testing phase
Args:
batch: A batch of data.
Returns:
score: The score of triples.
"""
triples = batch["positive_sample"]
head_emb = self.emb_ent(triples[:, 0]).view(-1, 1, self.emb_dim1, self.emb_dim2)
rela_emb = self.emb_rel(triples[:, 1]).view(-1, 1, self.emb_dim1, self.emb_dim2)
score = self.score_func(head_emb, rela_emb)
return score | 5,548 | 36.493243 | 138 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/TransE.py | import torch.nn as nn
import torch
from .model import Model
from IPython import embed
class TransE(Model):
"""`Translating Embeddings for Modeling Multi-relational Data`_ (TransE), which represents the relationships as translations in the embedding space.
Attributes:
args: Model configuration parameters.
epsilon: Calculate embedding_range.
margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim].
rel_emb: Relation embedding, shape:[num_rel, emb_dim].
.. _Translating Embeddings for Modeling Multi-relational Data: http://papers.nips.cc/paper/5071-translating-embeddings-for-modeling-multi-rela
"""
def __init__(self, args):
super(TransE, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.init_emb()
def init_emb(self):
"""Initialize the entity and relation embeddings in the form of a uniform distribution.
"""
self.epsilon = 2.0
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.margin.item() + self.epsilon) / self.args.emb_dim]),
requires_grad=False
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim)
nn.init.uniform_(tensor=self.ent_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
nn.init.uniform_(tensor=self.rel_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
def score_func(self, head_emb, relation_emb, tail_emb, mode):
"""Calculating the score of triples.
        The formula for calculating the score is :math:`\gamma - \|h + r - t\|_1`
Args:
head_emb: The head entity embedding.
relation_emb: The relation embedding.
tail_emb: The tail entity embedding.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
score = (head_emb + relation_emb) - tail_emb
score = self.margin.item() - torch.norm(score, p=1, dim=-1)
return score
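    # Worked example: with margin = 9.0 and a perfectly compatible triple where
    # h + r == t, the L1 residual is 0 and the score is exactly 9.0; less compatible
    # triples receive smaller (possibly negative) scores.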
def forward(self, triples, negs=None, mode='single'):
"""The functions used in the training phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
negs: Negative samples, defaults to None.
mode: Choose head-predict or tail-predict, Defaults to 'single'.
Returns:
score: The score of triples.
"""
head_emb, relation_emb, tail_emb = self.tri2emb(triples, negs, mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch["positive_sample"]
head_emb, relation_emb, tail_emb = self.tri2emb(triples, mode=mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
| 3,488 | 34.969072 | 152 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/__init__.py | from .ComplEx import ComplEx
from .TransE import TransE
from .DistMult import DistMult
from .RotatE import RotatE
from .TransH import TransH
from .TransR import TransR
from .SimplE import SimplE
from .BoxE import BoxE
from .ConvE import ConvE
from .CrossE import CrossE
from .HAKE import HAKE
from .PairRE import PairRE
from .DualE import DualE
| 345 | 23.714286 | 30 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/TransH.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from .model import Model
from IPython import embed
class TransH(Model):
    """`Knowledge Graph Embedding by Translating on Hyperplanes`_ (TransH), which applies the translation from head to tail entity on a
    relation-specific hyperplane in order to address TransE's inability to model one-to-many, many-to-one, and many-to-many relations.
Attributes:
args: Model configuration parameters.
epsilon: Calculate embedding_range.
margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim].
rel_emb: Relation embedding, shape:[num_rel, emb_dim].
norm_vector: Relation-specific projection matrix, shape:[num_rel, emb_dim]
.. _Knowledge Graph Embedding by Translating on Hyperplanes: https://ojs.aaai.org/index.php/AAAI/article/view/8870
"""
def __init__(self, args):
super(TransH, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.norm_flag = args.norm_flag
self.init_emb()
def init_emb(self):
self.epsilon = 2.0
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]), requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.margin.item() + self.epsilon) / self.args.emb_dim]),
requires_grad=False,
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim)
self.norm_vector = nn.Embedding(self.args.num_rel, self.args.emb_dim)
nn.init.uniform_(
tensor=self.ent_emb.weight.data,
a=-self.embedding_range.item(),
b=self.embedding_range.item(),
)
nn.init.uniform_(
tensor=self.rel_emb.weight.data,
a=-self.embedding_range.item(),
b=self.embedding_range.item(),
)
nn.init.uniform_(
tensor=self.norm_vector.weight.data,
a=-self.embedding_range.item(),
b=self.embedding_range.item(),
)
def score_func(self, head_emb, relation_emb, tail_emb, mode):
"""Calculating the score of triples.
        The formula for calculating the score is :math:`\gamma - \|e'_{h,r} + d_r - e'_{t,r}\|_1`
Args:
head_emb: The head entity embedding.
relation_emb: The relation embedding.
tail_emb: The tail entity embedding.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
if self.norm_flag:
head_emb = F.normalize(head_emb, 2, -1)
relation_emb = F.normalize(relation_emb, 2, -1)
tail_emb = F.normalize(tail_emb, 2, -1)
if mode == "head-batch" or mode == "head_predict":
score = head_emb + (relation_emb - tail_emb)
else:
score = (head_emb + relation_emb) - tail_emb
score = self.margin.item() - torch.norm(score, p=1, dim=-1)
return score
def forward(self, triples, negs=None, mode="single"):
"""The functions used in the training phase, same as TransE"""
head_emb, relation_emb, tail_emb = self.tri2emb(triples, negs, mode)
norm_vector = self.norm_vector(triples[:, 1]).unsqueeze(
dim=1
) # shape:[bs, 1, dim]
head_emb = self._transfer(head_emb, norm_vector)
tail_emb = self._transfer(tail_emb, norm_vector)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase, same as TransE"""
triples = batch["positive_sample"]
head_emb, relation_emb, tail_emb = self.tri2emb(triples, mode=mode)
norm_vector = self.norm_vector(triples[:, 1]).unsqueeze(
dim=1
) # shape:[bs, 1, dim]
head_emb = self._transfer(head_emb, norm_vector)
tail_emb = self._transfer(tail_emb, norm_vector)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
def _transfer(self, emb, norm_vector):
"""Projecting entity embeddings onto the relation-specific hyperplane
        The formula for projecting entity embeddings is :math:`e'_{r} = e - (w_r^{\top} e) w_r`
Args:
emb: Entity embeddings, shape:[batch_size, emb_dim]
norm_vector: Relation-specific projection matrix, shape:[num_rel, emb_dim]
Returns:
projected entity emb: Shape:[batch_size, emb_dim]
"""
if self.norm_flag:
norm_vector = F.normalize(norm_vector, p=2, dim=-1)
return emb - torch.sum(emb * norm_vector, -1, True) * norm_vector
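    # Note: with norm_flag set, w_r is rescaled to unit length, so subtracting
    # (e . w_r) * w_r removes the component of e along w_r and the projected embedding
    # satisfies e' . w_r == 0, i.e. it lies on the relation-specific hyperplane.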
| 4,937 | 37.578125 | 133 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/KGEModel/HAKE.py | import torch
import torch.nn as nn
from .model import Model
class HAKE(Model):
"""`Learning Hierarchy-Aware Knowledge Graph Embeddings for Link Prediction`_ (HAKE), which maps entities into the polar coordinate system.
Attributes:
args: Model configuration parameters.
epsilon: Calculate embedding_range.
margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim * 2].
rel_emb: Relation embedding, shape:[num_rel, emb_dim * 3].
phase_weight: Calculate phase score.
modules_weight: Calculate modulus score.
.. _Learning Hierarchy-Aware Knowledge Graph Embeddings for Link Prediction: https://arxiv.org/pdf/1911.09419.pdf
"""
def __init__(self, args):
super(HAKE, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.init_emb()
def init_emb(self):
"""Initialize the entity and relation embeddings in the form of a uniform distribution.
"""
self.epsilon = 2.0
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]),
requires_grad=False,
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.margin.item() + self.epsilon) / self.args.emb_dim]),
requires_grad=False,
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim * 2)
nn.init.uniform_(
tensor = self.ent_emb.weight.data,
a = -self.embedding_range.item(),
b = self.embedding_range.item(),
)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim * 3)
nn.init.uniform_(
tensor = self.rel_emb.weight.data,
a = -self.embedding_range.item(),
b = self.embedding_range.item(),
)
nn.init.ones_(
tensor=self.rel_emb.weight[:, self.args.emb_dim: 2*self.args.emb_dim],
)
nn.init.zeros_(
tensor=self.rel_emb.weight[:, 2*self.args.emb_dim: 3*self.args.emb_dim]
)
self.phase_weight = nn.Parameter(
torch.Tensor([self.args.phase_weight * self.embedding_range.item()])
)
self.modules_weight = nn.Parameter(
torch.Tensor([self.args.modulus_weight])
)
def score_func(self, head_emb, relation_emb, tail_emb, mode):
"""Calculating the score of triples.
        The formula for calculating the score is :math:`\gamma - \|h_m \circ r_m - t_m\|_2 - \lambda \|\sin((h_p + r_p - t_p)/2)\|_1`
Args:
head_emb: The head entity embedding.
relation_emb: The relation embedding.
tail_emb: The tail entity embedding.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
phase_head, mod_head = torch.chunk(head_emb, 2, dim=-1)
phase_tail, mod_tail = torch.chunk(tail_emb, 2, dim=-1)
phase_rela, mod_rela, bias_rela = torch.chunk(relation_emb, 3, dim=-1)
pi = 3.141592653589793
phase_head = phase_head / (self.embedding_range.item() / pi)
phase_tail = phase_tail / (self.embedding_range.item() / pi)
phase_rela = phase_rela / (self.embedding_range.item() / pi)
if mode == 'head-batch':
phase_score = phase_head + (phase_rela - phase_tail)
else:
phase_score = (phase_head + phase_rela) - phase_tail
mod_rela = torch.abs(mod_rela)
bias_rela = torch.clamp(bias_rela, max=1)
indicator = (bias_rela < -mod_rela)
bias_rela[indicator] = -mod_rela[indicator]
r_score = mod_head * (mod_rela + bias_rela) - mod_tail * (1 - bias_rela)
phase_score = torch.sum(torch.abs(torch.sin(phase_score /2)), dim=2) * self.phase_weight
r_score = torch.norm(r_score, dim=2) * self.modules_weight
return self.margin.item() - (phase_score + r_score)
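    # Note: entity embeddings split into a phase half and a modulus half, while relations
    # carry phase, modulus and bias parts. The phase term penalizes
    # |sin((h_p + r_p - t_p) / 2)| and the modulus term penalizes
    # h_m * (r_m + bias) - t_m * (1 - bias); each is scaled by its learned weight and
    # subtracted from the margin.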
def forward(self, triples, negs=None, mode='single'):
"""The functions used in the training phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
negs: Negative samples, defaults to None.
mode: Choose head-predict or tail-predict, Defaults to 'single'.
Returns:
score: The score of triples.
"""
head_emb, relation_emb, tail_emb = self.tri2emb(triples, negs, mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch['positive_sample']
head_emb, relation_emb, tail_emb = self.tri2emb(triples, mode=mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
| 5,161 | 35.352113 | 143 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/RuleModel/ComplEx_NNE_AER.py | import torch.nn as nn
import torch
import os
from .model import Model
from IPython import embed
class ComplEx_NNE_AER(Model):
    """`Improving Knowledge Graph Embedding Using Simple Constraints`_ (ComplEx-NNE+AER), which adds non-negativity constraints on entity representations and approximate entailment constraints on relation representations.
Attributes:
args: Model configuration parameters.
        epsilon: Calculate embedding_range.
        margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim].
rel_emb: Relation_embedding, shape:[num_rel, emb_dim].
.. _Improving Knowledge Graph Embedding Using Simple Constraints: https://arxiv.org/pdf/1805.02408.pdf
"""
def __init__(self, args, rel2id):
super(ComplEx_NNE_AER, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.init_emb()
self.rule, self.conf = self.get_rule(rel2id)
    def get_rule(self, rel2id):
        """Get rules for rule-based KGE models such as ComplEx-NNE+AER.
Get rule and confidence from _cons.txt file.
Update:
(rule_p, rule_q): Rule.
confidence: The confidence of rule.
"""
rule_p, rule_q, confidence = [], [], []
with open(os.path.join(self.args.data_path, '_cons.txt')) as file:
lines = file.readlines()
for line in lines:
rule_str, trust = line.strip().split()
body, head = rule_str.split(',')
if '-' in body:
rule_p.append(rel2id[body[1:]])
rule_q.append(rel2id[head])
else:
rule_p.append(rel2id[body])
rule_q.append(rel2id[head])
confidence.append(float(trust))
rule_p = torch.tensor(rule_p).cuda()
rule_q = torch.tensor(rule_q).cuda()
confidence = torch.tensor(confidence).cuda()
return (rule_p, rule_q), confidence
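    # Note: each line of _cons.txt is parsed as "body,head confidence"; a body prefixed
    # with '-' marks the inverse-relation form of the entailment, but this implementation
    # maps both cases to the same relation ids and only keeps the confidence value.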
def init_emb(self):
"""Initialize the entity and relation embeddings in the form of a uniform distribution.
"""
self.epsilon = 2.0
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.margin.item() + self.epsilon) / self.args.emb_dim]),
requires_grad=False
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim * 2)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim * 2)
nn.init.uniform_(tensor=self.ent_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
nn.init.uniform_(tensor=self.rel_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
def score_func(self, head_emb, relation_emb, tail_emb, mode):
"""Calculating the score of triples.
        The formula for calculating the score is :math:`\operatorname{Re}(\langle w_r, e_s, \bar{e}_o \rangle)`
Args:
head_emb: The head entity embedding.
relation_emb: The relation embedding.
tail_emb: The tail entity embedding.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
re_head, im_head = torch.chunk(head_emb, 2, dim=-1)
re_relation, im_relation = torch.chunk(relation_emb, 2, dim=-1)
re_tail, im_tail = torch.chunk(tail_emb, 2, dim=-1)
return torch.sum(
re_head * re_tail * re_relation
+ im_head * im_tail * re_relation
+ re_head * im_tail * im_relation
- im_head * re_tail * im_relation,
-1
)
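    # The four-term sum above is the expanded real part of the ComplEx score
    # Re(<w_r, e_s, conj(e_o)>): writing h = a+bi, r = c+di, t = e+fi gives
    # Re(h * r * conj(t)) = a*c*e + b*c*f + a*d*f - b*d*e, i.e. exactly
    # re_head*re_tail*re_relation + im_head*im_tail*re_relation
    # + re_head*im_tail*im_relation - im_head*re_tail*im_relation.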
def forward(self, triples, negs=None, mode='single'):
"""The functions used in the training phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
negs: Negative samples, defaults to None.
            mode: Choose head-predict or tail-predict, defaults to 'single'.
Returns:
score: The score of triples.
"""
head_emb, relation_emb, tail_emb = self.tri2emb(triples, negs, mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch["positive_sample"]
head_emb, relation_emb, tail_emb = self.tri2emb(triples, mode=mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score | 4,959 | 37.153846 | 226 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/RuleModel/IterE.py | import torch.nn as nn
import torch
import os
from .model import Model
from IPython import embed
from collections import defaultdict
import numpy as np
import pickle
import copy
class IterE(Model):
"""`Iteratively Learning Embeddings and Rules for Knowledge Graph Reasoning. (WWW'19)`_ (IterE).
Attributes:
args: Model configuration parameters.
        epsilon: Calculate embedding_range.
        margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim].
        rel_emb: Relation embedding, shape:[num_rel, emb_dim].
.. _Iteratively Learning Embeddings and Rules for Knowledge Graph Reasoning. (WWW'19): https://dl.acm.org/doi/10.1145/3308558.3313612
"""
def __init__(self, args, train_sampler, test_sampler):
super(IterE, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.init_emb()
#print(self.args)
#print(train_sampler)
#print('run get_axiom()')
self.train_sampler = train_sampler
self.train_triples_base = copy.deepcopy(train_sampler.train_triples)
self.select_probability = self.args.select_probability
self.max_entialments = self.args.max_entialments
self.axiom_types = self.args.axiom_types
self.axiom_weight = self.args.axiom_weight
self.inject_triple_percent = self.args.inject_triple_percent
self.sparsity = 0.995
self.num_entity = self.args.num_ent
self.relation2id=train_sampler.rel2id
self.train_ids=train_sampler.train_triples
self.valid_ids=train_sampler.valid_triples
self.test_ids=train_sampler.test_triples
#print(len(self.train_ids))
#print(len(self.valid_ids))
#print(len(self.test_ids))
self.train_ids_labels_inject = np.reshape([], [-1, 4])
# generate r_ht, hr_t
print('# generate r_ht, hr_t')
self.r_ht, self.hr_t, self.tr_h, self.hr_t_all, self.tr_h_all = self._generate(self.train_ids, self.valid_ids, self.test_ids)
# generate entity2frequency and entity2sparsity dict
print('# generate entity2frequency and entity2sparsity dict')
self.entity2frequency, self.entity2sparsity = self._entity2frequency()
print('# get_axiom')
self.get_axiom()
#self.rule, self.conf = self.get_rule(self.relation2id)
def _entity2frequency(self):
ent2freq = {ent:0 for ent in range(self.num_entity)}
ent2sparsity = {ent:-1 for ent in range(self.num_entity)}
for h,r,t in self.train_ids:
ent2freq[h] += 1
ent2freq[t] += 1
ent_freq_list = np.asarray([ent2freq[ent] for ent in range(self.num_entity)])
ent_freq_list_sort = np.argsort(ent_freq_list)
        max_freq = max(ent2freq.values())
        min_freq = min(ent2freq.values())
for ent, freq in ent2freq.items():
sparsity = 1 - (freq-min_freq)/(max_freq - min_freq)
ent2sparsity[ent] = sparsity
return ent2freq, ent2sparsity
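    # Worked example for the sparsity score above (frequencies are hypothetical):
    # with entity frequencies {e0: 2, e1: 10, e2: 6}, min_freq = 2 and max_freq = 10,
    #   sparsity(e0) = 1 - (2 - 2)/8  = 1.0   (rarest entity)
    #   sparsity(e2) = 1 - (6 - 2)/8  = 0.5
    #   sparsity(e1) = 1 - (10 - 2)/8 = 0.0   (most frequent entity)
    # Entities whose sparsity exceeds self.sparsity (0.995) are treated as sparse
    # when materializing axiom entailments below.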
def _generate(self, train, valid, test):
r_ht = defaultdict(set)
hr_t = defaultdict(set)
tr_h = defaultdict(set)
hr_t_all = defaultdict(list)
tr_h_all = defaultdict(list)
for (h,r,t) in train:
r_ht[r].add((h,t))
hr_t[(h,r)].add(t)
tr_h[(t,r)].add(h)
hr_t_all[(h,r)].append(t)
tr_h_all[(t,r)].append(h)
for (h,r,t) in test+valid:
hr_t_all[(h,r)].append(t)
tr_h_all[(t, r)].append(h)
return r_ht, hr_t, tr_h, hr_t_all, tr_h_all
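    # Index structures built above (a summary, not new behaviour):
    #   r_ht[r]      -> set of (h, t) pairs observed for relation r in training
    #   hr_t[(h, r)] -> set of tails t with (h, r, t) in training
    #   tr_h[(t, r)] -> set of heads h with (h, r, t) in training
    #   hr_t_all / tr_h_all additionally include valid and test triples.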
def get_axiom(self, ):
self.axiom_dir = os.path.join(self.args.data_path, 'axiom_pool')
self.reflexive_dir, self.symmetric_dir, self.transitive_dir, self.inverse_dir, self.subproperty_dir, self.equivalent_dir, self.inferencechain1, self.inferencechain2, self.inferencechain3, self.inferencechain4 = map(lambda x: os.path.join(self.axiom_dir, x),
['axiom_reflexive.txt',
'axiom_symmetric.txt',
'axiom_transitive.txt',
'axiom_inverse.txt',
'axiom_subProperty.txt',
'axiom_equivalent.txt',
'axiom_inferenceChain1.txt',
'axiom_inferenceChain2.txt',
'axiom_inferenceChain3.txt',
'axiom_inferenceChain4.txt'])
# read and materialize axioms
print('# self._read_axioms()')
self._read_axioms()
        print('# self._materialize_axioms()')
        self._materialize_axioms()
        print('# self._init_valid_axioms()')
        self._init_valid_axioms()
def _read_axioms(self):
# for each axiom, the first id is the basic relation
self.axiompool_reflexive = self._read_axiompool_file(self.reflexive_dir)
self.axiompool_symmetric = self._read_axiompool_file(self.symmetric_dir)
self.axiompool_transitive = self._read_axiompool_file(self.transitive_dir)
self.axiompool_inverse = self._read_axiompool_file(self.inverse_dir)
self.axiompool_equivalent = self._read_axiompool_file(self.equivalent_dir)
self.axiompool_subproperty = self._read_axiompool_file(self.subproperty_dir)
self.axiompool_inferencechain1 = self._read_axiompool_file(self.inferencechain1)
self.axiompool_inferencechain2 = self._read_axiompool_file(self.inferencechain2)
self.axiompool_inferencechain3 = self._read_axiompool_file(self.inferencechain3)
self.axiompool_inferencechain4 = self._read_axiompool_file(self.inferencechain4)
self.axiompool = [self.axiompool_reflexive, self.axiompool_symmetric, self.axiompool_transitive,
self.axiompool_inverse, self.axiompool_subproperty, self.axiompool_equivalent,
self.axiompool_inferencechain1,self.axiompool_inferencechain2,
self.axiompool_inferencechain3,self.axiompool_inferencechain4]
def _read_axiompool_file(self, file):
f = open(file, 'r')
axioms = []
for line in f.readlines():
line_list = line.strip().split('\t')
axiom_ids = list(map(lambda x: self.relation2id[x], line_list))
#axiom_ids = self.relation2id[line_list]
axioms.append(axiom_ids)
# for the case reflexive pool is empty
if len(axioms) == 0:
np.reshape(axioms, [-1, 3])
return axioms
    # for each axiom in the axiom pool
# generate a series of entailments for each axiom
def _materialize_axioms(self, generate=True, dump=True, load=False):
if generate:
self.reflexive2entailment = defaultdict(list)
self.symmetric2entailment = defaultdict(list)
self.transitive2entailment = defaultdict(list)
self.inverse2entailment = defaultdict(list)
self.equivalent2entailment = defaultdict(list)
self.subproperty2entailment = defaultdict(list)
self.inferencechain12entailment = defaultdict(list)
self.inferencechain22entailment = defaultdict(list)
self.inferencechain32entailment = defaultdict(list)
self.inferencechain42entailment = defaultdict(list)
self.reflexive_entailments, self.reflexive_entailments_num = self._materialize_sparse(self.axiompool_reflexive, type='reflexive')
self.symmetric_entailments, self.symmetric_entailments_num = self._materialize_sparse(self.axiompool_symmetric, type='symmetric')
self.transitive_entailments, self.transitive_entailments_num = self._materialize_sparse(self.axiompool_transitive, type='transitive')
self.inverse_entailments, self.inverse_entailments_num = self._materialize_sparse(self.axiompool_inverse, type='inverse')
self.subproperty_entailments, self.subproperty_entailments_num = self._materialize_sparse(self.axiompool_subproperty, type='subproperty')
self.equivalent_entailments, self.equivalent_entailments_num = self._materialize_sparse(self.axiompool_equivalent, type='equivalent')
self.inferencechain1_entailments, self.inferencechain1_entailments_num = self._materialize_sparse(self.axiompool_inferencechain1, type='inferencechain1')
self.inferencechain2_entailments, self.inferencechain2_entailments_num = self._materialize_sparse(self.axiompool_inferencechain2, type='inferencechain2')
self.inferencechain3_entailments, self.inferencechain3_entailments_num = self._materialize_sparse(self.axiompool_inferencechain3, type='inferencechain3')
self.inferencechain4_entailments, self.inferencechain4_entailments_num = self._materialize_sparse(self.axiompool_inferencechain4, type='inferencechain4')
print('reflexive entailments for sparse: ', self.reflexive_entailments_num)
print('symmetric entailments for sparse: ', self.symmetric_entailments_num)
print('transitive entailments for sparse: ', self.transitive_entailments_num)
print('inverse entailments for sparse: ', self.inverse_entailments_num)
print('subproperty entailments for sparse: ', self.subproperty_entailments_num)
print('equivalent entailments for sparse: ', self.equivalent_entailments_num)
print('inferencechain1 entailments for sparse: ', self.inferencechain1_entailments_num)
print('inferencechain2 entailments for sparse: ', self.inferencechain2_entailments_num)
print('inferencechain3 entailments for sparse: ', self.inferencechain3_entailments_num)
print('inferencechain4 entailments for sparse: ', self.inferencechain4_entailments_num)
print("finish generate axioms entailments for sparse")
if dump:
pickle.dump(self.reflexive_entailments, open(os.path.join(self.axiom_dir, 'reflexive_entailments'), 'wb'))
pickle.dump(self.symmetric_entailments, open(os.path.join(self.axiom_dir, 'symmetric_entailments'), 'wb'))
pickle.dump(self.transitive_entailments, open(os.path.join(self.axiom_dir, 'transitive_entailments'), 'wb'))
pickle.dump(self.inverse_entailments, open(os.path.join(self.axiom_dir, 'inverse_entailments'), 'wb'))
pickle.dump(self.subproperty_entailments, open(os.path.join(self.axiom_dir, 'subproperty_entailments'), 'wb'))
#pickle.dump(self.inferencechain_entailments, open(os.path.join(self.axiom_dir, 'inferencechain_entailments'), 'wb'))
pickle.dump(self.equivalent_entailments, open(os.path.join(self.axiom_dir, 'equivalent_entailments'), 'wb'))
pickle.dump(self.inferencechain1_entailments,
open(os.path.join(self.axiom_dir, 'inferencechain1_entailments'), 'wb'))
pickle.dump(self.inferencechain2_entailments,
open(os.path.join(self.axiom_dir, 'inferencechain2_entailments'), 'wb'))
pickle.dump(self.inferencechain3_entailments,
open(os.path.join(self.axiom_dir, 'inferencechain3_entailments'), 'wb'))
pickle.dump(self.inferencechain4_entailments,
open(os.path.join(self.axiom_dir, 'inferencechain4_entailments'), 'wb'))
print("finish dump axioms entialments")
if load:
print("load refexive entailments...")
self.reflexive_entailments = pickle.load(open(os.path.join(self.axiom_dir, 'reflexive_entailments'), 'rb'))
print(self.reflexive_entailments)
print('load symmetric entailments...')
self.symmetric_entailments = pickle.load(open(os.path.join(self.axiom_dir, 'symmetric_entailments'), 'rb'))
print("load transitive entialments... ")
self.transitive_entailments = pickle.load(open(os.path.join(self.axiom_dir, 'transitive_entailments'), 'rb'))
print("load inverse entailments...")
self.inverse_entailments = pickle.load(open(os.path.join(self.axiom_dir, 'inverse_entailments'), 'rb'))
print("load subproperty entailments...")
self.subproperty_entailments = pickle.load(open(os.path.join(self.axiom_dir, 'subproperty_entailments'), 'rb'))
#print("load inferencechain entailments...")
#self.inferencechain_entailments = pickle.load(open(os.path.join(self.axiom_dir, 'inferencechain_entailments'), 'rb'))
print("load equivalent entialments...")
self.equivalent_entailments = pickle.load(open(os.path.join(self.axiom_dir, 'equivalent_entailments'), 'rb'))
print("load inferencechain1 entailments...")
self.inferencechain1_entailments = pickle.load(
open(os.path.join(self.axiom_dir, 'inferencechain1_entailments'), 'rb'))
print("load inferencechain2 entailments...")
self.inferencechain2_entailments = pickle.load(
open(os.path.join(self.axiom_dir, 'inferencechain2_entailments'), 'rb'))
print("load inferencechain3 entailments...")
self.inferencechain3_entailments = pickle.load(
open(os.path.join(self.axiom_dir, 'inferencechain3_entailments'), 'rb'))
print("load inferencechain4 entailments...")
self.inferencechain4_entailments = pickle.load(
open(os.path.join(self.axiom_dir, 'inferencechain4_entailments'), 'rb'))
print("finish load axioms entailments")
def _materialize_sparse(self, axioms, type=None, sparse = False):
inference = []
# axiom2entailment is a dict
# with the all axioms in the axiom pool as keys
# and all the entailments for each axiom as values
axiom_list = axioms
length = len(axioms)
max_entailments = self.max_entialments
num = 0
if length == 0:
if type == 'reflexive':
np.reshape(inference, [-1, 3])
elif type == 'symmetric' or type =='inverse' or type =='equivalent' or type =='subproperty':
np.reshape(inference, [-1, 6])
elif type=='transitive' or type=='inferencechain':
np.reshape(inference, [-1, 9])
else:
raise NotImplementedError
return inference, num
if type == 'reflexive':
for axiom in axiom_list:
axiom_key =tuple(axiom)
r = axiom[0]
inference_tmp = []
for (h,t) in self.r_ht[r]:
                    # filter out axioms with too many entailments
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 2])
break
if h != t and self.entity2sparsity[h]>self.sparsity:
num += 1
inference_tmp.append([h,r,h])
for entailment in inference_tmp:
self.reflexive2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'symmetric':
#self.symmetric2entailment = defaultdict(list)
for axiom in axiom_list:
axiom_key = tuple(axiom)
r = axiom[0]
inference_tmp = []
for (h,t) in self.r_ht[r]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 2])
break
if (t,h) not in self.r_ht[r] and (self.entity2sparsity[h]>self.sparsity or self.entity2sparsity[t]>self.sparsity):
num += 1
inference_tmp.append([h,r,t,t,r,h])
for entailment in inference_tmp:
self.symmetric2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'transitive':
#self.transitive2entailment = defaultdict(list)
for axiom in axiom_list:
axiom_key = tuple(axiom)
r = axiom[0]
inference_tmp = []
for (h,t) in self.r_ht[r]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
                    # (t,r,e) exists but (h,r,e) does not exist and e != h
for e in self.hr_t[(t,r)]- self.hr_t[(h,r)]:
if e != h and (self.entity2sparsity[h]>self.sparsity or self.entity2sparsity[e]>self.sparsity):
num += 1
inference_tmp.append([h,r,t,t,r,e,h,r,e])
for entailment in inference_tmp:
self.transitive2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inverse':
for axiom in axiom_list:
axiom_key = tuple(axiom)
r1,r2 = axiom
inference_tmp = []
for (h,t) in self.r_ht[r1]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 6])
break
if (t,h) not in self.r_ht[r2] and (self.entity2sparsity[h]>self.sparsity or self.entity2sparsity[t]>self.sparsity):
num += 1
inference_tmp.append([h,r1,t, t,r2,h])
#self.inverse2entailment[axiom_key].append([h,r1,t, t,r2,h])
for entailment in inference_tmp:
self.inverse2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'equivalent' or type =='subproperty':
for axiom in axiom_list:
axiom_key = tuple(axiom)
r1,r2 = axiom
inference_tmp = []
for (h,t) in self.r_ht[r1]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 6])
break
if (h,t) not in self.r_ht[r2] and (self.entity2sparsity[h]>self.sparsity or self.entity2sparsity[t]>self.sparsity):
num += 1
inference_tmp.append([h,r1,t, h,r2,t])
for entailment in inference_tmp:
self.equivalent2entailment[axiom_key].append(entailment)
self.subproperty2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain1':
self.inferencechain12entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
# print('%d/%d' % (i, length))
r1, r2, r3 = axiom
inference_tmp = []
for (e, h) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.hr_t[(e, r3)]:
if (h, t) not in self.r_ht[r1] and (
self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[e] > self.sparsity):
num += 1
inference_tmp.append([e, r2, h, e, r3, t, h, r1, t])
#self.inferencechain12entailment[axiom_key].append([[e, r2, h, e, r3, t, h, r1, t]])
for entailment in inference_tmp:
self.inferencechain12entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain2':
self.inferencechain22entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
# print('%d/%d' % (i, length))
r1, r2, r3 = axiom
inference_tmp = []
for (e, h) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.tr_h[(e, r3)]:
if (h, t) not in self.r_ht[r1] and (
self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[e] > self.sparsity):
num += 1
inference_tmp.append([e, r2, h, t, r3, e, h, r1, t])
#self.inferencechain22entailment[axiom_key].append([[e, r2, h, t, r3, e, h, r1, t]])
for entailment in inference_tmp:
self.inferencechain22entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain3':
self.inferencechain32entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
# print('%d/%d' % (i, length))
r1, r2, r3 = axiom
inference_tmp = []
for (h, e) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.hr_t[(e, r3)]:
if (h, t) not in self.r_ht[r1] and (
self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[e] > self.sparsity):
num += 1
inference_tmp.append([h, r2, e, e, r3, t, h, r1, t])
for entailment in inference_tmp:
self.inferencechain32entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain4':
self.inferencechain42entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
# print('%d/%d' % (i, length))
r1, r2, r3 = axiom
inference_tmp = []
for (h, e) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.tr_h[(e, r3)]:
if (h, t) not in self.r_ht[r1] and (
self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[e] > self.sparsity):
num += 1
inference_tmp.append([h, r2, e, t, r3, e, h, r1, t])
for entailment in inference_tmp:
self.inferencechain42entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
return inference, num
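    # Row layouts of the entailments built above; the last three columns are
    # always the newly inferred (h, r, t) triple, which update_train_triples()
    # later slices out with [:, -3:]:
    #   reflexive                                -> [h, r, h]                              (3 cols)
    #   symmetric/inverse/equivalent/subproperty -> [premise(3), conclusion(3)]            (6 cols)
    #   transitive / inferencechain1..4          -> [premise(3), premise(3), conclusion(3)] (9 cols)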
def _materialize(self, axioms, type=None, sparse=False):
inference = []
# axiom2entailment is a dict
# with the all axioms in the axiom pool as keys
# and all the entailments for each axiom as values
axiom_list = axioms
# print('axiom_list', axiom_list)
length = len(axioms)
max_entailments = 5000
num = 0
if length == 0:
if type == 'reflexive':
np.reshape(inference, [-1, 3])
elif type == 'symmetric' or type == 'inverse' or type == 'equivalent' or type == 'subproperty':
np.reshape(inference, [-1, 6])
elif type == 'transitive' or type == 'inferencechain':
np.reshape(inference, [-1, 9])
else:
raise NotImplementedError
return inference, num
if type == 'reflexive':
for axiom in axiom_list:
axiom_key = tuple(axiom)
r = axiom[0]
inference_tmp = []
for (h, t) in self.r_ht[r]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 2])
break
if h != t: #and self.entity2sparsity[h] > self.sparsity:
num += 1
inference_tmp.append([h, r, h])
for entailment in inference_tmp:
self.reflexive2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'symmetric':
for axiom in axiom_list:
axiom_key = tuple(axiom)
r = axiom[0]
inference_tmp = []
for (h, t) in self.r_ht[r]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 2])
break
if (t, h) not in self.r_ht[r]: #and (self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[t] > self.sparsity):
num += 1
inference_tmp.append([h, r, t, t, r, h])
for entailment in inference_tmp:
self.symmetric2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'transitive':
for axiom in axiom_list:
axiom_key = tuple(axiom)
r = axiom[0]
inference_tmp = []
for (h, t) in self.r_ht[r]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
                    # (t,r,e) exists but (h,r,e) does not exist and e != h
for e in self.hr_t[(t, r)] - self.hr_t[(h, r)]:
if e != h: #and (self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[e] > self.sparsity):
num += 1
inference_tmp.append([h, r, t, t, r, e, h, r, e])
for entailment in inference_tmp:
self.transitive2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inverse':
# self.inverse2entailment = defaultdict(list)
for axiom in axiom_list:
axiom_key = tuple(axiom)
r1, r2 = axiom
inference_tmp = []
for (h, t) in self.r_ht[r1]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 6])
break
if (t, h) not in self.r_ht[r2]: #and (self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[t] > self.sparsity):
num += 1
inference_tmp.append([h, r1, t, t, r2, h])
for entailment in inference_tmp:
self.inverse2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'equivalent' or type == 'subproperty':
for axiom in axiom_list:
axiom_key = tuple(axiom)
r1, r2 = axiom
inference_tmp = []
for (h, t) in self.r_ht[r1]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 6])
break
if (h, t) not in self.r_ht[r2]: #and (self.entity2sparsity[h] > self.sparsity or self.entity2sparsity[t] > self.sparsity):
num += 1
inference_tmp.append([h, r1, t, h, r2, t])
for entailment in inference_tmp:
self.equivalent2entailment[axiom_key].append(entailment)
self.subproperty2entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain1':
self.inferencechain12entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
# print('%d/%d' % (i, length))
r1, r2, r3 = axiom
inference_tmp = []
for (e, h) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.hr_t[(e, r3)]:
if (h, t) not in self.r_ht[r1]:
num += 1
inference_tmp.append([e, r2, h, e, r3, t, h, r1, t])
for entailment in inference_tmp:
self.inferencechain12entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain2':
self.inferencechain22entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
# print('%d/%d' % (i, length))
r1, r2, r3 = axiom
inference_tmp = []
for (e, h) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.tr_h[(e, r3)]:
if (h, t) not in self.r_ht[r1]:
num += 1
inference_tmp.append([e, r2, h, t, r3, e, h, r1, t])
for entailment in inference_tmp:
self.inferencechain22entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain3':
self.inferencechain32entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
# print('%d/%d' % (i, length))
r1, r2, r3 = axiom
inference_tmp = []
for (h, e) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.hr_t[(e, r3)]:
if (h, t) not in self.r_ht[r1]:
num += 1
inference_tmp.append([h, r2, e, e, r3, t, h, r1, t])
for entailment in inference_tmp:
self.inferencechain32entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
if type == 'inferencechain4':
self.inferencechain42entailment = defaultdict(list)
i = 0
for axiom in axiom_list:
axiom_key = tuple(axiom)
i += 1
r1, r2, r3 = axiom
inference_tmp = []
for (h, e) in self.r_ht[r2]:
if len(inference_tmp) > max_entailments:
inference_tmp = np.reshape([], [-1, 9])
break
for t in self.tr_h[(e, r3)]:
if (h, t) not in self.r_ht[r1]:
num += 1
inference_tmp.append([h, r2, e, t, r3, e, h, r1, t])
for entailment in inference_tmp:
self.inferencechain42entailment[axiom_key].append(entailment)
inference.append(inference_tmp)
return inference, num
def _init_valid_axioms(self):
# init valid axioms
self.valid_reflexive, self.valid_symmetric, self.valid_transitive,\
self.valid_inverse, self.valid_subproperty, self.valid_equivalent,\
self.valid_inferencechain1, self.valid_inferencechain2, \
self.valid_inferencechain3, self.valid_inferencechain4 = [[] for x in range(self.axiom_types)]
# init valid axiom entailments
self.valid_reflexive2entailment, self.valid_symmetric2entailment, self.valid_transitive2entailment, \
self.valid_inverse2entailment, self.valid_subproperty2entailment, self.valid_equivalent2entailment, \
self.valid_inferencechain12entailment, self.valid_inferencechain22entailment, \
self.valid_inferencechain32entailment, self.valid_inferencechain42entailment = [[] for x in range(self.axiom_types)]
# init valid axiom entailments probability
self.valid_reflexive_p, self.valid_symmetric_p, self.valid_transitive_p, \
self.valid_inverse_p, self.valid_subproperty_p, self.valid_equivalent_p, \
self.valid_inferencechain1_p, self.valid_inferencechain2_p,\
self.valid_inferencechain3_p, self.valid_inferencechain4_p= [[] for x in range(self.axiom_types)]
# init valid axiom batchsize
self.reflexive_batchsize = 1
self.symmetric_batchsize = 1
self.transitive_batchsize = 1
self.inverse_batchsize = 1
self.subproperty_batchsize = 1
self.equivalent_batchsize = 1
#self.inferencechain_batchsize = 1
self.inferencechain1_batchsize = 1
self.inferencechain2_batchsize = 1
self.inferencechain3_batchsize = 1
self.inferencechain4_batchsize = 1
# add the new triples from axioms to training triple
def update_train_triples(self, epoch=0, update_per = 10):
"""add the new triples from axioms to training triple
Args:
epoch (int, optional): epoch in training process. Defaults to 0.
update_per (int, optional): Defaults to 10.
Returns:
            updated_train_data: the training triples after adding the new triples inferred from axioms
"""
reflexive_triples, symmetric_triples, transitive_triples, inverse_triples,\
equivalent_triples, subproperty_triples, inferencechain1_triples, \
inferencechain2_triples, inferencechain3_triples, inferencechain4_triples = [ np.reshape(np.asarray([]), [-1, 3]) for i in range(self.axiom_types)]
reflexive_p, symmetric_p, transitive_p, inverse_p, \
equivalent_p, subproperty_p, inferencechain1_p, \
inferencechain2_p, inferencechain3_p, inferencechain4_p = [np.reshape(np.asarray([]), [-1, 1]) for i in
range(self.axiom_types)]
updated_train_data=None
if epoch >= 5:
print("len(self.valid_reflexive2entailment):", len(self.valid_reflexive2entailment))
print("len(self.valid_symmetric2entailment):", len(self.valid_symmetric2entailment))
print("len(self.valid_transitive2entailment)", len(self.valid_transitive2entailment))
print("len(self.valid_inverse2entailment)", len(self.valid_inverse2entailment))
print("len(self.valid_equivalent2entailment)", len(self.valid_equivalent2entailment))
print("len(self.valid_subproperty2entailment)", len(self.valid_subproperty2entailment))
valid_reflexive2entailment, valid_symmetric2entailment, valid_transitive2entailment,\
valid_inverse2entailment, valid_equivalent2entailment, valid_subproperty2entailment, \
valid_inferencechain12entailment, valid_inferencechain22entailment,\
valid_inferencechain32entailment, valid_inferencechain42entailment = [[] for i in range(10)]
if len(self.valid_reflexive2entailment)>0:
valid_reflexive2entailment = np.reshape(np.asarray(self.valid_reflexive2entailment), [-1, 3])
reflexive_triples = np.asarray(valid_reflexive2entailment)[:, -3:]
reflexive_p = np.reshape(np.asarray(self.valid_reflexive_p),[-1,1])
if len(self.valid_symmetric2entailment) > 0:
valid_symmetric2entailment = np.reshape(np.asarray(self.valid_symmetric2entailment), [-1, 6])
symmetric_triples = np.asarray(valid_symmetric2entailment)[:, -3:]
symmetric_p = np.reshape(np.asarray(self.valid_symmetric_p),[-1,1])
if len(self.valid_transitive2entailment) > 0:
valid_transitive2entailment = np.reshape(np.asarray(self.valid_transitive2entailment), [-1, 9])
transitive_triples = np.asarray(valid_transitive2entailment)[:, -3:]
transitive_p = np.reshape(np.asarray(self.valid_transitive_p), [-1, 1])
if len(self.valid_inverse2entailment) > 0:
valid_inverse2entailment = np.reshape(np.asarray(self.valid_inverse2entailment), [-1, 6])
inverse_triples = np.asarray(valid_inverse2entailment)[:, -3:]
inverse_p = np.reshape(np.asarray(self.valid_inverse_p), [-1, 1])
if len(self.valid_equivalent2entailment) > 0:
valid_equivalent2entailment = np.reshape(np.asarray(self.valid_equivalent2entailment), [-1, 6])
equivalent_triples = np.asarray(valid_equivalent2entailment)[:, -3:]
equivalent_p = np.reshape(np.asarray(self.valid_equivalent_p), [-1, 1])
if len(self.valid_subproperty2entailment) > 0:
valid_subproperty2entailment = np.reshape(np.asarray(self.valid_subproperty2entailment), [-1, 6])
subproperty_triples = np.asarray(valid_subproperty2entailment)[:, -3:]
subproperty_p = np.reshape(np.asarray(self.valid_subproperty_p),[-1,1])
if len(self.valid_inferencechain12entailment) > 0:
valid_inferencechain12entailment = np.reshape(np.asarray(self.valid_inferencechain12entailment), [-1, 9])
inferencechain1_triples = np.asarray(valid_inferencechain12entailment)[:, -3:]
inferencechain1_p = np.reshape(np.asarray(self.valid_inferencechain1_p), [-1, 1])
if len(self.valid_inferencechain22entailment) > 0:
valid_inferencechain22entailment = np.reshape(np.asarray(self.valid_inferencechain22entailment), [-1, 9])
inferencechain2_triples = np.asarray(valid_inferencechain22entailment)[:, -3:]
inferencechain2_p = np.reshape(np.asarray(self.valid_inferencechain2_p), [-1, 1])
if len(self.valid_inferencechain32entailment) > 0:
valid_inferencechain32entailment = np.reshape(np.asarray(self.valid_inferencechain32entailment), [-1, 9])
inferencechain3_triples = np.asarray(valid_inferencechain32entailment)[:, -3:]
inferencechain3_p = np.reshape(np.asarray(self.valid_inferencechain3_p), [-1, 1])
if len(self.valid_inferencechain42entailment) > 0:
valid_inferencechain42entailment = np.reshape(np.asarray(self.valid_inferencechain42entailment), [-1, 9])
inferencechain4_triples = np.asarray(valid_inferencechain42entailment)[:, -3:]
inferencechain4_p = np.reshape(np.asarray(self.valid_inferencechain4_p), [-1, 1])
# pickle.dump(self.reflexive_entailments, open(os.path.join(self.axiom_dir, 'reflexive_entailments'), 'wb'))
# store all the injected triples
entailment_all = (valid_reflexive2entailment, valid_symmetric2entailment, valid_transitive2entailment,
valid_inverse2entailment, valid_equivalent2entailment, valid_subproperty2entailment,
valid_inferencechain12entailment,valid_inferencechain22entailment,
valid_inferencechain32entailment,valid_inferencechain42entailment)
pickle.dump(entailment_all, open(os.path.join(self.axiom_dir, 'valid_entailments.pickle'), 'wb'))
train_inject_triples = np.concatenate([reflexive_triples, symmetric_triples, transitive_triples, inverse_triples,
equivalent_triples, subproperty_triples, inferencechain1_triples,
inferencechain2_triples,inferencechain3_triples,inferencechain4_triples],
axis=0)
train_inject_triples_p = np.concatenate([reflexive_p,symmetric_p, transitive_p, inverse_p,
equivalent_p, subproperty_p, inferencechain1_p,
inferencechain2_p,inferencechain3_p,inferencechain4_p],
axis=0)
self.train_inject_triples = train_inject_triples
inject_labels = np.reshape(np.ones(len(train_inject_triples)), [-1, 1]) * self.axiom_weight * train_inject_triples_p
train_inject_ids_labels = np.concatenate([train_inject_triples, inject_labels],
axis=1)
self.train_ids_labels_inject = train_inject_triples#train_inject_ids_labels
print('num reflexive triples', len(reflexive_triples))
print('num symmetric triples', len(symmetric_triples))
print('num transitive triples', len(transitive_triples))
print('num inverse triples', len(inverse_triples))
print('num equivalent triples', len(equivalent_triples))
print('num subproperty triples', len(subproperty_triples))
print('num inferencechain1 triples', len(inferencechain1_triples))
print('num inferencechain2 triples', len(inferencechain2_triples))
print('num inferencechain3 triples', len(inferencechain3_triples))
print('num inferencechain4 triples', len(inferencechain4_triples))
#print(self.train_ids_labels_inject)
updated_train_data=self.generate_new_train_triples()
return updated_train_data
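    # Summary of the injection step above: once epoch >= 5, every validated
    # entailment is reshaped to its fixed width (3/6/9 columns) and the inferred
    # triple in the last three columns is collected per axiom type; the soft
    # labels are axiom_weight * axiom probability, everything is dumped to
    # axiom_pool/valid_entailments.pickle, and generate_new_train_triples()
    # finally appends a (capped) subset of the inferred triples to the training set.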
def split_embedding(self, embedding):
"""split embedding
Args:
embedding: embeddings need to be splited, shape:[None, dim].
Returns:
probability: The similrity between two matrices.
"""
# embedding: [None, dim]
assert self.args.emb_dim % 4 == 0
num_scalar = self.args.emb_dim // 2
num_block = self.args.emb_dim // 4
if len(embedding.size()) ==2:
embedding_scalar = embedding[:, 0:num_scalar]
embedding_x = embedding[:, num_scalar:-num_block]
embedding_y = embedding[:, -num_block:]
elif len(embedding.size()) ==3:
embedding_scalar = embedding[:, :, 0:num_scalar]
embedding_x = embedding[:, :, num_scalar:-num_block]
embedding_y = embedding[:, :, -num_block:]
else:
raise NotImplementedError
return embedding_scalar, embedding_x, embedding_y
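    # Split sizes for the ANALOGY-style decomposition above, e.g. with emb_dim = 8
    # (emb_dim must be divisible by 4): num_scalar = 4 and num_block = 2, so
    #   embedding[..., 0:4] -> scalar part
    #   embedding[..., 4:6] -> x components of the 2x2 blocks
    #   embedding[..., 6:8] -> y components of the 2x2 blocks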
    # calculate the similarity between two matrices
# head: [?, dim]
# tail: [?, dim] or [1,dim]
def sim(self, head=None, tail=None, arity=None):
"""calculate the similrity between two matrices
Args:
head: embeddings of head, shape:[batch_size, dim].
tail: embeddings of tail, shape:[batch_size, dim] or [1, dim].
arity: 1,2 or 3
Returns:
            probability: The similarity between the two matrices, rescaled to [0, 1].
"""
if arity == 1:
A_scalar, A_x, A_y = self.split_embedding(head)
elif arity == 2:
M1_scalar, M1_x, M1_y = self.split_embedding(head[0])
M2_scalar, M2_x, M2_y = self.split_embedding(head[1])
A_scalar= M1_scalar * M2_scalar
A_x = M1_x*M2_x - M1_y*M2_y
A_y = M1_x*M2_y + M1_y*M2_x
elif arity==3:
M1_scalar, M1_x, M1_y = self.split_embedding(head[0])
M2_scalar, M2_x, M2_y = self.split_embedding(head[1])
M3_scalar, M3_x, M3_y = self.split_embedding(head[2])
M1M2_scalar = M1_scalar * M2_scalar
M1M2_x = M1_x * M2_x - M1_y * M2_y
M1M2_y = M1_x * M2_y + M1_y * M2_x
A_scalar = M1M2_scalar * M3_scalar
A_x = M1M2_x * M3_x - M1M2_y * M3_y
A_y = M1M2_x * M3_y + M1M2_y * M3_x
else:
            raise NotImplementedError
B_scala, B_x, B_y = self.split_embedding(tail)
similarity = torch.cat([(A_scalar - B_scala)**2, (A_x - B_x)**2, (A_x - B_x)**2, (A_y - B_y)**2, (A_y - B_y)**2 ], dim=1)
similarity = torch.sqrt(torch.sum(similarity, dim=1))
        # rescale the similarity into a [0, 1] probability
probability = (torch.max(similarity)-similarity)/(torch.max(similarity)-torch.min(similarity))
return probability
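    # Note on sim(): the x- and y-difference terms are concatenated twice, which
    # matches the squared Frobenius distance of 2x2 blocks of the form
    # [[x, y], [-y, x]], so `similarity` is a Euclidean distance between the two
    # (block-diagonal) relation matrices; the final rescaling maps the smallest
    # distance in the batch to probability 1 and the largest to 0.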
    # generate a probability for each axiom in the axiom pool
def run_axiom_probability(self):
"""this function is used to generate a probality for each axiom in axiom pool
"""
self.identity = torch.cat((torch.ones(int(self.args.emb_dim-self.args.emb_dim/4)),torch.zeros(int(self.args.emb_dim/4))),0).unsqueeze(0).cuda()
if len(self.axiompool_reflexive) != 0:
index = torch.LongTensor(self.axiompool_reflexive).cuda()
reflexive_embed = self.rel_emb(index)
reflexive_prob = self.sim(head=reflexive_embed[:, 0, :], tail=self.identity, arity=1)
else:
reflexive_prob = []
if len(self.axiompool_symmetric) != 0:
index = torch.LongTensor(self.axiompool_symmetric).cuda()
symmetric_embed = self.rel_emb(index)
symmetric_prob = self.sim(head=[symmetric_embed[:, 0, :], symmetric_embed[:, 0, :]], tail=self.identity, arity=2)
#symmetric_prob = sess.run(self.symmetric_probability, {self.symmetric_pool: self.axiompool_symmetric})
else:
symmetric_prob = []
if len(self.axiompool_transitive) != 0:
index = torch.LongTensor(self.axiompool_transitive).cuda()
transitive_embed = self.rel_emb(index)
transitive_prob = self.sim(head=[transitive_embed[:, 0, :], transitive_embed[:, 0, :]], tail=transitive_embed[:, 0, :], arity=2)
#transitive_prob = sess.run(self.transitive_probability, {self.transitive_pool: self.axiompool_transitive})
else:
transitive_prob = []
if len(self.axiompool_inverse) != 0:
index = torch.LongTensor(self.axiompool_inverse).cuda()
#inverse_prob = sess.run(self.inverse_probability, {self.inverse_pool: self.axiompool_inverse})
inverse_embed = self.rel_emb(index)
inverse_probability1 = self.sim(head=[inverse_embed[:, 0,:],inverse_embed[:, 1,:]], tail = self.identity, arity=2)
inverse_probability2 = self.sim(head=[inverse_embed[:,1,:],inverse_embed[:, 0,:]], tail=self.identity, arity=2)
inverse_prob = (inverse_probability1 + inverse_probability2)/2
else:
inverse_prob = []
if len(self.axiompool_subproperty) != 0:
index = torch.LongTensor(self.axiompool_subproperty).cuda()
#subproperty_prob = sess.run(self.subproperty_probability, {self.subproperty_pool: self.axiompool_subproperty})
subproperty_embed = self.rel_emb(index)
subproperty_prob = self.sim(head=subproperty_embed[:, 0,:], tail=subproperty_embed[:, 1, :], arity=1)
else:
subproperty_prob = []
if len(self.axiompool_equivalent) != 0:
index = torch.LongTensor(self.axiompool_equivalent).cuda()
#equivalent_prob = sess.run(self.equivalent_probability, {self.equivalent_pool: self.axiompool_equivalent})
equivalent_embed = self.rel_emb(index)
equivalent_prob = self.sim(head=equivalent_embed[:, 0,:], tail=equivalent_embed[:, 1,:], arity=1)
else:
equivalent_prob = []
if len(self.axiompool_inferencechain1) != 0:
index = torch.LongTensor(self.axiompool_inferencechain1).cuda()
inferencechain_embed = self.rel_emb(index)
inferencechain1_prob = self.sim(head=[inferencechain_embed[:, 1, :], inferencechain_embed[:, 0, :]], tail=inferencechain_embed[:, 2, :], arity=2)
else:
inferencechain1_prob = []
if len(self.axiompool_inferencechain2) != 0:
index = torch.LongTensor(self.axiompool_inferencechain2).cuda()
inferencechain_embed = self.rel_emb(index)
inferencechain2_prob = self.sim(head=[inferencechain_embed[:, 2, :], inferencechain_embed[:, 1, :], inferencechain_embed[:, 0, :]], tail=self.identity, arity=3)
else:
inferencechain2_prob = []
if len(self.axiompool_inferencechain3) != 0:
index = torch.LongTensor(self.axiompool_inferencechain3).cuda()
inferencechain_embed = self.rel_emb(index)
inferencechain3_prob = self.sim(head=[inferencechain_embed[:, 1, :], inferencechain_embed[:, 2, :]], tail=inferencechain_embed[:, 0, :], arity=2)
else:
inferencechain3_prob = []
if len(self.axiompool_inferencechain4) != 0:
index = torch.LongTensor(self.axiompool_inferencechain4).cuda()
inferencechain_embed = self.rel_emb(index)
inferencechain4_prob = self.sim(head=[inferencechain_embed[:, 0, :], inferencechain_embed[:, 2, :]],tail=inferencechain_embed[:, 1, :], arity=2)
else:
inferencechain4_prob = []
output = [reflexive_prob, symmetric_prob, transitive_prob, inverse_prob,
subproperty_prob,equivalent_prob,inferencechain1_prob, inferencechain2_prob,
inferencechain3_prob, inferencechain4_prob]
return output
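    # What each probability above measures, in terms of the relation matrices
    # M_r defined by the scalar/block split (I denotes the identity target):
    #   reflexive:        M_r        ~ I
    #   symmetric:        M_r M_r    ~ I
    #   transitive:       M_r M_r    ~ M_r
    #   inverse:          M_r1 M_r2  ~ I   (averaged with M_r2 M_r1 ~ I)
    #   sub/equivalent:   M_r1       ~ M_r2
    #   inference chains: compositions of two (or three) relation matrices
    #                     compared against the remaining relation or I.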
def update_valid_axioms(self, input):
"""this function is used to select high probability axioms as valid axioms and record their scores
"""
#
#
valid_axioms = [self._select_high_probability(list(prob), axiom) for prob,axiom in zip(input, self.axiompool)]
self.valid_reflexive, self.valid_symmetric, self.valid_transitive, \
self.valid_inverse, self.valid_subproperty, self.valid_equivalent, \
self.valid_inferencechain1, self.valid_inferencechain2, \
self.valid_inferencechain3, self.valid_inferencechain4 = valid_axioms
# update the batchsize of axioms and entailments
self._reset_valid_axiom_entailment()
def _select_high_probability(self, prob, axiom):
        # select the high probability axioms and record their probabilities
valid_axiom = [[axiom[prob.index(p)],[p]] for p in prob if p>self.select_probability]
return valid_axiom
def _reset_valid_axiom_entailment(self):
self.infered_hr_t = defaultdict(set)
self.infered_tr_h = defaultdict(set)
self.valid_reflexive2entailment, self.valid_reflexive_p = \
self._valid_axiom2entailment(self.valid_reflexive, self.reflexive2entailment)
self.valid_symmetric2entailment, self.valid_symmetric_p = \
self._valid_axiom2entailment(self.valid_symmetric, self.symmetric2entailment)
self.valid_transitive2entailment, self.valid_transitive_p = \
self._valid_axiom2entailment(self.valid_transitive, self.transitive2entailment)
self.valid_inverse2entailment, self.valid_inverse_p = \
self._valid_axiom2entailment(self.valid_inverse, self.inverse2entailment)
self.valid_subproperty2entailment, self.valid_subproperty_p = \
self._valid_axiom2entailment(self.valid_subproperty, self.subproperty2entailment)
self.valid_equivalent2entailment, self.valid_equivalent_p = \
self._valid_axiom2entailment(self.valid_equivalent, self.equivalent2entailment)
self.valid_inferencechain12entailment, self.valid_inferencechain1_p = \
self._valid_axiom2entailment(self.valid_inferencechain1, self.inferencechain12entailment)
self.valid_inferencechain22entailment, self.valid_inferencechain2_p = \
self._valid_axiom2entailment(self.valid_inferencechain2, self.inferencechain22entailment)
self.valid_inferencechain32entailment, self.valid_inferencechain3_p = \
self._valid_axiom2entailment(self.valid_inferencechain3, self.inferencechain32entailment)
self.valid_inferencechain42entailment, self.valid_inferencechain4_p = \
self._valid_axiom2entailment(self.valid_inferencechain4, self.inferencechain42entailment)
def _valid_axiom2entailment(self, valid_axiom, axiom2entailment):
valid_axiom2entailment = []
valid_axiom_p = []
for axiom_p in valid_axiom:
axiom = tuple(axiom_p[0])
p = axiom_p[1]
for entailment in axiom2entailment[axiom]:
valid_axiom2entailment.append(entailment)
valid_axiom_p.append(p)
h,r,t = entailment[-3:]
self.infered_hr_t[(h,r)].add(t)
self.infered_tr_h[(t,r)].add(h)
return valid_axiom2entailment, valid_axiom_p
    # update new train triples:
def generate_new_train_triples(self):
"""The function is to updata new train triples and used after each training epoch end
Returns:
self.train_sampler.train_triples: The new training dataset (triples).
"""
self.train_sampler.train_triples = copy.deepcopy(self.train_triples_base)
print('generate_new_train_triples...')
#origin_triples = train_sampler.train_triples
inject_triples = self.train_ids_labels_inject
inject_num = int(self.inject_triple_percent*len(self.train_sampler.train_triples))
if len(inject_triples)> inject_num and inject_num >0:
np.random.shuffle(inject_triples)
inject_triples = inject_triples[:inject_num]
#train_triples = np.concatenate([origin_triples, inject_triples], axis=0)
        print('number of train_sampler.train_triples before injection', len(self.train_sampler.train_triples))
for h,r,t in inject_triples:
self.train_sampler.train_triples.append((int(h),int(r),int(t)))
        print('number of train_sampler.train_triples after injection', len(self.train_sampler.train_triples))
return self.train_sampler.train_triples
def get_rule(self, rel2id):
"""Get rule for rule_base KGE models, such as ComplEx_NNE model.
Get rule and confidence from _cons.txt file.
Update:
(rule_p, rule_q): Rule.
confidence: The confidence of rule.
"""
rule_p, rule_q, confidence = [], [], []
with open(os.path.join(self.args.data_path, '_cons.txt')) as file:
lines = file.readlines()
for line in lines:
rule_str, trust = line.strip().split()
body, head = rule_str.split(',')
if '-' in body:
rule_p.append(rel2id[body[1:]])
rule_q.append(rel2id[head])
else:
rule_p.append(rel2id[body])
rule_q.append(rel2id[head])
confidence.append(float(trust))
rule_p = torch.tensor(rule_p).cuda()
rule_q = torch.tensor(rule_q).cuda()
confidence = torch.tensor(confidence).cuda()
return (rule_p, rule_q), confidence
"""def init_emb(self):
self.epsilon = 2.0
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.margin.item() + self.epsilon) / self.args.emb_dim]),
requires_grad=False
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim * 2)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim * 2)
nn.init.uniform_(tensor=self.ent_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
nn.init.uniform_(tensor=self.rel_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())"""
def init_emb(self):
"""Initialize the entity and relation embeddings in the form of a uniform distribution.
"""
self.epsilon = 2.0
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.margin.item() + self.epsilon) / self.args.emb_dim]),
requires_grad=False
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim)
nn.init.uniform_(tensor=self.ent_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
nn.init.uniform_(tensor=self.rel_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
def score_func(self, head_emb, relation_emb, tail_emb, mode):
"""Calculating the score of triples.
        The score is ANALOGY-style: a DistMult term on the scalar part plus a ComplEx-style term on the block part.
Args:
head_emb: The head entity embedding.
relation_emb: The relation embedding.
tail_emb: The tail entity embedding.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
h_scalar, h_x ,h_y = self.split_embedding(head_emb)
r_scalar, r_x, r_y = self.split_embedding(relation_emb)
t_scalar, t_x, t_y = self.split_embedding(tail_emb)
score_scalar = torch.sum(h_scalar * r_scalar * t_scalar, axis=-1)
score_block = torch.sum(h_x * r_x * t_x
+ h_x * r_y * t_y
+ h_y * r_x * t_y
- h_y * r_y * t_x, axis=-1)
score = score_scalar + score_block
return score
def forward(self, triples, negs=None, mode='single'):
"""The functions used in the training phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
negs: Negative samples, defaults to None.
            mode: Choose head-predict or tail-predict, defaults to 'single'.
Returns:
score: The score of triples.
"""
head_emb, relation_emb, tail_emb = self.tri2emb(triples, negs, mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch["positive_sample"]
head_emb, relation_emb, tail_emb = self.tri2emb(triples, mode=mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
| 59,788 | 49.242857 | 265 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/RuleModel/model.py | import torch.nn as nn
import torch
class Model(nn.Module):
def __init__(self, args):
super(Model, self).__init__()
def init_emb(self):
raise NotImplementedError
def score_func(self, head_emb, relation_emb, tail_emb):
raise NotImplementedError
def forward(self, triples, negs, mode):
raise NotImplementedError
def tri2emb(self, triples, negs=None, mode="single"):
"""Get embedding of triples.
This function get the embeddings of head, relation, and tail
respectively. each embedding has three dimensions.
Args:
            triples (tensor): This tensor saves triple ids; its shape is
                [number of triples, 3].
            negs (tensor, optional): This tensor stores the ids of the entities to
                be replaced and has one dimension. When negs is None, the model is
                in the test/eval phase. Defaults to None.
            mode (str, optional): This arg indicates whether the negative entity
                will replace the head or tail entity. When it is 'single', no
                entity is replaced. Defaults to 'single'.
Returns:
head_emb: Head entity embedding.
relation_emb: Relation embedding.
tail_emb: Tail entity embedding.
"""
if mode == "single":
head_emb = self.ent_emb(triples[:, 0]).unsqueeze(1) # [bs, 1, dim]
relation_emb = self.rel_emb(triples[:, 1]).unsqueeze(1) # [bs, 1, dim]
tail_emb = self.ent_emb(triples[:, 2]).unsqueeze(1) # [bs, 1, dim]
elif mode == "head-batch" or mode == "head_predict":
            if negs is None: # negs is None during evaluation, so use all entity embeddings directly
head_emb = self.ent_emb.weight.data.unsqueeze(0) # [1, num_ent, dim]
else:
head_emb = self.ent_emb(negs) # [bs, num_neg, dim]
relation_emb = self.rel_emb(triples[:, 1]).unsqueeze(1) # [bs, 1, dim]
tail_emb = self.ent_emb(triples[:, 2]).unsqueeze(1) # [bs, 1, dim]
elif mode == "tail-batch" or mode == "tail_predict":
head_emb = self.ent_emb(triples[:, 0]).unsqueeze(1) # [bs, 1, dim]
relation_emb = self.rel_emb(triples[:, 1]).unsqueeze(1) # [bs, 1, dim]
if negs is None:
tail_emb = self.ent_emb.weight.data.unsqueeze(0) # [1, num_ent, dim]
else:
tail_emb = self.ent_emb(negs) # [bs, num_neg, dim]
return head_emb, relation_emb, tail_emb
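    # Shape sketch for tri2emb (illustrative only; B = batch size, K = number of
    # negative samples, D = embedding size):
    #   mode='single'                           -> head/rel/tail each [B, 1, D]
    #   mode='head-batch', negs of shape [B, K] -> head [B, K, D], rel/tail [B, 1, D]
    #   mode='head_predict', negs=None          -> head [1, num_ent, D], rel/tail [B, 1, D]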
| 2,572 | 40.5 | 85 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/RuleModel/RugE.py | import torch.nn as nn
import torch
from .model import Model
from IPython import embed
import pdb
class RugE(Model):
"""`Knowledge Graph Embedding with Iterative Guidance from Soft Rules`_ (RugE), which is a novel paradigm of KG embedding with iterative guidance from soft rules.
Attributes:
args: Model configuration parameters.
        epsilon: Calculate embedding_range.
        margin: Calculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim].
        rel_emb: Relation embedding, shape:[num_rel, emb_dim].
.. _Knowledge Graph Embedding with Iterative Guidance from Soft Rules: https://ojs.aaai.org/index.php/AAAI/article/view/11918
"""
def __init__(self, args):
super(RugE, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.init_emb()
def init_emb(self):
"""Initialize the entity and relation embeddings in the form of a uniform distribution.
"""
self.epsilon = 2.0
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.margin.item() + self.epsilon) / self.args.emb_dim]),
requires_grad=False
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim * 2)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim * 2)
nn.init.uniform_(tensor=self.ent_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
nn.init.uniform_(tensor=self.rel_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
def score_func(self, head_emb, relation_emb, tail_emb, mode):
"""Calculating the score of triples.
        The formula for calculating the score is :math:`Re(<w_r, e_s, \bar{e}_o>)`
Args:
head_emb: The head entity embedding.
relation_emb: The relation embedding.
tail_emb: The tail entity embedding.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
re_head, im_head = torch.chunk(head_emb, 2, dim=-1)
re_relation, im_relation = torch.chunk(relation_emb, 2, dim=-1)
re_tail, im_tail = torch.chunk(tail_emb, 2, dim=-1)
return torch.sum(
re_head * re_tail * re_relation
+ im_head * im_tail * re_relation
+ re_head * im_tail * im_relation
- im_head * re_tail * im_relation,
-1
)
def forward(self, triples, negs=None, mode='single'):
"""The functions used in the training phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
negs: Negative samples, defaults to None.
            mode: Choose head-predict or tail-predict, defaults to 'single'.
Returns:
score: The score of triples.
"""
head_emb, relation_emb, tail_emb = self.tri2emb(triples, negs, mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch["positive_sample"]
head_emb, relation_emb, tail_emb = self.tri2emb(triples, mode=mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score | 3,796 | 35.161905 | 166 | py |
NeuralKG | NeuralKG-main/src/neuralkg/model/RuleModel/__init__.py | from .ComplEx_NNE_AER import ComplEx_NNE_AER
from .IterE import IterE
from .RugE import RugE
| 93 | 22.5 | 44 | py |
NeuralKG | NeuralKG-main/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
import sphinx_rtd_theme
import doctest
import neuralkg
# -- Project information -----------------------------------------------------
project = 'NeuralKG'
copyright = '2022, zjukg'
author = 'chenxn'
# The full version, including alpha/beta/rc tags
release = '1.0.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.todo',
'sphinx.ext.coverage',
# 'sphinx_copybutton',
'recommonmark',
'sphinx_markdown_tables',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
doctest_default_flags = doctest.NORMALIZE_WHITESPACE
autodoc_member_order = 'bysource'
intersphinx_mapping = {'python': ('https://docs.python.org/', None)}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/custom.css']
# html_logo = './_static/logo.png'
html_context = {
"display_github": True, # Integrate GitHub
"github_user": "chenxn2020", # Username
"github_repo": "test_doc", # Repo name
"github_version": "main", # Version
"conf_py_path": "/docs/source/", # Path in the checkout to the docs root
} | 2,913 | 32.113636 | 79 | py |
cmm_ts | cmm_ts-main/main.py | from models.utils import *
from models.AdjLR import AdjLR
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.optimizers import Adam
from Data import Data
from keras.layers import *
from keras.models import *
from constants import *
# IAED import
from models.IAED.mIAED import mIAED
from models.IAED.sIAED import sIAED
from models.IAED.config import config as cIAED
from MyParser import *
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
MODEL = args.model
MODEL_FOLDER = args.model_dir
DATA = args.data
N_PAST = args.npast
N_FUTURE = args.nfuture
N_DELAY = args.ndelay
INITDEC = args.noinit_dec
BATCH_SIZE = args.batch_size
TRAIN_PERC, VAL_PERC, TEST_PERC = args.percs
TRAIN_PERC, VAL_PERC, TEST_PERC = float(TRAIN_PERC), float(VAL_PERC), float(TEST_PERC)
PATIENCE = args.patience
EPOCHS = args.epochs
LR = args.learning_rate
ADJLR = args.adjLR
TARGETVAR = args.target_var
df, features = get_df(DATA)
use_att, use_cm, cm, cm_trainable, use_constraint, constraint = cmd_attention_map(args.att, args.catt)
if MODEL == Models.sIAED.value:
        if TARGETVAR is None: raise ValueError('for the sIAED model, target_var needs to be specified')
# Single-output data initialization
d = Data(df, N_PAST, N_DELAY, N_FUTURE, TRAIN_PERC, VAL_PERC, TEST_PERC, target = TARGETVAR)
d.downsample(step = 10)
d.smooth(window_size = 50)
X_train, y_train, X_val, y_val, X_test, y_test = d.get_timeseries()
# IAED Model definition
config = init_config(cIAED, folder = MODEL_FOLDER, npast = N_PAST, nfuture = N_FUTURE,
ndelay = N_DELAY, nfeatures = N_FEATURES, features = features, initDEC = INITDEC,
use_att = use_att, use_cm = use_cm, cm = cm, cm_trainable = cm_trainable, use_constraint = use_constraint, constraint = constraint)
model = sIAED(df = df, config = config)
model.create_model(target_var = TARGETVAR, loss = 'mse', optimizer = Adam(LR), metrics = ['mse', 'mae', 'mape'])
elif MODEL == Models.mIAED.value:
# Multi-output data initialization
d = Data(df, N_PAST, N_DELAY, N_FUTURE, TRAIN_PERC, VAL_PERC, TEST_PERC)
d.downsample(step = 10)
d.smooth(window_size = 50)
X_train, y_train, X_val, y_val, X_test, y_test = d.get_timeseries()
# IAED Model definition
config = init_config(cIAED, folder = MODEL_FOLDER, npast = N_PAST, nfuture = N_FUTURE,
ndelay = N_DELAY, nfeatures = N_FEATURES, features = features, initDEC = INITDEC,
use_att = use_att, use_cm = use_cm, cm = cm, cm_trainable = cm_trainable, use_constraint = use_constraint, constraint = constraint)
model = mIAED(df = df, config = config)
model.create_model(loss = 'mse', optimizer = Adam(LR), metrics = ['mse', 'mae', 'mape'])
# Create .txt file with model parameters
print_init(MODEL, TARGETVAR, MODEL_FOLDER, N_PAST, N_FUTURE, N_DELAY, INITDEC, TRAIN_PERC, VAL_PERC, TEST_PERC,
use_att, use_cm, cm, cm_trainable, use_constraint, constraint , BATCH_SIZE, PATIENCE, EPOCHS, LR, ADJLR)
# Model fit
cbs = list()
cbs.append(EarlyStopping(patience = PATIENCE))
cbs.append(ModelCheckpoint(RESULT_DIR + '/' + MODEL_FOLDER + '/', save_best_only = True))
    if ADJLR is not None: cbs.append(AdjLR(model, int(ADJLR[0]), float(ADJLR[1]), ADJLR[2] == 'True', 1))  # justOnce arrives as a string from the parser
model.fit(X = X_train, y = y_train, validation_data = (X_val, y_val), batch_size = BATCH_SIZE,
epochs = EPOCHS, callbacks = cbs)
# Save causal matrix
model.save_cmatrix()
# Model evaluation
model.MAE(X_test, y_test, d.scaler)
# Model predictions
model.predict(X_test, y_test, d.scaler) | 3,876 | 41.604396 | 160 | py |
cmm_ts | cmm_ts-main/main_bestparams.py | import pickle
from models.utils import *
from keras.optimizers import Adam
from Data import Data
from keras.layers import *
from keras.models import *
from constants import *
import pandas as pd
from kerashypetune import KerasGridSearch
from models.utils import Words as W
# IAED import
from models.IAED.mIAED import mIAED
from models.IAED.sIAED import sIAED
from models.IAED.config import config as cIAED
from MyParser import *
df, features = get_df(11)
parser = create_parser()
args = parser.parse_args()
# Parameters definition
MODEL = args.model
N_FUTURE = args.nfuture
N_PAST = args.npast
N_DELAY = 0
TRAIN_PERC, VAL_PERC, TEST_PERC = args.percs
MODEL_FOLDER = args.model_dir
TRAIN_AGENT = args.train_agent
df, features = get_df(TRAIN_AGENT)
if MODEL == Models.sIAED.value:
# Single-output data initialization
TARGETVAR = 'd_g'
d = Data(df, N_PAST, N_DELAY, N_FUTURE, TRAIN_PERC, VAL_PERC, TEST_PERC, target = TARGETVAR)
d.downsample(step = 10)
d.smooth(window_size = 50)
X_train, y_train, X_val, y_val, x_test, y_test = d.get_timeseries()
# IAED Model definition
config_grid = init_config(cIAED, folder = MODEL_FOLDER, npast = N_PAST, nfuture = N_FUTURE,
ndelay = N_DELAY, nfeatures = N_FEATURES, features = None, initDEC = False,
use_att = True, use_cm = True, cm = None, cm_trainable = True, use_constraint = True, constraint = [0.1, 0.2])
config_grid[W.ATTUNITS] = [128, 256, 512]
config_grid[W.ENCDECUNITS] = [64, 128, 256, 512]
config_grid[W.DECINIT] = True
config_grid["epochs"] = 25
config_grid["batch_size"] = 32
    hypermodel = lambda x: sIAED(df = df, config = x).create_model(target_var = TARGETVAR, loss = 'mse', optimizer = Adam(0.0001),
metrics = ['mse', 'mae', 'mape'], searchBest = True)
elif MODEL == Models.mIAED.value:
# Multi-output data initialization
d = Data(df, N_PAST, N_DELAY, N_FUTURE, TRAIN_PERC, VAL_PERC, TEST_PERC)
d.downsample(step = 10)
d.smooth(window_size = 50)
X_train, y_train, X_val, y_val, x_test, y_test = d.get_timeseries()
# IAED Model definition
config_grid = init_config(cIAED, folder = MODEL_FOLDER, npast = N_PAST, nfuture = N_FUTURE,
ndelay = N_DELAY, nfeatures = N_FEATURES, features = None, initDEC = False,
use_att = True, use_cm = True, cm = None, cm_trainable = True, use_constraint = True, constraint = [0.1, 0.2])
config_grid[W.ATTUNITS] = [256, 300, 512]
config_grid[W.ENCDECUNITS] = [128, 256]
config_grid[W.DECINIT] = [False, True]
config_grid["epochs"] = 25
config_grid["batch_size"] = [64, 128, 256, 512]
    hypermodel = lambda x: mIAED(df = df, config = x).create_model(loss = 'mse', optimizer = Adam(0.0001),
metrics = ['mse', 'mae', 'mape'], searchBest = True)
kgs = KerasGridSearch(hypermodel, config_grid, monitor = 'val_loss', greater_is_better = False, tuner_verbose = 1)
kgs.search(X_train, y_train, validation_data = (X_val, y_val), shuffle = False)
with open(RESULT_DIR + '/' + MODEL_FOLDER + '/best_param.pkl', 'wb') as pickle_file:
pickle.dump(kgs.best_params, pickle_file)
| 3,299 | 37.372093 | 140 | py |
cmm_ts | cmm_ts-main/MyParser.py | import argparse
from argparse import RawTextHelpFormatter
from models.utils import *
def print_init(model, targetvar, modeldir, npast, nfuture, ndelay, initdec, train_perc, val_perc, test_perc,
use_att, use_cm, cm, cm_trainable, use_constraint, constraint, batch_size, patience, epochs, lr, adjlr):
"""
    Print the network configuration to the console and to the parameters.txt file in the model folder
Args:
model (str): network configuration to use. choices = ['sIAED', 'mIAED']
targetvar (str): Target variable to forecast (used only if model = sIAED). Needs to match one of the columns defined in the csv file
modeldir (str): model folder that will be created in "training_result" folder
npast (int): observation window
nfuture (int): forecasting window
ndelay (int): forecasting delay
initdec (bool): use encoder final state as initial state for decoder
train_perc (float): training percentage
val_perc (float): validation percentage
        test_perc (float): testing percentage
use_att (bool): use attention mechanism
use_cm (bool): use causal model
cm (str): string linked to the causal model
cm_trainable (bool): trainable causal model
use_constraint (bool): use constraint to train the causal model
constraint (float): constraint value
batch_size (int): batch size
patience (int): early stopping criteria
epochs (int): epochs
lr (float): learning rate
        adjlr (list): learning rate adjustment strategy as (freq [epochs], factor, justOnce), or None to disable
"""
# PRINT TO CONSOLE
print("\n#")
print("# MODEL PARAMETERS")
print("# model =", model)
if model == Models.sIAED.value: print("# target var =", targetvar)
print("# model folder =", modeldir)
print("# past window steps =", npast)
print("# future window steps =", nfuture)
print("# delay steps =", ndelay)
print("# use encoder state for dec init =", initdec)
print("# dataset split (train, val, test) =", (train_perc, val_perc, test_perc))
print("#")
print("# ATTENTION PARAMETERS")
print("# attention =", use_att)
if use_att and use_cm and cm_trainable:
print('# trainable causal model =', cm)
if use_constraint:
print("# contraint =", constraint)
elif use_att and use_cm and not cm_trainable:
print("# Fixed causal model =", cm)
print("#")
print("# TRAINING PARAMETERS")
print("# batch size =", batch_size)
print("# early stopping patience =", patience)
print("# epochs =", epochs)
print("# learning rate =", lr)
print("# adjust learning rate =", adjlr)
print("#\n")
# PRINT TO FILE
f = open(RESULT_DIR + "/" + modeldir + "/parameters.txt", "w")
f.write("#\n")
f.write("# MODEL PARAMETERS\n")
f.write("# model = " + str(model) + "\n")
if model == Models.sIAED.value: f.write("# target var = " + str(targetvar) + "\n")
f.write("# model folder = " + str(modeldir) + "\n")
f.write("# past window steps = " + str(npast) + "\n")
f.write("# future window steps = " + str(nfuture) + "\n")
f.write("# delay steps = " + str(ndelay) + "\n")
f.write("# use encoder state for dec init = " + str(initdec) + "\n")
f.write("# dataset split (train, val, test) = " + str((train_perc, val_perc, test_perc)) + "\n")
f.write("#" + "\n")
f.write("# ATTENTION PARAMETERS" + "\n")
f.write("# attention = " + str(use_att) + "\n")
if use_att and use_cm and cm_trainable:
f.write("# trainable causal model = " + str(cm) + "\n")
if use_constraint:
f.write("# contraint = " + str(constraint) + "\n")
elif use_att and use_cm and not cm_trainable:
f.write("# Fixed causality = " + str(cm) + "\n")
f.write("#" + "\n")
f.write("# TRAINING PARAMETERS" + "\n")
f.write("# batch size = " + str(batch_size) + "\n")
f.write("# early stopping patience = " + str(patience) + "\n")
f.write("# epochs = " + str(epochs) + "\n")
f.write("# learning rate = " + str(lr) + "\n")
f.write("# adjust learning rate = " + str(adjlr) + "\n")
f.write("#\n")
f.close()
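# Illustrative CLI invocation (a sketch, not taken from the original repo docs): train a
# single-output model on a hypothetical data.csv with a trainable FPCMCI causal attention
# map constrained to +/- 0.1 around its initial values:
#
#   python main.py sIAED my_model --data data.csv --npast 32 --nfuture 48 \
#       --target_var d_g --catt FPCMCI True 0.1 --batch_size 128 --epochs 300
#
# The accepted arguments are defined in create_parser() below.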
def create_parser():
"""
Create a parser
Returns:
ArgumentParser: parser
"""
model_description = "\n".join([k + " - " + MODELS[k] for k in MODELS])
parser = argparse.ArgumentParser(description = 'Multivariate Multistep Timeseries forecasting framework.', formatter_class = RawTextHelpFormatter)
parser.add_argument("model", type = str, choices = list(MODELS.keys()), help = model_description)
parser.add_argument("model_dir", type = str, help = "model folder")
parser.add_argument("--data", type = str, help = "CSV file to load positioned in data folder", required = True)
parser.add_argument("--npast", type = int, help = "observation window", required = True)
parser.add_argument("--nfuture", type = int, help = "forecasting window", required = True)
parser.add_argument("--ndelay", type = int, help = "forecasting delay [default 0]", required = False, default = 0)
parser.add_argument("--noinit_dec", action = 'store_false', help = "use ENC final state as init for DEC bit [default True]", required = False, default = True)
parser.add_argument("--att", action = 'store_true', help = "use attention bit [default False]", required = False, default = False)
parser.add_argument("--catt", nargs = 3, help = "use causal-attention [CAUSAL MATRIX, TRAINABLE, CONSTRAINT] [default None False None]", required = False, default = [None, False, None])
parser.add_argument("--target_var", type = str, help = "Target variable to forecast [used only if model = sIAED] [default None]", required = False, default = None)
parser.add_argument("--percs", nargs = 3, help = "[train, val, test percentages [default [0.7, 0.1, 0.2]]", required = False, default = [0.7, 0.1, 0.2])
parser.add_argument("--patience", type = int, help = "earlystopping patience [default 25]", required = False, default = 25)
parser.add_argument("--batch_size", type = int, help = "batch size [default 128]", required = False, default = 128)
parser.add_argument("--epochs", type = int, help = "epochs [default 300]", required = False, default = 300)
parser.add_argument("--learning_rate", type = float, help = "learning rate [default 0.0001]", required = False, default = 0.0001)
parser.add_argument("--adjLR", nargs = 3, help = "Modifying learning rate strategy (freq[epochs], factor, justOnce)", required = False)
return parser | 6,557 | 50.234375 | 189 | py |
cmm_ts | cmm_ts-main/constants.py | from enum import Enum
import numpy as np
import os
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
RESULT_DIR = ROOT_DIR + "/training_result"
# Parameters definition
N_FEATURES = 8
LIST_FEATURES = ['d_g', 'v', 'risk', 'theta_g', 'omega', 'theta', 'g_seq', 'd_obs']
# MODELS
class Models(Enum):
sIAED = "sIAED"
mIAED = "mIAED"
MODELS = {
Models.sIAED.value : "Single-output Input Attention Encoder Decoder",
Models.mIAED.value : "Multi-output Input Attention Encoder Decoder",
}
# CAUSAL MATRICES
CM_PCMCI = np.array([[0.632,0.065,0.125,0.088,0.138,0.108,0.06,0.048],
[0.078,0.274,0.092,0.094,0.103,0.08,0.068,0.049],
[0.09,0.196,0.27,0.106,0.137,0.111,0.065,0.049],
[0.1,0.072,0.122,0.166,0.154,0.102,0.059,0],
[0.111,0.059,0.095,0.087,0.131,0.132,0.054,0.057],
[0.122,0.067,0.083,0.112,0.418,0.541,0,0],
[0.086,0,0.063,0.062,0.13,0.076,0,0.052],
[0,0,0.074,0.067,0.05,0,0.051,0.708]])
CM_FPCMCI = np.array([[0.794693885975173,0.0797596212634794,0,0,0.207147494884196,0,0,0],
[0,0.547118275252963,0.118972264307896,0,0,0,0,0],
[0,0.411013790019703,0.398058466007042,0,0,0,0,0.0622522550249479],
[0,0,0,0.400374937278639,0.147993676497357,0,0.0963358126783955,0.120359691147529],
[0,0,0,0,0.116815916574682,0,0,0],
[0,0,0,0,0.239283926475173,0.60894990342525,0,0],
[0,0,0,0,0,0,0.991103983176182,0],
[0,0.0684969047836938,0.0634190046412317,0,0,0,0,0.972004088748988]])
class CausalModel(Enum):
FPCMCI = "FPCMCI"
PCMCI = "PCMCI"
CAUSAL_MODELS = {CausalModel.FPCMCI.value : CM_FPCMCI,
CausalModel.PCMCI.value : CM_PCMCI}
| 1,886 | 36 | 105 | py |
cmm_ts | cmm_ts-main/Data.py | from copy import deepcopy
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import matplotlib
# matplotlib.use('Qt5Agg')
from matplotlib import pyplot as plt
ALL = 'all'
class Data():
def __init__(self,
data: pd.DataFrame,
n_past: int,
n_delay: int,
n_future: int,
train_prec: float,
val_prec: float,
test_prec: float,
target: str = ALL):
"""
Data constructor
Args:
data (pd.DataFrame): loaded dataframe
n_past (int): observation window size
n_delay (int): forecasting delay
n_future (int): forecasting window size
train_prec (float): training percentage
val_prec (float): validation percentage
test_prec (float): testing percentage
target (str, optional): variable to forecast. Defaults to ALL.
"""
# Data
self.data = data
self.data_scaled = None
self.features = data.columns
self.N = len(self.features)
# Data parameters
self.n_past = n_past
self.n_delay = n_delay
self.n_future = n_future
# Splittng percentages
self.train_perc = train_prec
self.val_perc = val_prec
self.test_perc = test_prec
self.target = target
self.scaler = None
def get_sets(self, seq):
"""
Splits dataset into training, validation and testing sets
Args:
seq (np.array): timeseries data
Returns:
(np.array, np.array, np.array): training, validation and testing sets
"""
train_len = int(len(self.data) * self.train_perc)
val_len = int(len(self.data) * self.val_perc)
test_len = int(len(self.data) * self.test_perc)
return seq[:train_len], seq[train_len:train_len + val_len], seq[train_len + val_len:]
def downsample(self, step):
"""
        Downsamples the dataset by keeping one sample every <step> samples
Args:
step (int): downsampling factor
"""
self.data = pd.DataFrame(self.data.values[::step, :], columns=self.data.columns)
def augment(self, nrepeat = 5, sigma = 0.05, scaling = 0.5):
"""
        Data augmentation: concatenates <nrepeat> noisy, randomly scaled copies of the dataset
        Args:
            nrepeat (int, optional): Number of concatenations of the same dataset. Defaults to 5.
            sigma (float, optional): Gaussian noise sigma applied to each repetition. Defaults to 0.05.
            scaling (float, optional): Lower bound of the random scaling factor. Defaults to 0.5
"""
np.random.seed(0)
list_d = list()
for _ in range(nrepeat):
d = deepcopy(self.data)
noise = np.random.normal(0, sigma, size = self.data.shape)
scaling_factor = np.random.uniform(scaling, 1)
rep = scaling_factor * d + noise
rep['g_seq'] = self.data['g_seq']
list_d.append(rep)
self.data = pd.concat(list_d, ignore_index = True)
def smooth(self, window_size):
"""
Smooths the dataset by applying a moving average window strategy
Args:
window_size (int): moving average window size
"""
for f in self.data.columns:
if f != 'g_seq': self.data[f] = self.data[f].rolling(window_size).mean()
self.data.dropna(inplace=True)
def plot_ts(self):
"""
Plots the timeseries
"""
self.data.plot(subplots=True)
plt.tight_layout()
plt.show()
def scale_data(self):
"""
Scales the dataset by a MinMaxScaler
"""
df = self.data.astype(float)
self.scaler = MinMaxScaler()
self.scaler = self.scaler.fit(df)
self.data_scaled = self.scaler.transform(df)
def split_sequence(self):
"""
Create the X and y timeseries
Returns:
(np.array, np.array): X and y timeseries
"""
X, y = list(), list()
for i in range(len(self.data_scaled)):
lag_end = i + self.n_past
forecast_end = self.n_delay + lag_end + self.n_future
if forecast_end > len(self.data_scaled):
break
if self.target == ALL:
seq_x, seq_y = self.data_scaled[i:lag_end], self.data_scaled[self.n_delay + lag_end:forecast_end]
else:
t = list(self.data.columns).index(self.target)
seq_x, seq_y = self.data_scaled[i:lag_end], self.data_scaled[self.n_delay + lag_end:forecast_end, t]
X.append(seq_x)
y.append(seq_y)
X = np.array(X)
y = np.array(y)
if self.target != ALL:
y = y.reshape((y.shape[0], y.shape[1], -1))
return X, y
def get_timeseries(self):
"""
Returns X and y timeseries for training, validation and testing set
Returns:
(np.array, np.array, np.array, np.array, np.array, np.array): X, y for each set
"""
self.scale_data()
X, y = self.split_sequence()
X_train, X_val, X_test = self.get_sets(X)
y_train, y_val, y_test = self.get_sets(y)
print("X train shape", X_train.shape)
print("y train shape", y_train.shape)
print("X val shape", X_val.shape)
print("y val shape", y_val.shape)
print("X test shape", X_test.shape)
print("y test shape", y_test.shape)
return X_train, y_train, X_val, y_val, X_test, y_test
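# Usage sketch (illustrative only, mirroring the pipeline in main.py; the dataframe `df`
# is assumed to be already loaded, e.g. via models.utils.get_df):
#
#   d = Data(df, n_past = 32, n_delay = 0, n_future = 48,
#            train_prec = 0.7, val_prec = 0.1, test_prec = 0.2, target = 'd_g')
#   d.downsample(step = 10)
#   d.smooth(window_size = 50)
#   X_train, y_train, X_val, y_val, X_test, y_test = d.get_timeseries()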
| 5,701 | 31.033708 | 116 | py |
cmm_ts | cmm_ts-main/load.py | from Data import Data
from constants import RESULT_DIR
# IAED import
from models.IAED.mIAED import mIAED
from models.IAED.sIAED import sIAED
from models.IAED.config import config as cIAED
from models.utils import get_df
N_FUTURE = 48
N_PAST = 32
N_DELAY = 0
TRAIN_PERC = 0.0
VAL_PERC = 0.0
TEST_PERC = 1.0
TEST_AGENTS = [3, 4, 5, 6, 7, 8, 9, 10]
MODELS = ["256_mIAED_FPCMCI_t005", "256_mIAED_PCMCI_t005", "mIAED_FPCMCI_t01", "mIAED_PCMCI_t01"]
for M in MODELS:
for a in TEST_AGENTS:
df, features = get_df(a)
m = mIAED(df = df, folder = M)
d = Data(df, N_PAST, N_DELAY, N_FUTURE, TRAIN_PERC, VAL_PERC, TEST_PERC)
d.downsample(10)
d.smooth(window_size = 50)
_, _, _, _, X_test, y_test = d.get_timeseries()
# Model evaluation
m.MAE(X_test, y_test, d.scaler, folder = RESULT_DIR + "/" + M + '/test/' + str(a))
# # Model predictions
# m.predict(X_test, y_test, d.scaler, folder = RESULT_DIR + "/" + M + '/prediction/' + str(a))
| 1,027 | 27.555556 | 102 | py |
cmm_ts | cmm_ts-main/models/MyModel.py | from abc import ABC, abstractmethod
import os
from constants import RESULT_DIR
import models.utils as utils
import models.Words as W
from keras.models import *
from matplotlib import pyplot as plt
import pickle
import numpy as np
from tqdm import tqdm
class MyModel(ABC):
def __init__(self, name, df, config : dict = None, folder : str = None):
"""
Constructur, specify config if you want to create a new model, while, set folder if you want to load a pre-existing model
Args:
name (str): model name
df (dataframe): dataset
config (dict): configuration file. Default None.
folder (str): model's name to load. Default None.
"""
self.name = name
self.df = df
self.predY = None
if config:
self.dir = config[W.FOLDER]
with open(self.model_dir + '/config.pkl', 'wb') as file_pi:
pickle.dump(config, file_pi)
utils.no_warning()
self.config = config
self.model : Model = None
if folder:
self.dir = folder
with open(self.model_dir + '/config.pkl', 'rb') as pickle_file:
self.config = pickle.load(pickle_file)
self.model : Model = load_model(self.model_dir)
@property
def model_dir(self):
model_dir = RESULT_DIR + "/" + self.dir
if not os.path.exists(model_dir):
os.makedirs(model_dir)
return model_dir
@property
def plot_dir(self):
plot_dir = self.model_dir + "/plots"
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
return plot_dir
@property
def pred_dir(self):
pred_dir = self.model_dir + "/predictions"
if not os.path.exists(pred_dir):
os.makedirs(pred_dir)
return pred_dir
@abstractmethod
def create_model(self) -> Model:
pass
def fit(self, X, y, validation_data, batch_size, epochs, callbacks = None):
"""
Fit wrapper
Args:
X (array): X training set
y (array): Y training set
validation_data (tuple): (x_val, y_val)
batch_size (int): batch size
epochs (int): # epochs
callbacks (list, optional): List of callbacks. Defaults to None.
"""
history = self.model.fit(x = X, y = y, batch_size = batch_size, epochs = epochs,
callbacks = callbacks, validation_data = validation_data, shuffle = False)
with open(self.model_dir + '/history.pkl', 'wb') as file_pi:
pickle.dump(history.history, file_pi)
self.plot_history(history)
def MAE(self, X, y, scaler, folder = None, show = False):
"""
Prediction evaluation through MAE
Args:
X (np.array): network input
y (np.array): actual output
scaler (scaler): scaler used for the scaling
folder (str, optional): saving folder. Defaults to None.
show (bool, optional): bit to show the plots. Defaults to False.
Returns:
np.array: mean absolute error
"""
print('\n##')
print('## Prediction evaluation through MAE')
print('##')
if folder is None:
folder = self.plot_dir
else:
if not os.path.exists(folder): os.makedirs(folder)
if self.predY is None: self.predY = self.model.predict(X)
if self.name is utils.Models.mIAED or self.name == utils.Models.mCNN or self.name == utils.Models.mT2V:
ae_shape = (y.shape[1], self.config[W.NFEATURES])
elif self.name is utils.Models.sIAED or self.name is utils.Models.sT2V or self.name is utils.Models.sCNN:
ae_shape = (y.shape[1], 1)
ae = np.zeros(shape = ae_shape)
if self.name is utils.Models.sIAED or self.name is utils.Models.sT2V or self.name is utils.Models.sCNN:
t_idx = self.config[W.FEATURES].index(self.target_var)
dummy_y = np.zeros(shape = (y.shape[1], 8))
for t in tqdm(range(len(y)), desc = 'Abs error'):
# Invert scaling actual
actualY_t = np.squeeze(y[t,:,:])
if self.name is utils.Models.mIAED or self.name == utils.Models.mCNN or self.name == utils.Models.mT2V:
actualY_t = scaler.inverse_transform(actualY_t)
elif self.name is utils.Models.sIAED or self.name is utils.Models.sT2V or self.name is utils.Models.sCNN:
dummy_y[:, t_idx] = actualY_t
actualY_t = scaler.inverse_transform(dummy_y)[:, t_idx]
actualY_t = np.reshape(actualY_t, (actualY_t.shape[0], 1))
# Invert scaling pred
predY_t = np.squeeze(self.predY[t,:,:])
if self.name is utils.Models.mIAED or self.name == utils.Models.mCNN or self.name == utils.Models.mT2V:
predY_t = scaler.inverse_transform(predY_t)
elif self.name is utils.Models.sIAED or self.name is utils.Models.sT2V or self.name is utils.Models.sCNN:
dummy_y[:, t_idx] = predY_t
predY_t = scaler.inverse_transform(dummy_y)[:, t_idx]
predY_t = np.reshape(predY_t, (predY_t.shape[0], 1))
ae = ae + abs(actualY_t - predY_t)
ae_mean = ae/len(y)
with open(self.model_dir + '/ae.npy', 'wb') as file:
np.save(file, ae_mean)
self.plot_MAE(ae_mean, folder = folder, show = show)
return ae_mean
def predict(self, X, y, scaler, folder = None, plot = False):
"""
Predict output
Args:
X (np.array): network input
y (np.array): actual output
scaler (scaler): scaler used for the scaling
folder (str, optional): saving folder. Defaults to None.
plot (bool, optional): bit to plot the prediction. Defaults to False.
"""
print('\n##')
print('## Predictions')
print('##')
if folder is None:
folder = self.pred_dir
else:
if not os.path.exists(folder): os.makedirs(folder)
x_npy = list()
ya_npy = list()
yp_npy = list()
# Generate and save predictions
if self.predY is None: self.predY = self.model.predict(X)
if self.name is utils.Models.sIAED or self.name is utils.Models.sT2V or self.name is utils.Models.sCNN:
t_idx = self.config[W.FEATURES].index(self.target_var)
dummy_y = np.zeros(shape = (y.shape[1], 8))
for t in range(len(self.predY)):
# test X
X_t = np.squeeze(X[t,:,:])
X_t = scaler.inverse_transform(X_t)
x_npy.append(X_t)
# test y
Y_t = np.squeeze(y[t,:,:])
if self.name is utils.Models.mIAED or self.name == utils.Models.mCNN or self.name == utils.Models.mT2V:
Y_t = scaler.inverse_transform(Y_t)
elif self.name is utils.Models.sIAED or self.name == utils.Models.sCNN or self.name is utils.Models.sT2V:
dummy_y[:, t_idx] = Y_t
Y_t = scaler.inverse_transform(dummy_y)[:, t_idx]
ya_npy.append(Y_t)
# pred y
predY_t = np.squeeze(self.predY[t,:,:])
if self.name is utils.Models.mIAED or self.name == utils.Models.mCNN or self.name == utils.Models.mT2V:
predY_t = scaler.inverse_transform(predY_t)
elif self.name is utils.Models.sIAED or self.name == utils.Models.sCNN or self.name is utils.Models.sT2V:
dummy_y[:, t_idx] = predY_t
predY_t = scaler.inverse_transform(dummy_y)[:, t_idx]
yp_npy.append(predY_t)
with open(self.pred_dir + '/x_npy.npy', 'wb') as file:
np.save(file, x_npy)
with open(self.pred_dir + '/ya_npy.npy', 'wb') as file:
np.save(file, ya_npy)
with open(self.pred_dir + '/yp_npy.npy', 'wb') as file:
np.save(file, yp_npy)
target = self.target_var if self.name is utils.Models.sIAED or self.name == utils.Models.sCNN or self.name is utils.Models.sT2V else None
if plot: self.plot_prediction(x_npy, ya_npy, yp_npy, target_var = target)
def save_cmatrix(self):
"""
Save causal matrix after training
"""
if self.config[W.USECAUSAL]:
layers = self.model.layers
if self.name == utils.Models.mIAED or self.name == utils.Models.mCNN or self.name == utils.Models.mT2V:
ca_matrix = [layers[l].selfatt.Dalpha.bias.numpy() for l in range(1, len(layers) - 1)]
else:
ca_matrix = [layers[l].selfatt.Dalpha.bias.numpy() for l in range(1, len(layers))]
print(ca_matrix)
print(self.config[W.CMATRIX])
with open(self.model_dir + '/cmatrix.npy', 'wb') as file_pi:
np.save(file_pi, ca_matrix)
def plot_history(self, history):
"""
Plot history information
"""
if "loss" in history.history.keys():
plt.figure()
plt.plot(history.history["loss"], label = "Training loss")
plt.plot(history.history["val_loss"], label = "Validation loss")
plt.legend()
plt.grid()
plt.savefig(self.plot_dir + "/loss.png", dpi = 300)
plt.savefig(self.plot_dir + "/loss.eps", dpi = 300)
plt.close()
if "mae" in history.history.keys():
plt.figure()
plt.plot(history.history["mae"], label = "Training mae")
plt.plot(history.history["val_mae"], label = "Validation mae")
plt.legend()
plt.grid()
plt.savefig(self.plot_dir + "/mae.png", dpi = 300)
plt.savefig(self.plot_dir + "/mae.eps", dpi = 300)
plt.close()
if "mape" in history.history.keys():
plt.figure()
plt.plot(history.history["mape"], label = "Training mape")
plt.plot(history.history["val_mape"], label = "Validation mape")
plt.legend()
plt.grid()
plt.savefig(self.plot_dir + "/mape.png", dpi = 300)
plt.savefig(self.plot_dir + "/mape.eps", dpi = 300)
plt.close()
if "accuracy" in history.history.keys():
plt.figure()
plt.plot(history.history["accuracy"], label = "Training accuracy")
plt.plot(history.history["val_accuracy"], label = "Validation accuracy")
plt.legend()
plt.grid()
plt.savefig(self.plot_dir + "/accuracy.png", dpi = 300)
plt.savefig(self.plot_dir + "/accuracy.eps", dpi = 300)
plt.close()
def plot_MAE(self, ae, folder = None, show = False):
"""
Plot Mean Absolute Error for each variable involved in the prediction
Args:
            ae (np.array): absolute error along the prediction horizon
folder (str, optional): saving folder. Defaults to None.
show (bool, optional): bit to show the plots. Defaults to False.
"""
if self.name is utils.Models.sIAED or self.name == utils.Models.sCNN or self.name is utils.Models.sT2V:
f = self.target_var
plt.figure()
plt.title(f + " NMAE " + str(round(ae.mean()/self.df[self.target_var].std(), 3)))
plt.plot(range(self.config[W.NFUTURE]), ae)
plt.ylabel("Abs error")
plt.xlabel("Time steps")
plt.grid()
if show:
plt.show()
else:
if folder is None: folder = self.plot_dir
plt.savefig(folder + "/" + f + "_nmae.png", dpi = 300)
plt.savefig(folder + "/" + f + "_nmae.eps", dpi = 300)
plt.close()
elif self.name is utils.Models.mIAED or self.name == utils.Models.mCNN or self.name == utils.Models.mT2V:
for f in range(self.config[W.NFEATURES]):
plt.figure()
plt.title(self.config[W.FEATURES][f] + " NMAE " + str(round(ae[:, f].mean()/self.df[self.config[W.FEATURES][f]].std(), 3)))
plt.plot(range(self.config[W.NFUTURE]), ae[:, f])
plt.ylabel("Abs error")
plt.xlabel("Time steps")
plt.grid()
if show:
plt.show()
else:
if folder is None: folder = self.plot_dir
plt.savefig(folder + "/" + str(self.config[W.FEATURES][f]) + "_nmae.png", dpi = 300)
plt.savefig(folder + "/" + str(self.config[W.FEATURES][f]) + "_nmae.eps", dpi = 300)
plt.close()
def plot_prediction(self, x, ya, yp, folder = None, target_var = None):
"""
Plot predicted output with observed input and actual output
Args:
x (np.array): observation timeseries
ya (np.array): actual output
yp (np.array): predicted output
folder (str, optional): saving folder. Defaults to None.
target_var (str, optional): target var to plot. Defaults to None.
"""
if folder is None: folder = self.pred_dir
plt.figure()
if target_var is None:
for f in self.config[W.FEATURES]:
# Create var folder
if not os.path.exists(folder + "/" + str(f) + "/"):
os.makedirs(folder + "/" + str(f) + "/")
f_idx = list(self.config[W.FEATURES]).index(f)
for t in tqdm(range(len(yp)), desc = f):
plt.plot(range(t, t + len(x[t][:, f_idx])), x[t][:, f_idx], color = 'green', label = "past")
plt.plot(range(t - 1 + len(x[t][:, f_idx]), t - 1 + len(x[t][:, f_idx]) + len(ya[t][:, f_idx])), ya[t][:, f_idx], color = 'blue', label = "actual")
plt.plot(range(t - 1 + len(x[t][:, f_idx]), t - 1 + len(x[t][:, f_idx]) + len(yp[t][:, f_idx])), yp[t][:, f_idx], color = 'red', label = "pred")
plt.title("Multi-step prediction - " + f)
plt.xlabel("step = 0.1s")
plt.ylabel(f)
plt.grid()
plt.legend()
plt.savefig(folder + "/" + str(f) + "/" + str(t) + ".png")
plt.clf()
else:
# Create var folder
if not os.path.exists(folder + "/" + str(target_var) + "/"):
os.makedirs(folder + "/" + str(target_var) + "/")
f_idx = list(self.config[W.FEATURES]).index(target_var)
for t in tqdm(range(len(yp)), desc = target_var):
plt.plot(range(t, t + len(x[t][:, f_idx])), x[t][:, f_idx], color = 'green', label = "past")
plt.plot(range(t - 1 + len(x[t][:, f_idx]), t - 1 + len(x[t][:, f_idx]) + len(ya[t])), ya[t], color = 'blue', label = "actual")
plt.plot(range(t - 1 + len(x[t][:, f_idx]), t - 1 + len(x[t][:, f_idx]) + len(yp[t])), yp[t], color = 'red', label = "pred")
plt.title("Multi-step prediction - " + target_var)
plt.xlabel("step = 0.1s")
plt.ylabel(target_var)
plt.grid()
plt.legend()
plt.savefig(folder + "/" + str(target_var) + "/" + str(t) + ".png")
plt.clf()
plt.close() | 15,549 | 40.246684 | 167 | py |
cmm_ts | cmm_ts-main/models/AdjLR.py | import keras
import tensorflow as tf
class AdjLR(keras.callbacks.Callback):
def __init__ (self, model, freq, factor, justOnce, verbose):
self.model = model
self.freq = freq
self.factor = factor
self.justOnce = justOnce
self.verbose = verbose
self.adj_epoch = freq
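    # Usage sketch (illustrative, mirroring the call in main.py): multiply the learning
    # rate by `factor` every `freq` epochs, or only once when `justOnce` is True, e.g.
    #   AdjLR(model, 10, 0.5, False, 1)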
def on_epoch_end(self, epoch, logs=None):
if epoch + 1 == self.adj_epoch: # adjust the learning rate
lr=float(tf.keras.backend.get_value(self.model.optimizer.lr)) # get the current learning rate
new_lr=lr * self.factor
if not self.justOnce: self.adj_epoch += self.freq
if self.verbose == 1:
print('\n#')
print('# Learning rate updated :', new_lr)
print('#')
tf.keras.backend.set_value(self.model.optimizer.lr, new_lr) # set the learning rate in the optimizer | 905 | 38.391304 | 112 | py |
cmm_ts | cmm_ts-main/models/Evaluation.py | from enum import Enum
import numpy as np
class Metric(Enum):
NRMSEmean = {"name": "NRMSE", "value" : "NRMSEmean"}
NRMSEminmax = {"name": "NRMSE", "value" : "NRMSEminmax"}
NRMSEstd = {"name": "NRMSE", "value" : "NRMSEstd"}
NRMSEiq = {"name": "NRMSE", "value" : "NRMSEiq"}
NMAEmean = {"name": "NMAE", "value" : "NMAEmean"}
NMAEminmax = {"name": "NMAE", "value" : "NMAEminmax"}
NMAEstd = {"name": "NMAE", "value" : "NMAEstd"}
NMAEiq = {"name": "NMAE", "value" : "NMAEiq"}
RMSE = {"name": "RMSE", "value" : "rmse"}
MSE = {"name": "MSE", "value" : "mse"}
MAE = {"name": "MAE", "value" : "mae"}
def evaluate(mode, y_true, y_pred):
rmse = np.sqrt(np.mean((y_true - y_pred)**2))
mae = np.mean(abs(y_true - y_pred))
if mode == Metric.NRMSEmean:
return rmse/np.mean(y_true)
elif mode == Metric.NRMSEminmax:
return rmse/(np.max(y_true) - np.min(y_true))
elif mode == Metric.NRMSEstd:
return rmse/np.std(y_true)
elif mode == Metric.NRMSEiq:
return rmse/(np.quantile(y_true, 0.75) - np.quantile(y_true, 0.25))
elif mode == Metric.NMAEmean:
return mae/np.mean(y_true)
elif mode == Metric.NMAEminmax:
return mae/(np.max(y_true) - np.min(y_true))
elif mode == Metric.NMAEstd:
return mae/np.std(y_true)
elif mode == Metric.NMAEiq:
return mae/(np.quantile(y_true, 0.75) - np.quantile(y_true, 0.25))
elif mode == Metric.RMSE:
return rmse
elif mode == Metric.MSE:
return np.mean((y_true - y_pred)**2)
elif mode == Metric.MAE:
return mae | 1,623 | 29.074074 | 75 | py |
cmm_ts | cmm_ts-main/models/utils.py | import os
import logging
import tensorflow as tf
import absl.logging
from constants import *
import models.Words as Words
import pandas as pd
def init_config(config, folder, npast, nfuture, ndelay, nfeatures, features, initDEC = False,
use_att = False, use_cm = False, cm = None, cm_trainable = False, use_constraint = False, constraint = None):
"""
Init network configuration
Args:
config (dict): empty network configuration
folder (str): model folder
npast (int): observation window
nfuture (int): forecasting window
ndelay (int): forecasting delay
nfeatures (int): number of input variables
features (list[str]): input variables
initDEC (bool, optional): use encoder final state as initial state for decoder. Defaults to False.
use_att (bool, optional): use attention mechanism. Defaults to False.
use_cm (bool, optional): use causal model. Defaults to False.
cm (np.array, optional): causal matrix to use. Defaults to None.
cm_trainable (bool, optional): causal model trainable. Defaults to False.
use_constraint (bool, optional): causal model constraint flag. Defaults to False.
constraint (float, optional): causal model constraint. Defaults to None.
Returns:
dict: network configuration
"""
config[Words.FOLDER] = folder
config[Words.NPAST] = npast
config[Words.NFUTURE] = nfuture
config[Words.NDELAY] = ndelay
config[Words.NFEATURES] = nfeatures
config[Words.FEATURES] = features
config[Words.USEATT] = use_att
config[Words.USECAUSAL] = use_cm
config[Words.CMATRIX] = cm
config[Words.CTRAINABLE] = cm_trainable
config[Words.USECONSTRAINT] = use_constraint
config[Words.TRAINTHRESH] = constraint
config[Words.DECINIT] = initDEC
return config
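# Usage sketch (illustrative, mirroring main.py; cIAED is the empty config dict from
# models.IAED.config): build a configuration for a trainable FPCMCI causal-attention
# model with a 0.1 constraint:
#
#   config = init_config(cIAED, folder = "my_model", npast = 32, nfuture = 48,
#                        ndelay = 0, nfeatures = N_FEATURES, features = features,
#                        use_att = True, use_cm = True, cm = CM_FPCMCI,
#                        cm_trainable = True, use_constraint = True, constraint = 0.1)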
def no_warning():
"""
Disable warning
"""
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.get_logger().setLevel(logging.ERROR)
absl.logging.set_verbosity(absl.logging.ERROR)
def cmd_attention_map(att, catt):
"""
Convert input from parser to boolean values
Args:
att (bool): --att parser option
catt ((str, bool, float)): --catt parser option
"""
def strTrue(s): return s == 'True'
def strNone(s): return s == 'None' or s is None
cm = CAUSAL_MODELS[catt[0]] if catt[0] is not None else None
cm_trainable = strTrue(catt[1])
constraint = float(catt[2]) if not strNone(catt[2]) else None
use_cm = cm is not None
use_constraint = constraint is not None
use_att = att or use_cm
return use_att, use_cm, cm, cm_trainable, use_constraint, constraint
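# Example (illustrative): the parser option `--catt FPCMCI True 0.1` maps to
# use_att = True, use_cm = True, cm = CM_FPCMCI, cm_trainable = True,
# use_constraint = True, constraint = 0.1, while the default [None, False, None]
# disables the causal attention map entirely.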
def get_df(csv):
"""
    Load the csv and fill NaNs (forward then backward fill)
    Args:
        csv (str): name of the .csv file inside the data folder
Returns:
Dataframe: loaded dataframe
list[str]: dataframe var names
"""
csv_path = ROOT_DIR + "/data/" + str(csv)
df = pd.read_csv(csv_path)
df.fillna(method="ffill", inplace = True)
df.fillna(method="bfill", inplace = True)
features = list(df.columns)
return df, features
| 3,138 | 30.39 | 125 | py |
cmm_ts | cmm_ts-main/models/Words.py | FOLDER = "FOLDER"
NPAST = "NPAST"
NFUTURE = "NFUTURE"
NDELAY = "NDELAY"
NFEATURES = "NFEATURES"
FEATURES = "FEATURES"
USEATT = "USEATT"
USECAUSAL = "USECAUSAL"
CMATRIX = "CMATRIX"
CTRAINABLE = "CTRAINABLE"
USECONSTRAINT = "USECONSTRAINT"
TRAINTHRESH = "TRAINTHRESH"
ATTUNITS = "ATTUNITS"
ENCDECUNITS = "ENCDECUNITS"
DECINIT = "DECINIT"
DACT = "DACT"
DRATE = "DRATE"
T2VUNITS = "T2VUNITS" | 388 | 19.473684 | 31 | py |
cmm_ts | cmm_ts-main/models/DenseDropout.py | from keras.layers import *
from keras.models import *
class DenseDropout(Layer):
def __init__(self, units, activation, dropout):
super(DenseDropout, self).__init__()
self.dbit = dropout != 0
self.dense = Dense(units, activation = activation)
if self.dbit: self.dropout = Dropout(dropout)
def call(self, x):
y = self.dense(x)
if self.dbit: y = self.dropout(y)
return y | 437 | 23.333333 | 58 | py |
cmm_ts | cmm_ts-main/models/Constraints.py | from keras.constraints import Constraint
import keras.backend as K
import numpy as np
class Between(Constraint):
def __init__(self, init_value, adj_thres):
self.adj_thres = adj_thres
# self.min_value = init_value - self.adj_thres
self.max_value = init_value + self.adj_thres
self.min_value = np.clip(init_value - self.adj_thres, 0, init_value)
# self.max_value = np.clip(init_value + self.adj_thres, init_value, 1)
def __call__(self, w):
return K.clip(w, self.min_value, self.max_value)
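# Usage sketch (an assumption, the attention layer itself is defined elsewhere): Between
# keeps a trainable causal weight within +/- adj_thres of its initial value, clipped
# below at 0, e.g. as a bias constraint on the layer storing the causal vector:
#
#   Dense(n, bias_constraint = Between(init_value = causal_vec, adj_thres = 0.1))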
# class ConstantTensorInitializer(tf.keras.initializers.Initializer):
# """Initializes tensors to `t`."""
# def __init__(self, t):
# self.t = t
# def __call__(self, shape, dtype=None):
# return self.t
# def get_config(self):
# return {'t': self.t}
class Constant(Constraint):
"""Constrains tensors to `t`."""
def __init__(self, t):
self.t = t
def __call__(self, w):
return self.t
def get_config(self):
return {'t': self.t} | 1,019 | 24.5 | 79 | py |
cmm_ts | cmm_ts-main/models/IAED/IAED2.py | import numpy as np
from constants import CM_FPCMCI
from models.attention.SelfAttention import SelfAttention
from models.attention.InputAttention import InputAttention
from keras.layers import *
from keras.models import *
import tensorflow as tf
import models.Words as W
from models.DenseDropout import DenseDropout
class IAED(Layer):
def __init__(self, config, target_var, name = "IAED", searchBest = False):
super(IAED, self).__init__(name = name)
self.config = config
self.target_var = target_var
self.searchBest = searchBest
if self.config[W.USEATT]:
# Causal vector definition
if searchBest:
causal_vec = CM_FPCMCI[0, :] if self.config[W.USECAUSAL] else None
else:
causal_vec = np.array(self.config[W.CMATRIX][self.config[W.FEATURES].index(self.target_var), :]) if self.config[W.USECAUSAL] else None
# Self attention
self.selfatt = SelfAttention(self.config, causal_vec, name = self.target_var + '_selfatt')
# Input attention
self.inatt = InputAttention(self.config, name = self.target_var + '_inatt')
# Encoders
self.selfenc1 = LSTM(int(self.config[W.ENCDECUNITS]/2),
name = target_var + '_selfENC1',
return_sequences = True,
input_shape = (self.config[W.NPAST], self.config[W.NFEATURES]))
self.selfenc2 = LSTM(int(self.config[W.ENCDECUNITS]/2),
name = target_var + '_selfENC2',
return_state = True,
input_shape = (self.config[W.NPAST], self.config[W.NFEATURES]))
self.inenc1 = LSTM(int(self.config[W.ENCDECUNITS]/2),
name = target_var + '_inENC1',
return_sequences = True,
input_shape = (self.config[W.NPAST], self.config[W.NFEATURES]))
self.inenc2 = LSTM(int(self.config[W.ENCDECUNITS]/2),
name = target_var + '_inENC2',
return_state = True,
input_shape = (self.config[W.NPAST], self.config[W.NFEATURES]))
# Initialization
self.past_h = tf.Variable(tf.zeros([int(self.config[W.ENCDECUNITS]/2), 1]),
trainable = False,
shape = (int(self.config[W.ENCDECUNITS]/2), 1),
name = self.target_var + '_pastH')
self.past_c = tf.Variable(tf.zeros([int(self.config[W.ENCDECUNITS]/2), 1]),
trainable = False,
shape = (int(self.config[W.ENCDECUNITS]/2), 1),
name = self.target_var + '_pastC')
else:
self.enc1 = LSTM(self.config[W.ENCDECUNITS],
name = target_var + '_ENC1',
return_sequences = True,
input_shape = (self.config[W.NPAST], self.config[W.NFEATURES]))
self.enc2 = LSTM(self.config[W.ENCDECUNITS],
name = target_var + '_ENC2',
return_state = True,
input_shape = (self.config[W.NPAST], self.config[W.NFEATURES]))
self.repeat = RepeatVector(self.config[W.NFUTURE], name = self.target_var + '_REPEAT')
# Decoder
self.dec1 = LSTM(self.config[W.ENCDECUNITS], return_sequences = True, name = self.target_var + '_DEC1')
self.dec2 = LSTM(self.config[W.ENCDECUNITS], name = self.target_var + '_DEC2')
# Dense
# self.outdense1 = DenseDropout(self.config[W.NFUTURE] * 3, self.config[W.D1ACT], self.config[W.DRATE])
self.outdense = DenseDropout(self.config[W.NFUTURE] * 2, self.config[W.DACT], self.config[W.DRATE])
self.out = DenseDropout(self.config[W.NFUTURE], 'linear', 0)
def call(self, x):
if self.config[W.USEATT]:
# Attention
x_selfatt = self.selfatt(x)
# if not self.searchBest: x_selfatt = Dropout(self.config[W.DRATE])(x_selfatt)
x_inatt = self.inatt([x, self.past_h, self.past_c])
# if not self.searchBest: x_inatt = Dropout(self.config[W.DRATE])(x_inatt)
# Encoders
enc1_1 = self.selfenc1(x_selfatt)
enc2_1 = self.inenc1(x_inatt)
enc1_2, h1, c1 = self.selfenc2(enc1_1)
enc2_2, h2, c2 = self.inenc2(enc2_1)
self.past_h.assign(tf.expand_dims(h2[-1], -1))
self.past_c.assign(tf.expand_dims(c2[-1], -1))
x = concatenate([enc1_2, enc2_2])
if self.config[W.DECINIT]:
h = concatenate([h1, h2])
c = concatenate([c1, c2])
else:
x = self.enc1(x)
x, h, c = self.enc2(x)
repeat = self.repeat(x)
# Decoder
if self.config[W.DECINIT]:
y = self.dec1(repeat, initial_state = [h, c])
else:
y = self.dec1(repeat)
y = self.dec2(y)
if not self.searchBest: y = Dropout(self.config[W.DRATE])(y)
# y = self.outdense1(y)
y = self.outdense(y)
y = self.out(y)
y = tf.expand_dims(y, axis = -1)
return y | 5,627 | 44.756098 | 150 | py |
cmm_ts | cmm_ts-main/models/IAED/config.py | from models.utils import Words as W
config = {
W.FOLDER : None,
W.NPAST : None,
W.NFUTURE : None,
W.NDELAY : None,
W.NFEATURES : None,
W.FEATURES : None,
W.USEATT : False,
W.USECAUSAL : False,
W.CTRAINABLE : None,
W.USECONSTRAINT : False,
W.TRAINTHRESH : None,
W.ATTUNITS : 32,
W.ENCDECUNITS : 32,
W.DECINIT : False,
W.DACT : "tanh",
W.DRATE : 0.4,
} | 416 | 18.857143 | 35 | py |
cmm_ts | cmm_ts-main/models/IAED/IAED.py | from matplotlib.pyplot import yscale
import numpy as np
from constants import CM_FPCMCI
from models.attention.SelfAttention import SelfAttention
from models.attention.InputAttention import InputAttention
from keras.layers import *
from keras.models import *
import tensorflow as tf
import models.Words as W
from models.DenseDropout import DenseDropout
class IAED(Layer):
def __init__(self, config, target_var, name = "IAED", searchBest = False):
super(IAED, self).__init__(name = name)
self.config = config
self.target_var = target_var
if self.config[W.USEATT]:
# Causal vector definition
if searchBest:
causal_vec = CM_FPCMCI[0, :] if self.config[W.USECAUSAL] else None
else:
causal_vec = np.array(self.config[W.CMATRIX][self.config[W.FEATURES].index(self.target_var), :]) if self.config[W.USECAUSAL] else None
# Self attention
self.selfatt = SelfAttention(self.config, causal_vec, name = self.target_var + '_selfatt')
# Input attention
self.inatt = InputAttention(self.config, name = self.target_var + '_inatt')
# Encoders
self.selfenc = LSTM(int(self.config[W.ENCDECUNITS]/2),
name = target_var + '_selfENC',
return_state = True,
input_shape = (self.config[W.NPAST], self.config[W.NFEATURES]))
self.inenc = LSTM(int(self.config[W.ENCDECUNITS]/2),
name = target_var + '_inENC',
return_state = True,
input_shape = (self.config[W.NPAST], self.config[W.NFEATURES]))
# Initialization
self.past_h = tf.Variable(tf.zeros([int(self.config[W.ENCDECUNITS]/2), 1]),
trainable = False,
shape = (int(self.config[W.ENCDECUNITS]/2), 1),
name = self.target_var + '_pastH')
self.past_c = tf.Variable(tf.zeros([int(self.config[W.ENCDECUNITS]/2), 1]),
trainable = False,
shape = (int(self.config[W.ENCDECUNITS]/2), 1),
name = self.target_var + '_pastC')
else:
self.enc = LSTM(self.config[W.ENCDECUNITS],
name = target_var + '_ENC',
return_state = True,
input_shape = (self.config[W.NPAST], self.config[W.NFEATURES]))
self.repeat = RepeatVector(self.config[W.NFUTURE], name = self.target_var + '_REPEAT')
# Decoder
self.dec = LSTM(self.config[W.ENCDECUNITS], name = self.target_var + '_DEC')
# Dense
# self.outdense1 = DenseDropout(self.config[W.NFUTURE] * 3, self.config[W.D1ACT], self.config[W.DRATE])
self.outdense = DenseDropout(self.config[W.NFUTURE] * 2, self.config[W.DACT], self.config[W.DRATE])
self.out = DenseDropout(self.config[W.NFUTURE], 'linear', 0)
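    # call() data flow (summary): with attention enabled, the input window is weighted by
    # a causally-initialized self-attention branch and by an input-attention branch fed
    # with the previous encoder hidden/cell state; the two LSTM-encoded branches are
    # concatenated, repeated over the forecast horizon, decoded by an LSTM (optionally
    # initialized with the encoder states) and mapped to NFUTURE values by the dense head.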
def call(self, x):
if self.config[W.USEATT]:
# Attention
x_selfatt = self.selfatt(x)
# x_selfatt = Dropout(self.config[W.DRATE])(x_selfatt)
x_inatt = self.inatt([x, self.past_h, self.past_c])
# x_inatt = Dropout(self.config[W.DRATE])(x_inatt)
# Encoders
enc1, h1, c1 = self.selfenc(x_selfatt)
enc2, h2, c2 = self.inenc(x_inatt)
self.past_h.assign(tf.expand_dims(h2[-1], -1))
self.past_c.assign(tf.expand_dims(c2[-1], -1))
x = concatenate([enc1, enc2])
if self.config[W.DECINIT]:
h = concatenate([h1, h2])
c = concatenate([c1, c2])
else:
x, h, c = self.enc(x)
repeat = self.repeat(x)
# Decoder
if self.config[W.DECINIT]:
y = self.dec(repeat, initial_state = [h, c])
else:
y = self.dec(repeat)
y = Dropout(self.config[W.DRATE])(y)
# y = self.outdense1(y)
y = self.outdense(y)
y = self.out(y)
y = tf.expand_dims(y, axis = -1)
return y | 4,444 | 40.542056 | 150 | py |
cmm_ts | cmm_ts-main/models/IAED/mIAED.py | from keras.layers import *
from keras.models import *
from keras.utils.vis_utils import plot_model
from constants import LIST_FEATURES
from models.MyModel import MyModel
from .IAED import IAED
from models.utils import Models
import models.Words as W
class mIAED(MyModel):
def __init__(self, df, config : dict = None, folder : str = None):
super().__init__(name = Models.mIAED, df = df, config = config, folder = folder)
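    # create_model() builds one IAED channel per input feature and concatenates the
    # per-channel forecasts along the feature axis, so every variable of the
    # multivariate series is predicted at once.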
def create_model(self, loss, optimizer, metrics, searchBest = False) -> Model:
inp = Input(shape = (self.config[W.NPAST], self.config[W.NFEATURES]))
# Multihead
channels = list()
list_f = LIST_FEATURES if searchBest else self.config[W.FEATURES]
for var in list_f:
channels.append(IAED(self.config, var, name = var + "_IAED", searchBest = searchBest)(inp))
# Concatenation
y = concatenate(channels, axis = 2)
self.model = Model(inp, y)
self.model.compile(loss = loss, optimizer = optimizer, metrics = metrics)
self.model.summary()
# plot_model(self.model, to_file = self.model_dir + '/model_plot.png', show_shapes = True, show_layer_names = True, expand_nested = True)
return self.model | 1,267 | 36.294118 | 145 | py |
cmm_ts | cmm_ts-main/models/IAED/sIAED.py | from keras.layers import *
from keras.models import *
from keras.utils.vis_utils import plot_model
from models.utils import Models
from models.MyModel import MyModel
from .IAED2 import IAED
import models.Words as W
class sIAED(MyModel):
def __init__(self, df, config : dict = None, folder : str = None):
super().__init__(name = Models.sIAED, df = df, config = config, folder = folder)
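    # create_model() wraps a single IAED channel around the chosen target variable, so
    # only that variable is forecast while all features are used as input.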
def create_model(self, target_var, loss, optimizer, metrics, searchBest = False) -> Model:
self.target_var = target_var
inp = Input(shape = (self.config[W.NPAST], self.config[W.NFEATURES]))
x = IAED(self.config, target_var, name = target_var + "_IAED", searchBest = searchBest)(inp)
self.model = Model(inp, x)
self.model.compile(loss = loss, optimizer = optimizer, metrics = metrics)
self.model.summary()
# plot_model(self.model, to_file = self.model_dir + '/model_plot.png', show_shapes = True, show_layer_names = True, expand_nested = True)
return self.model | 1,051 | 39.461538 | 145 | py |